author     Linus Torvalds <torvalds@linux-foundation.org>    2013-02-21 20:54:03 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-02-21 20:54:03 -0500
commit     81ec44a6c69342fec1b1140c60a604027e429f69 (patch)
tree       ee6bec8a94ef28e111bf766cf4b7a9366cb4f7c1
parent     48a732dfaa77a4dfec803aa8f248373998704f76 (diff)
parent     e80cfc31d872b6b85b8966bce6ba80bee401a7dd (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 update from Martin Schwidefsky:
 "The most prominent change in this patch set is the software dirty bit
  patch for s390. It removes __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY and
  the page_test_and_clear_dirty primitive which makes the common memory
  management code a bit less obscure.

  Heiko fixed most of the PCI related fallout, more often than not
  missing GENERIC_HARDIRQS dependencies. Notable is one of the 3270
  patches which adds an export to tty_io to be able to resize a tty.

  The rest is the usual bunch of cleanups and bug fixes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/module: Add missing R_390_NONE relocation type
  drivers/gpio: add missing GENERIC_HARDIRQ dependency
  drivers/input: add couple of missing GENERIC_HARDIRQS dependencies
  s390/cleanup: rename SPP to LPP
  s390/mm: implement software dirty bits
  s390/mm: Fix crst upgrade of mmap with MAP_FIXED
  s390/linker skript: discard exit.data at runtime
  drivers/media: add missing GENERIC_HARDIRQS dependency
  s390/bpf,jit: add vlan tag support
  drivers/net,AT91RM9200: add missing GENERIC_HARDIRQS dependency
  iucv: fix kernel panic at reboot
  s390/Kconfig: sort list of arch selected config options
  phylib: remove !S390 dependeny from Kconfig
  uio: remove !S390 dependency from Kconfig
  dasd: fix sysfs cleanup in dasd_generic_remove
  s390/pci: fix hotplug module init
  s390/pci: cleanup clp page allocation
  s390/pci: cleanup clp inline assembly
  s390/perf: cpum_cf: fallback to software sampling events
  s390/mm: provide PAGE_SHARED define
  ...
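The software dirty bit change is the centerpiece of this pull: instead of relying on the storage-key changed bit, dirty state is carried in software PTE bits, and clean pages stay hardware write-protected so that the first store faults and lets the fault handler set the dirty bit. A minimal sketch of that idea, condensed from the pte_mkwrite/pte_mkdirty/pte_mkclean helpers in the arch/s390/include/asm/pgtable.h hunk below (the bit names mirror _PAGE_SWW, _PAGE_SWC and _PAGE_RO, but the values and code here are an illustration only, not the kernel implementation):

/* Simplified model of software dirty-bit tracking (illustrative values). */
#include <stdio.h>

#define PAGE_SWW 0x010UL	/* software "writable" bit */
#define PAGE_SWC 0x004UL	/* software "changed" (dirty) bit */
#define PAGE_RO  0x200UL	/* hardware write-protection bit */

typedef struct { unsigned long val; } pte_t;

/* Grant write permission; drop HW protection only once the page is dirty. */
static pte_t pte_mkwrite(pte_t pte)
{
	pte.val |= PAGE_SWW;
	if (pte.val & PAGE_SWC)
		pte.val &= ~PAGE_RO;
	return pte;
}

/* Mark dirty; a writable page no longer needs the write-protect trap. */
static pte_t pte_mkdirty(pte_t pte)
{
	pte.val |= PAGE_SWC;
	if (pte.val & PAGE_SWW)
		pte.val &= ~PAGE_RO;
	return pte;
}

/* Clean the page and re-arm the trap so the next store faults again. */
static pte_t pte_mkclean(pte_t pte)
{
	pte.val &= ~PAGE_SWC;
	pte.val |= PAGE_RO;
	return pte;
}

int main(void)
{
	pte_t pte = { PAGE_RO };	/* clean, write-protected */

	pte = pte_mkwrite(pte);		/* writable, still protected */
	pte = pte_mkdirty(pte);		/* first write fault: dirty, unprotected */
	printf("dirty=%d protected=%d\n",
	       !!(pte.val & PAGE_SWC), !!(pte.val & PAGE_RO));
	pte = pte_mkclean(pte);		/* writeback: clean and protected again */
	printf("dirty=%d protected=%d\n",
	       !!(pte.val & PAGE_SWC), !!(pte.val & PAGE_RO));
	return 0;
}

This is why pte_dirty() and pte_write() in the pgtable.h hunk now test _PAGE_SWC and _PAGE_SWW directly, and why page_test_and_clear_dirty() and __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY could be dropped from the common memory management code.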
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/s390/Kconfig | 113
-rw-r--r--  arch/s390/appldata/appldata_mem.c | 2
-rw-r--r--  arch/s390/appldata/appldata_net_sum.c | 2
-rw-r--r--  arch/s390/appldata/appldata_os.c | 2
-rw-r--r--  arch/s390/hypfs/hypfs_vm.c | 2
-rw-r--r--  arch/s390/include/asm/barrier.h | 9
-rw-r--r--  arch/s390/include/asm/clp.h | 2
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 4
-rw-r--r--  arch/s390/include/asm/dma-mapping.h | 8
-rw-r--r--  arch/s390/include/asm/mman.h | 4
-rw-r--r--  arch/s390/include/asm/page.h | 22
-rw-r--r--  arch/s390/include/asm/pci.h | 11
-rw-r--r--  arch/s390/include/asm/pgtable.h | 132
-rw-r--r--  arch/s390/include/asm/sclp.h | 1
-rw-r--r--  arch/s390/include/asm/setup.h | 22
-rw-r--r--  arch/s390/include/asm/timex.h | 18
-rw-r--r--  arch/s390/kernel/debug.c | 2
-rw-r--r--  arch/s390/kernel/dis.c | 1
-rw-r--r--  arch/s390/kernel/early.c | 8
-rw-r--r--  arch/s390/kernel/entry64.S | 10
-rw-r--r--  arch/s390/kernel/ipl.c | 16
-rw-r--r--  arch/s390/kernel/module.c | 143
-rw-r--r--  arch/s390/kernel/nmi.c | 2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 13
-rw-r--r--  arch/s390/kernel/smp.c | 10
-rw-r--r--  arch/s390/kernel/time.c | 26
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/s390/kernel/vtime.c | 2
-rw-r--r--  arch/s390/kvm/interrupt.c | 6
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 2
-rw-r--r--  arch/s390/lib/delay.c | 16
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 2
-rw-r--r--  arch/s390/mm/mmap.c | 9
-rw-r--r--  arch/s390/mm/pageattr.c | 2
-rw-r--r--  arch/s390/mm/vmem.c | 24
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 21
-rw-r--r--  arch/s390/pci/pci.c | 35
-rw-r--r--  arch/s390/pci/pci_clp.c | 14
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/gpio/Kconfig | 4
-rw-r--r--  drivers/media/radio/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 1
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/parport/Kconfig | 2
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 60
-rw-r--r--  drivers/s390/block/dasd.c | 23
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 8
-rw-r--r--  drivers/s390/block/dasd_alias.c | 4
-rw-r--r--  drivers/s390/block/dasd_diag.c | 10
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 30
-rw-r--r--  drivers/s390/block/dasd_eer.c | 2
-rw-r--r--  drivers/s390/block/dasd_erp.c | 4
-rw-r--r--  drivers/s390/block/dasd_fba.c | 2
-rw-r--r--  drivers/s390/block/scm_blk.h | 41
-rw-r--r--  drivers/s390/char/fs3270.c | 29
-rw-r--r--  drivers/s390/char/raw3270.c | 611
-rw-r--r--  drivers/s390/char/raw3270.h | 12
-rw-r--r--  drivers/s390/char/sclp.c | 4
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 10
-rw-r--r--  drivers/s390/char/tty3270.c | 187
-rw-r--r--  drivers/s390/char/zcore.c | 64
-rw-r--r--  drivers/s390/cio/chsc.c | 68
-rw-r--r--  drivers/s390/cio/chsc.h | 2
-rw-r--r--  drivers/s390/cio/cio.c | 4
-rw-r--r--  drivers/s390/cio/cmf.c | 6
-rw-r--r--  drivers/s390/cio/css.c | 2
-rw-r--r--  drivers/s390/cio/device.c | 10
-rw-r--r--  drivers/s390/cio/device_fsm.c | 2
-rw-r--r--  drivers/s390/cio/device_pgid.c | 123
-rw-r--r--  drivers/s390/cio/io_sch.h | 5
-rw-r--r--  drivers/s390/cio/qdio_main.c | 12
-rw-r--r--  drivers/s390/net/qeth_core.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 2
-rw-r--r--  drivers/tty/tty_io.c | 1
-rw-r--r--  drivers/uio/Kconfig | 1
-rw-r--r--  include/asm-generic/io.h | 20
-rw-r--r--  include/asm-generic/pgtable.h | 10
-rw-r--r--  include/linux/page-flags.h | 8
-rw-r--r--  mm/rmap.c | 24
-rw-r--r--  net/iucv/iucv.c | 5
82 files changed, 1216 insertions, 930 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index db061e961d37..73b7d50c9407 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6519,7 +6519,7 @@ S: Supported
6519F: drivers/s390/net/ 6519F: drivers/s390/net/
6520 6520
6521S390 ZCRYPT DRIVER 6521S390 ZCRYPT DRIVER
6522M: Holger Dengler <hd@linux.vnet.ibm.com> 6522M: Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
6523M: linux390@de.ibm.com 6523M: linux390@de.ibm.com
6524L: linux-s390@vger.kernel.org 6524L: linux-s390@vger.kernel.org
6525W: http://www.ibm.com/developerworks/linux/linux390/ 6525W: http://www.ibm.com/developerworks/linux/linux390/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 27c91c38d1a1..b220e152aefa 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -60,85 +60,86 @@ config PCI_QUIRKS
60 60
61config S390 61config S390
62 def_bool y 62 def_bool y
63 select USE_GENERIC_SMP_HELPERS if SMP
64 select GENERIC_CPU_DEVICES if !SMP
65 select HAVE_SYSCALL_WRAPPERS
66 select HAVE_FUNCTION_TRACER
67 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
68 select HAVE_FTRACE_MCOUNT_RECORD
69 select HAVE_C_RECORDMCOUNT
70 select HAVE_SYSCALL_TRACEPOINTS
71 select SYSCTL_EXCEPTION_TRACE
72 select HAVE_DYNAMIC_FTRACE
73 select HAVE_FUNCTION_GRAPH_TRACER
74 select HAVE_REGS_AND_STACK_ACCESS_API
75 select HAVE_OPROFILE
76 select HAVE_KPROBES
77 select HAVE_KRETPROBES
78 select HAVE_KVM if 64BIT
79 select HAVE_ARCH_TRACEHOOK
80 select INIT_ALL_POSSIBLE
81 select HAVE_PERF_EVENTS
82 select ARCH_HAVE_NMI_SAFE_CMPXCHG
83 select HAVE_DEBUG_KMEMLEAK
84 select HAVE_KERNEL_GZIP
85 select HAVE_KERNEL_BZIP2
86 select HAVE_KERNEL_LZMA
87 select HAVE_KERNEL_LZO
88 select HAVE_KERNEL_XZ
89 select HAVE_ARCH_MUTEX_CPU_RELAX
90 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
91 select HAVE_BPF_JIT if 64BIT && PACK_STACK
92 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
93 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
94 select HAVE_MEMBLOCK
95 select HAVE_MEMBLOCK_NODE_MAP
96 select HAVE_CMPXCHG_LOCAL
97 select HAVE_CMPXCHG_DOUBLE
98 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
99 select HAVE_VIRT_CPU_ACCOUNTING
100 select VIRT_CPU_ACCOUNTING
101 select ARCH_DISCARD_MEMBLOCK 63 select ARCH_DISCARD_MEMBLOCK
102 select BUILDTIME_EXTABLE_SORT 64 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
103 select ARCH_INLINE_SPIN_TRYLOCK 65 select ARCH_HAVE_NMI_SAFE_CMPXCHG
104 select ARCH_INLINE_SPIN_TRYLOCK_BH
105 select ARCH_INLINE_SPIN_LOCK
106 select ARCH_INLINE_SPIN_LOCK_BH
107 select ARCH_INLINE_SPIN_LOCK_IRQ
108 select ARCH_INLINE_SPIN_LOCK_IRQSAVE
109 select ARCH_INLINE_SPIN_UNLOCK
110 select ARCH_INLINE_SPIN_UNLOCK_BH
111 select ARCH_INLINE_SPIN_UNLOCK_IRQ
112 select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
113 select ARCH_INLINE_READ_TRYLOCK
114 select ARCH_INLINE_READ_LOCK 66 select ARCH_INLINE_READ_LOCK
115 select ARCH_INLINE_READ_LOCK_BH 67 select ARCH_INLINE_READ_LOCK_BH
116 select ARCH_INLINE_READ_LOCK_IRQ 68 select ARCH_INLINE_READ_LOCK_IRQ
117 select ARCH_INLINE_READ_LOCK_IRQSAVE 69 select ARCH_INLINE_READ_LOCK_IRQSAVE
70 select ARCH_INLINE_READ_TRYLOCK
118 select ARCH_INLINE_READ_UNLOCK 71 select ARCH_INLINE_READ_UNLOCK
119 select ARCH_INLINE_READ_UNLOCK_BH 72 select ARCH_INLINE_READ_UNLOCK_BH
120 select ARCH_INLINE_READ_UNLOCK_IRQ 73 select ARCH_INLINE_READ_UNLOCK_IRQ
121 select ARCH_INLINE_READ_UNLOCK_IRQRESTORE 74 select ARCH_INLINE_READ_UNLOCK_IRQRESTORE
122 select ARCH_INLINE_WRITE_TRYLOCK 75 select ARCH_INLINE_SPIN_LOCK
76 select ARCH_INLINE_SPIN_LOCK_BH
77 select ARCH_INLINE_SPIN_LOCK_IRQ
78 select ARCH_INLINE_SPIN_LOCK_IRQSAVE
79 select ARCH_INLINE_SPIN_TRYLOCK
80 select ARCH_INLINE_SPIN_TRYLOCK_BH
81 select ARCH_INLINE_SPIN_UNLOCK
82 select ARCH_INLINE_SPIN_UNLOCK_BH
83 select ARCH_INLINE_SPIN_UNLOCK_IRQ
84 select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
123 select ARCH_INLINE_WRITE_LOCK 85 select ARCH_INLINE_WRITE_LOCK
124 select ARCH_INLINE_WRITE_LOCK_BH 86 select ARCH_INLINE_WRITE_LOCK_BH
125 select ARCH_INLINE_WRITE_LOCK_IRQ 87 select ARCH_INLINE_WRITE_LOCK_IRQ
126 select ARCH_INLINE_WRITE_LOCK_IRQSAVE 88 select ARCH_INLINE_WRITE_LOCK_IRQSAVE
89 select ARCH_INLINE_WRITE_TRYLOCK
127 select ARCH_INLINE_WRITE_UNLOCK 90 select ARCH_INLINE_WRITE_UNLOCK
128 select ARCH_INLINE_WRITE_UNLOCK_BH 91 select ARCH_INLINE_WRITE_UNLOCK_BH
129 select ARCH_INLINE_WRITE_UNLOCK_IRQ 92 select ARCH_INLINE_WRITE_UNLOCK_IRQ
130 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 93 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
131 select HAVE_UID16 if 32BIT 94 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
132 select ARCH_WANT_IPC_PARSE_VERSION 95 select ARCH_WANT_IPC_PARSE_VERSION
133 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT 96 select BUILDTIME_EXTABLE_SORT
97 select CLONE_BACKWARDS2
98 select GENERIC_CLOCKEVENTS
99 select GENERIC_CPU_DEVICES if !SMP
100 select GENERIC_KERNEL_THREAD
134 select GENERIC_SMP_IDLE_THREAD 101 select GENERIC_SMP_IDLE_THREAD
135 select GENERIC_TIME_VSYSCALL_OLD 102 select GENERIC_TIME_VSYSCALL_OLD
136 select GENERIC_CLOCKEVENTS 103 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
137 select KTIME_SCALAR if 32BIT 104 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
105 select HAVE_ARCH_MUTEX_CPU_RELAX
138 select HAVE_ARCH_SECCOMP_FILTER 106 select HAVE_ARCH_SECCOMP_FILTER
107 select HAVE_ARCH_TRACEHOOK
108 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
109 select HAVE_BPF_JIT if 64BIT && PACK_STACK
110 select HAVE_CMPXCHG_DOUBLE
111 select HAVE_CMPXCHG_LOCAL
112 select HAVE_C_RECORDMCOUNT
113 select HAVE_DEBUG_KMEMLEAK
114 select HAVE_DYNAMIC_FTRACE
115 select HAVE_FTRACE_MCOUNT_RECORD
116 select HAVE_FUNCTION_GRAPH_TRACER
117 select HAVE_FUNCTION_TRACER
118 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
119 select HAVE_KERNEL_BZIP2
120 select HAVE_KERNEL_GZIP
121 select HAVE_KERNEL_LZMA
122 select HAVE_KERNEL_LZO
123 select HAVE_KERNEL_XZ
124 select HAVE_KPROBES
125 select HAVE_KRETPROBES
126 select HAVE_KVM if 64BIT
127 select HAVE_MEMBLOCK
128 select HAVE_MEMBLOCK_NODE_MAP
139 select HAVE_MOD_ARCH_SPECIFIC 129 select HAVE_MOD_ARCH_SPECIFIC
130 select HAVE_OPROFILE
131 select HAVE_PERF_EVENTS
132 select HAVE_REGS_AND_STACK_ACCESS_API
133 select HAVE_SYSCALL_TRACEPOINTS
134 select HAVE_SYSCALL_WRAPPERS
135 select HAVE_UID16 if 32BIT
136 select HAVE_VIRT_CPU_ACCOUNTING
137 select INIT_ALL_POSSIBLE
138 select KTIME_SCALAR if 32BIT
140 select MODULES_USE_ELF_RELA 139 select MODULES_USE_ELF_RELA
141 select CLONE_BACKWARDS2 140 select SYSCTL_EXCEPTION_TRACE
141 select USE_GENERIC_SMP_HELPERS if SMP
142 select VIRT_CPU_ACCOUNTING
142 143
143config SCHED_OMIT_FRAME_POINTER 144config SCHED_OMIT_FRAME_POINTER
144 def_bool y 145 def_bool y
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 02d9a1cf5057..7ef60b52d6e0 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -108,7 +108,7 @@ static void appldata_get_mem_data(void *data)
108 mem_data->totalswap = P2K(val.totalswap); 108 mem_data->totalswap = P2K(val.totalswap);
109 mem_data->freeswap = P2K(val.freeswap); 109 mem_data->freeswap = P2K(val.freeswap);
110 110
111 mem_data->timestamp = get_clock(); 111 mem_data->timestamp = get_tod_clock();
112 mem_data->sync_count_2++; 112 mem_data->sync_count_2++;
113} 113}
114 114
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 1370e358d49a..2d224b945355 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -111,7 +111,7 @@ static void appldata_get_net_sum_data(void *data)
111 net_data->tx_dropped = tx_dropped; 111 net_data->tx_dropped = tx_dropped;
112 net_data->collisions = collisions; 112 net_data->collisions = collisions;
113 113
114 net_data->timestamp = get_clock(); 114 net_data->timestamp = get_tod_clock();
115 net_data->sync_count_2++; 115 net_data->sync_count_2++;
116} 116}
117 117
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 87521ba682e5..de8e2b3b0180 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -156,7 +156,7 @@ static void appldata_get_os_data(void *data)
156 } 156 }
157 ops.size = new_size; 157 ops.size = new_size;
158 } 158 }
159 os_data->timestamp = get_clock(); 159 os_data->timestamp = get_tod_clock();
160 os_data->sync_count_2++; 160 os_data->sync_count_2++;
161} 161}
162 162
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 4f6afaa8bd8f..f364dcf77e8e 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -245,7 +245,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
245 d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr)); 245 d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
246 if (IS_ERR(d2fc)) 246 if (IS_ERR(d2fc))
247 return PTR_ERR(d2fc); 247 return PTR_ERR(d2fc);
248 get_clock_ext(d2fc->hdr.tod_ext); 248 get_tod_clock_ext(d2fc->hdr.tod_ext);
249 d2fc->hdr.len = count * sizeof(struct diag2fc_data); 249 d2fc->hdr.len = count * sizeof(struct diag2fc_data);
250 d2fc->hdr.version = DBFS_D2FC_HDR_VERSION; 250 d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
251 d2fc->hdr.count = count; 251 d2fc->hdr.count = count;
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 10a508802940..16760eeb79b0 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -13,15 +13,12 @@
13 * to devices. 13 * to devices.
14 */ 14 */
15 15
16static inline void mb(void)
17{
18#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 16#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
19 /* Fast-BCR without checkpoint synchronization */ 17/* Fast-BCR without checkpoint synchronization */
20 asm volatile("bcr 14,0" : : : "memory"); 18#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0)
21#else 19#else
22 asm volatile("bcr 15,0" : : : "memory"); 20#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0)
23#endif 21#endif
24}
25 22
26#define rmb() mb() 23#define rmb() mb()
27#define wmb() mb() 24#define wmb() mb()
diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h
index 6c3aecc245ff..a0e71a501f7c 100644
--- a/arch/s390/include/asm/clp.h
+++ b/arch/s390/include/asm/clp.h
@@ -2,7 +2,7 @@
2#define _ASM_S390_CLP_H 2#define _ASM_S390_CLP_H
3 3
4/* CLP common request & response block size */ 4/* CLP common request & response block size */
5#define CLP_BLK_SIZE (PAGE_SIZE * 2) 5#define CLP_BLK_SIZE PAGE_SIZE
6 6
7struct clp_req_hdr { 7struct clp_req_hdr {
8 u16 len; 8 u16 len;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 35f0020b7ba7..f1eddd150dd7 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -34,12 +34,12 @@
34/* CPU measurement facility support */ 34/* CPU measurement facility support */
35static inline int cpum_cf_avail(void) 35static inline int cpum_cf_avail(void)
36{ 36{
37 return MACHINE_HAS_SPP && test_facility(67); 37 return MACHINE_HAS_LPP && test_facility(67);
38} 38}
39 39
40static inline int cpum_sf_avail(void) 40static inline int cpum_sf_avail(void)
41{ 41{
42 return MACHINE_HAS_SPP && test_facility(68); 42 return MACHINE_HAS_LPP && test_facility(68);
43} 43}
44 44
45 45
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 8a32f7dfd3af..9411db653bac 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -19,9 +19,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
19} 19}
20 20
21extern int dma_set_mask(struct device *dev, u64 mask); 21extern int dma_set_mask(struct device *dev, u64 mask);
22extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); 22
23extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 23static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
24 enum dma_data_direction direction); 24 enum dma_data_direction direction)
25{
26}
25 27
26#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 28#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
27#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 29#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 0e47a576d666..9977e08df5bd 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -9,7 +9,7 @@
9#include <uapi/asm/mman.h> 9#include <uapi/asm/mman.h>
10 10
11#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) 11#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
12int s390_mmap_check(unsigned long addr, unsigned long len); 12int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len) 13#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
14#endif 14#endif
15#endif /* __S390_MMAN_H__ */ 15#endif /* __S390_MMAN_H__ */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a86ad4084073..75ce9b065f9f 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -155,28 +155,6 @@ static inline int page_reset_referenced(unsigned long addr)
155#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ 155#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
156 156
157/* 157/*
158 * Test and clear dirty bit in storage key.
159 * We can't clear the changed bit atomically. This is a potential
160 * race against modification of the referenced bit. This function
161 * should therefore only be called if it is not mapped in any
162 * address space.
163 *
164 * Note that the bit gets set whenever page content is changed. That means
165 * also when the page is modified by DMA or from inside the kernel.
166 */
167#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
168static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
169{
170 unsigned char skey;
171
172 skey = page_get_storage_key(pfn << PAGE_SHIFT);
173 if (!(skey & _PAGE_CHANGED))
174 return 0;
175 page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
176 return 1;
177}
178
179/*
180 * Test and clear referenced bit in storage key. 158 * Test and clear referenced bit in storage key.
181 */ 159 */
182#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 160#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b1fa93c606ad..05333b7f0469 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -160,9 +160,14 @@ void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
160int zpci_msihash_init(void); 160int zpci_msihash_init(void);
161void zpci_msihash_exit(void); 161void zpci_msihash_exit(void);
162 162
163#ifdef CONFIG_PCI
163/* Error handling and recovery */ 164/* Error handling and recovery */
164void zpci_event_error(void *); 165void zpci_event_error(void *);
165void zpci_event_availability(void *); 166void zpci_event_availability(void *);
167#else /* CONFIG_PCI */
168static inline void zpci_event_error(void *e) {}
169static inline void zpci_event_availability(void *e) {}
170#endif /* CONFIG_PCI */
166 171
167/* Helpers */ 172/* Helpers */
168struct zpci_dev *get_zdev(struct pci_dev *); 173struct zpci_dev *get_zdev(struct pci_dev *);
@@ -180,8 +185,10 @@ void zpci_dma_exit(void);
180/* Hotplug */ 185/* Hotplug */
181extern struct mutex zpci_list_lock; 186extern struct mutex zpci_list_lock;
182extern struct list_head zpci_list; 187extern struct list_head zpci_list;
183extern struct pci_hp_callback_ops hotplug_ops; 188extern unsigned int s390_pci_probe;
184extern unsigned int pci_probe; 189
190void zpci_register_hp_ops(struct pci_hp_callback_ops *);
191void zpci_deregister_hp_ops(void);
185 192
186/* FMB */ 193/* FMB */
187int zpci_fmb_enable_device(struct zpci_dev *); 194int zpci_fmb_enable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 098adbb62660..97de1200c849 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -29,6 +29,7 @@
29#ifndef __ASSEMBLY__ 29#ifndef __ASSEMBLY__
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/mm_types.h> 31#include <linux/mm_types.h>
32#include <linux/page-flags.h>
32#include <asm/bug.h> 33#include <asm/bug.h>
33#include <asm/page.h> 34#include <asm/page.h>
34 35
@@ -221,13 +222,15 @@ extern unsigned long MODULES_END;
221/* Software bits in the page table entry */ 222/* Software bits in the page table entry */
222#define _PAGE_SWT 0x001 /* SW pte type bit t */ 223#define _PAGE_SWT 0x001 /* SW pte type bit t */
223#define _PAGE_SWX 0x002 /* SW pte type bit x */ 224#define _PAGE_SWX 0x002 /* SW pte type bit x */
224#define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */ 225#define _PAGE_SWC 0x004 /* SW pte changed bit */
225#define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */ 226#define _PAGE_SWR 0x008 /* SW pte referenced bit */
226#define _PAGE_SPECIAL 0x010 /* SW associated with special page */ 227#define _PAGE_SWW 0x010 /* SW pte write bit */
228#define _PAGE_SPECIAL 0x020 /* SW associated with special page */
227#define __HAVE_ARCH_PTE_SPECIAL 229#define __HAVE_ARCH_PTE_SPECIAL
228 230
229/* Set of bits not changed in pte_modify */ 231/* Set of bits not changed in pte_modify */
230#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR) 232#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
233 _PAGE_SWC | _PAGE_SWR)
231 234
232/* Six different types of pages. */ 235/* Six different types of pages. */
233#define _PAGE_TYPE_EMPTY 0x400 236#define _PAGE_TYPE_EMPTY 0x400
@@ -321,6 +324,7 @@ extern unsigned long MODULES_END;
321 324
322/* Bits in the region table entry */ 325/* Bits in the region table entry */
323#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ 326#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
327#define _REGION_ENTRY_RO 0x200 /* region protection bit */
324#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ 328#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
325#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ 329#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
326#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ 330#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
@@ -382,9 +386,11 @@ extern unsigned long MODULES_END;
382 */ 386 */
383#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) 387#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
384#define PAGE_RO __pgprot(_PAGE_TYPE_RO) 388#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
385#define PAGE_RW __pgprot(_PAGE_TYPE_RW) 389#define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
390#define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
386 391
387#define PAGE_KERNEL PAGE_RW 392#define PAGE_KERNEL PAGE_RWC
393#define PAGE_SHARED PAGE_KERNEL
388#define PAGE_COPY PAGE_RO 394#define PAGE_COPY PAGE_RO
389 395
390/* 396/*
@@ -631,23 +637,23 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
631 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); 637 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
632 /* Clear page changed & referenced bit in the storage key */ 638 /* Clear page changed & referenced bit in the storage key */
633 if (bits & _PAGE_CHANGED) 639 if (bits & _PAGE_CHANGED)
634 page_set_storage_key(address, skey ^ bits, 1); 640 page_set_storage_key(address, skey ^ bits, 0);
635 else if (bits) 641 else if (bits)
636 page_reset_referenced(address); 642 page_reset_referenced(address);
637 /* Transfer page changed & referenced bit to guest bits in pgste */ 643 /* Transfer page changed & referenced bit to guest bits in pgste */
638 pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */ 644 pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
639 /* Get host changed & referenced bits from pgste */ 645 /* Get host changed & referenced bits from pgste */
640 bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52; 646 bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
641 /* Clear host bits in pgste. */ 647 /* Transfer page changed & referenced bit to kvm user bits */
648 pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */
649 /* Clear relevant host bits in pgste. */
642 pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT); 650 pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
643 pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT); 651 pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
644 /* Copy page access key and fetch protection bit to pgste */ 652 /* Copy page access key and fetch protection bit to pgste */
645 pgste_val(pgste) |= 653 pgste_val(pgste) |=
646 (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; 654 (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
647 /* Transfer changed and referenced to kvm user bits */ 655 /* Transfer referenced bit to pte */
648 pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */ 656 pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
649 /* Transfer changed & referenced to pte sofware bits */
650 pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */
651#endif 657#endif
652 return pgste; 658 return pgste;
653 659
@@ -660,20 +666,25 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
660 666
661 if (!pte_present(*ptep)) 667 if (!pte_present(*ptep))
662 return pgste; 668 return pgste;
669 /* Get referenced bit from storage key */
663 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); 670 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
664 /* Transfer page referenced bit to pte software bit (host view) */ 671 if (young)
665 if (young || (pgste_val(pgste) & RCP_HR_BIT)) 672 pgste_val(pgste) |= RCP_GR_BIT;
673 /* Get host referenced bit from pgste */
674 if (pgste_val(pgste) & RCP_HR_BIT) {
675 pgste_val(pgste) &= ~RCP_HR_BIT;
676 young = 1;
677 }
678 /* Transfer referenced bit to kvm user bits and pte */
679 if (young) {
680 pgste_val(pgste) |= KVM_UR_BIT;
666 pte_val(*ptep) |= _PAGE_SWR; 681 pte_val(*ptep) |= _PAGE_SWR;
667 /* Clear host referenced bit in pgste. */ 682 }
668 pgste_val(pgste) &= ~RCP_HR_BIT;
669 /* Transfer page referenced bit to guest bit in pgste */
670 pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
671#endif 683#endif
672 return pgste; 684 return pgste;
673
674} 685}
675 686
676static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) 687static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
677{ 688{
678#ifdef CONFIG_PGSTE 689#ifdef CONFIG_PGSTE
679 unsigned long address; 690 unsigned long address;
@@ -687,10 +698,23 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
687 /* Set page access key and fetch protection bit from pgste */ 698 /* Set page access key and fetch protection bit from pgste */
688 nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; 699 nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
689 if (okey != nkey) 700 if (okey != nkey)
690 page_set_storage_key(address, nkey, 1); 701 page_set_storage_key(address, nkey, 0);
691#endif 702#endif
692} 703}
693 704
705static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
706{
707 if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
708 /*
709 * Without enhanced suppression-on-protection force
710 * the dirty bit on for all writable ptes.
711 */
712 pte_val(entry) |= _PAGE_SWC;
713 pte_val(entry) &= ~_PAGE_RO;
714 }
715 *ptep = entry;
716}
717
694/** 718/**
695 * struct gmap_struct - guest address space 719 * struct gmap_struct - guest address space
696 * @mm: pointer to the parent mm_struct 720 * @mm: pointer to the parent mm_struct
@@ -749,11 +773,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
749 773
750 if (mm_has_pgste(mm)) { 774 if (mm_has_pgste(mm)) {
751 pgste = pgste_get_lock(ptep); 775 pgste = pgste_get_lock(ptep);
752 pgste_set_pte(ptep, pgste, entry); 776 pgste_set_key(ptep, pgste, entry);
753 *ptep = entry; 777 pgste_set_pte(ptep, entry);
754 pgste_set_unlock(ptep, pgste); 778 pgste_set_unlock(ptep, pgste);
755 } else 779 } else {
780 if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
781 pte_val(entry) |= _PAGE_CO;
756 *ptep = entry; 782 *ptep = entry;
783 }
757} 784}
758 785
759/* 786/*
@@ -762,16 +789,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
762 */ 789 */
763static inline int pte_write(pte_t pte) 790static inline int pte_write(pte_t pte)
764{ 791{
765 return (pte_val(pte) & _PAGE_RO) == 0; 792 return (pte_val(pte) & _PAGE_SWW) != 0;
766} 793}
767 794
768static inline int pte_dirty(pte_t pte) 795static inline int pte_dirty(pte_t pte)
769{ 796{
770#ifdef CONFIG_PGSTE 797 return (pte_val(pte) & _PAGE_SWC) != 0;
771 if (pte_val(pte) & _PAGE_SWC)
772 return 1;
773#endif
774 return 0;
775} 798}
776 799
777static inline int pte_young(pte_t pte) 800static inline int pte_young(pte_t pte)
@@ -821,11 +844,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
821{ 844{
822 pte_val(pte) &= _PAGE_CHG_MASK; 845 pte_val(pte) &= _PAGE_CHG_MASK;
823 pte_val(pte) |= pgprot_val(newprot); 846 pte_val(pte) |= pgprot_val(newprot);
847 if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
848 pte_val(pte) &= ~_PAGE_RO;
824 return pte; 849 return pte;
825} 850}
826 851
827static inline pte_t pte_wrprotect(pte_t pte) 852static inline pte_t pte_wrprotect(pte_t pte)
828{ 853{
854 pte_val(pte) &= ~_PAGE_SWW;
829 /* Do not clobber _PAGE_TYPE_NONE pages! */ 855 /* Do not clobber _PAGE_TYPE_NONE pages! */
830 if (!(pte_val(pte) & _PAGE_INVALID)) 856 if (!(pte_val(pte) & _PAGE_INVALID))
831 pte_val(pte) |= _PAGE_RO; 857 pte_val(pte) |= _PAGE_RO;
@@ -834,20 +860,26 @@ static inline pte_t pte_wrprotect(pte_t pte)
834 860
835static inline pte_t pte_mkwrite(pte_t pte) 861static inline pte_t pte_mkwrite(pte_t pte)
836{ 862{
837 pte_val(pte) &= ~_PAGE_RO; 863 pte_val(pte) |= _PAGE_SWW;
864 if (pte_val(pte) & _PAGE_SWC)
865 pte_val(pte) &= ~_PAGE_RO;
838 return pte; 866 return pte;
839} 867}
840 868
841static inline pte_t pte_mkclean(pte_t pte) 869static inline pte_t pte_mkclean(pte_t pte)
842{ 870{
843#ifdef CONFIG_PGSTE
844 pte_val(pte) &= ~_PAGE_SWC; 871 pte_val(pte) &= ~_PAGE_SWC;
845#endif 872 /* Do not clobber _PAGE_TYPE_NONE pages! */
873 if (!(pte_val(pte) & _PAGE_INVALID))
874 pte_val(pte) |= _PAGE_RO;
846 return pte; 875 return pte;
847} 876}
848 877
849static inline pte_t pte_mkdirty(pte_t pte) 878static inline pte_t pte_mkdirty(pte_t pte)
850{ 879{
880 pte_val(pte) |= _PAGE_SWC;
881 if (pte_val(pte) & _PAGE_SWW)
882 pte_val(pte) &= ~_PAGE_RO;
851 return pte; 883 return pte;
852} 884}
853 885
@@ -885,10 +917,10 @@ static inline pte_t pte_mkhuge(pte_t pte)
885 pte_val(pte) |= _SEGMENT_ENTRY_INV; 917 pte_val(pte) |= _SEGMENT_ENTRY_INV;
886 } 918 }
887 /* 919 /*
888 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment 920 * Clear SW pte bits, there are no SW bits in a segment table entry.
889 * table entry.
890 */ 921 */
891 pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX); 922 pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
923 _PAGE_SWR | _PAGE_SWW);
892 /* 924 /*
893 * Also set the change-override bit because we don't need dirty bit 925 * Also set the change-override bit because we don't need dirty bit
894 * tracking for hugetlbfs pages. 926 * tracking for hugetlbfs pages.
@@ -1040,9 +1072,11 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
1040 unsigned long address, 1072 unsigned long address,
1041 pte_t *ptep, pte_t pte) 1073 pte_t *ptep, pte_t pte)
1042{ 1074{
1043 *ptep = pte; 1075 if (mm_has_pgste(mm)) {
1044 if (mm_has_pgste(mm)) 1076 pgste_set_pte(ptep, pte);
1045 pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); 1077 pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
1078 } else
1079 *ptep = pte;
1046} 1080}
1047 1081
1048#define __HAVE_ARCH_PTEP_CLEAR_FLUSH 1082#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
@@ -1110,10 +1144,13 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
1110 1144
1111 if (!mm_exclusive(mm)) 1145 if (!mm_exclusive(mm))
1112 __ptep_ipte(address, ptep); 1146 __ptep_ipte(address, ptep);
1113 *ptep = pte_wrprotect(pte); 1147 pte = pte_wrprotect(pte);
1114 1148
1115 if (mm_has_pgste(mm)) 1149 if (mm_has_pgste(mm)) {
1150 pgste_set_pte(ptep, pte);
1116 pgste_set_unlock(ptep, pgste); 1151 pgste_set_unlock(ptep, pgste);
1152 } else
1153 *ptep = pte;
1117 } 1154 }
1118 return pte; 1155 return pte;
1119} 1156}
@@ -1131,10 +1168,12 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1131 pgste = pgste_get_lock(ptep); 1168 pgste = pgste_get_lock(ptep);
1132 1169
1133 __ptep_ipte(address, ptep); 1170 __ptep_ipte(address, ptep);
1134 *ptep = entry;
1135 1171
1136 if (mm_has_pgste(vma->vm_mm)) 1172 if (mm_has_pgste(vma->vm_mm)) {
1173 pgste_set_pte(ptep, entry);
1137 pgste_set_unlock(ptep, pgste); 1174 pgste_set_unlock(ptep, pgste);
1175 } else
1176 *ptep = entry;
1138 return 1; 1177 return 1;
1139} 1178}
1140 1179
@@ -1152,8 +1191,13 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1152static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) 1191static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1153{ 1192{
1154 unsigned long physpage = page_to_phys(page); 1193 unsigned long physpage = page_to_phys(page);
1194 pte_t __pte = mk_pte_phys(physpage, pgprot);
1155 1195
1156 return mk_pte_phys(physpage, pgprot); 1196 if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
1197 pte_val(__pte) |= _PAGE_SWC;
1198 pte_val(__pte) &= ~_PAGE_RO;
1199 }
1200 return __pte;
1157} 1201}
1158 1202
1159#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) 1203#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
@@ -1245,6 +1289,8 @@ static inline int pmd_trans_splitting(pmd_t pmd)
1245static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 1289static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1246 pmd_t *pmdp, pmd_t entry) 1290 pmd_t *pmdp, pmd_t entry)
1247{ 1291{
1292 if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
1293 pmd_val(entry) |= _SEGMENT_ENTRY_CO;
1248 *pmdp = entry; 1294 *pmdp = entry;
1249} 1295}
1250 1296
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 833788693f09..06a136136047 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -46,7 +46,6 @@ int sclp_cpu_deconfigure(u8 cpu);
46void sclp_facilities_detect(void); 46void sclp_facilities_detect(void);
47unsigned long long sclp_get_rnmax(void); 47unsigned long long sclp_get_rnmax(void);
48unsigned long long sclp_get_rzm(void); 48unsigned long long sclp_get_rzm(void);
49u8 sclp_get_fac85(void);
50int sclp_sdias_blk_count(void); 49int sclp_sdias_blk_count(void);
51int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); 50int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
52int sclp_chp_configure(struct chp_id chpid); 51int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index f69f76b3447a..ff67d730c00c 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -64,17 +64,18 @@ extern unsigned int s390_user_mode;
64 64
65#define MACHINE_FLAG_VM (1UL << 0) 65#define MACHINE_FLAG_VM (1UL << 0)
66#define MACHINE_FLAG_IEEE (1UL << 1) 66#define MACHINE_FLAG_IEEE (1UL << 1)
67#define MACHINE_FLAG_CSP (1UL << 3) 67#define MACHINE_FLAG_CSP (1UL << 2)
68#define MACHINE_FLAG_MVPG (1UL << 4) 68#define MACHINE_FLAG_MVPG (1UL << 3)
69#define MACHINE_FLAG_DIAG44 (1UL << 5) 69#define MACHINE_FLAG_DIAG44 (1UL << 4)
70#define MACHINE_FLAG_IDTE (1UL << 6) 70#define MACHINE_FLAG_IDTE (1UL << 5)
71#define MACHINE_FLAG_DIAG9C (1UL << 7) 71#define MACHINE_FLAG_DIAG9C (1UL << 6)
72#define MACHINE_FLAG_MVCOS (1UL << 8) 72#define MACHINE_FLAG_MVCOS (1UL << 7)
73#define MACHINE_FLAG_KVM (1UL << 9) 73#define MACHINE_FLAG_KVM (1UL << 8)
74#define MACHINE_FLAG_ESOP (1UL << 9)
74#define MACHINE_FLAG_EDAT1 (1UL << 10) 75#define MACHINE_FLAG_EDAT1 (1UL << 10)
75#define MACHINE_FLAG_EDAT2 (1UL << 11) 76#define MACHINE_FLAG_EDAT2 (1UL << 11)
76#define MACHINE_FLAG_LPAR (1UL << 12) 77#define MACHINE_FLAG_LPAR (1UL << 12)
77#define MACHINE_FLAG_SPP (1UL << 13) 78#define MACHINE_FLAG_LPP (1UL << 13)
78#define MACHINE_FLAG_TOPOLOGY (1UL << 14) 79#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
79#define MACHINE_FLAG_TE (1UL << 15) 80#define MACHINE_FLAG_TE (1UL << 15)
80#define MACHINE_FLAG_RRBM (1UL << 16) 81#define MACHINE_FLAG_RRBM (1UL << 16)
@@ -84,6 +85,7 @@ extern unsigned int s390_user_mode;
84#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) 85#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
85 86
86#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) 87#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
88#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
87#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 89#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
88#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 90#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
89 91
@@ -96,7 +98,7 @@ extern unsigned int s390_user_mode;
96#define MACHINE_HAS_MVCOS (0) 98#define MACHINE_HAS_MVCOS (0)
97#define MACHINE_HAS_EDAT1 (0) 99#define MACHINE_HAS_EDAT1 (0)
98#define MACHINE_HAS_EDAT2 (0) 100#define MACHINE_HAS_EDAT2 (0)
99#define MACHINE_HAS_SPP (0) 101#define MACHINE_HAS_LPP (0)
100#define MACHINE_HAS_TOPOLOGY (0) 102#define MACHINE_HAS_TOPOLOGY (0)
101#define MACHINE_HAS_TE (0) 103#define MACHINE_HAS_TE (0)
102#define MACHINE_HAS_RRBM (0) 104#define MACHINE_HAS_RRBM (0)
@@ -109,7 +111,7 @@ extern unsigned int s390_user_mode;
109#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS) 111#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
110#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) 112#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
111#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) 113#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
112#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) 114#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
113#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 115#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
114#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) 116#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
115#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM) 117#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 4c060bb5b8ea..8ad8af915032 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -15,7 +15,7 @@
15#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL 15#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
16 16
17/* Inline functions for clock register access. */ 17/* Inline functions for clock register access. */
18static inline int set_clock(__u64 time) 18static inline int set_tod_clock(__u64 time)
19{ 19{
20 int cc; 20 int cc;
21 21
@@ -27,7 +27,7 @@ static inline int set_clock(__u64 time)
27 return cc; 27 return cc;
28} 28}
29 29
30static inline int store_clock(__u64 *time) 30static inline int store_tod_clock(__u64 *time)
31{ 31{
32 int cc; 32 int cc;
33 33
@@ -71,7 +71,7 @@ static inline void local_tick_enable(unsigned long long comp)
71 71
72typedef unsigned long long cycles_t; 72typedef unsigned long long cycles_t;
73 73
74static inline unsigned long long get_clock(void) 74static inline unsigned long long get_tod_clock(void)
75{ 75{
76 unsigned long long clk; 76 unsigned long long clk;
77 77
@@ -83,21 +83,21 @@ static inline unsigned long long get_clock(void)
83 return clk; 83 return clk;
84} 84}
85 85
86static inline void get_clock_ext(char *clk) 86static inline void get_tod_clock_ext(char *clk)
87{ 87{
88 asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); 88 asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
89} 89}
90 90
91static inline unsigned long long get_clock_xt(void) 91static inline unsigned long long get_tod_clock_xt(void)
92{ 92{
93 unsigned char clk[16]; 93 unsigned char clk[16];
94 get_clock_ext(clk); 94 get_tod_clock_ext(clk);
95 return *((unsigned long long *)&clk[1]); 95 return *((unsigned long long *)&clk[1]);
96} 96}
97 97
98static inline cycles_t get_cycles(void) 98static inline cycles_t get_cycles(void)
99{ 99{
100 return (cycles_t) get_clock() >> 2; 100 return (cycles_t) get_tod_clock() >> 2;
101} 101}
102 102
103int get_sync_clock(unsigned long long *clock); 103int get_sync_clock(unsigned long long *clock);
@@ -123,9 +123,9 @@ extern u64 sched_clock_base_cc;
123 * function, otherwise the returned value is not guaranteed to 123 * function, otherwise the returned value is not guaranteed to
124 * be monotonic. 124 * be monotonic.
125 */ 125 */
126static inline unsigned long long get_clock_monotonic(void) 126static inline unsigned long long get_tod_clock_monotonic(void)
127{ 127{
128 return get_clock_xt() - sched_clock_base_cc; 128 return get_tod_clock_xt() - sched_clock_base_cc;
129} 129}
130 130
131/** 131/**
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 4e8215e0d4b6..09a94cd9debc 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@ static inline void
867debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level, 867debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
868 int exception) 868 int exception)
869{ 869{
870 active->id.stck = get_clock(); 870 active->id.stck = get_tod_clock();
871 active->id.fields.cpuid = smp_processor_id(); 871 active->id.fields.cpuid = smp_processor_id();
872 active->caller = __builtin_return_address(0); 872 active->caller = __builtin_return_address(0);
873 active->id.fields.exception = exception; 873 active->id.fields.exception = exception;
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index a7f9abd98cf2..c50665fe9435 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -840,7 +840,6 @@ static struct insn opcode_b2[] = {
840 { "stcke", 0x78, INSTR_S_RD }, 840 { "stcke", 0x78, INSTR_S_RD },
841 { "sacf", 0x79, INSTR_S_RD }, 841 { "sacf", 0x79, INSTR_S_RD },
842 { "stsi", 0x7d, INSTR_S_RD }, 842 { "stsi", 0x7d, INSTR_S_RD },
843 { "spp", 0x80, INSTR_S_RD },
844 { "srnm", 0x99, INSTR_S_RD }, 843 { "srnm", 0x99, INSTR_S_RD },
845 { "stfpc", 0x9c, INSTR_S_RD }, 844 { "stfpc", 0x9c, INSTR_S_RD },
846 { "lfpc", 0x9d, INSTR_S_RD }, 845 { "lfpc", 0x9d, INSTR_S_RD },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1f0eee9e7daa..bda011e2f8ae 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -47,10 +47,10 @@ static void __init reset_tod_clock(void)
47{ 47{
48 u64 time; 48 u64 time;
49 49
50 if (store_clock(&time) == 0) 50 if (store_tod_clock(&time) == 0)
51 return; 51 return;
52 /* TOD clock not running. Set the clock to Unix Epoch. */ 52 /* TOD clock not running. Set the clock to Unix Epoch. */
53 if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) 53 if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
54 disabled_wait(0); 54 disabled_wait(0);
55 55
56 sched_clock_base_cc = TOD_UNIX_EPOCH; 56 sched_clock_base_cc = TOD_UNIX_EPOCH;
@@ -173,7 +173,7 @@ static noinline __init void create_kernel_nss(void)
173 } 173 }
174 174
175 /* re-initialize cputime accounting. */ 175 /* re-initialize cputime accounting. */
176 sched_clock_base_cc = get_clock(); 176 sched_clock_base_cc = get_tod_clock();
177 S390_lowcore.last_update_clock = sched_clock_base_cc; 177 S390_lowcore.last_update_clock = sched_clock_base_cc;
178 S390_lowcore.last_update_timer = 0x7fffffffffffffffULL; 178 S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
179 S390_lowcore.user_timer = 0; 179 S390_lowcore.user_timer = 0;
@@ -381,7 +381,7 @@ static __init void detect_machine_facilities(void)
381 if (test_facility(27)) 381 if (test_facility(27))
382 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; 382 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
383 if (test_facility(40)) 383 if (test_facility(40))
384 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; 384 S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
385 if (test_facility(50) && test_facility(73)) 385 if (test_facility(50) && test_facility(73))
386 S390_lowcore.machine_flags |= MACHINE_FLAG_TE; 386 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
387 if (test_facility(66)) 387 if (test_facility(66))
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 6d34e0c97a39..9c837c101297 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -72,9 +72,9 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
72#endif 72#endif
73 .endm 73 .endm
74 74
75 .macro SPP newpp 75 .macro LPP newpp
76#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 76#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
77 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP 77 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
78 jz .+8 78 jz .+8
79 .insn s,0xb2800000,\newpp 79 .insn s,0xb2800000,\newpp
80#endif 80#endif
@@ -96,7 +96,7 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
96 jhe .+22 96 jhe .+22
97 .endif 97 .endif
98 lg %r9,BASED(.Lsie_loop) 98 lg %r9,BASED(.Lsie_loop)
99 SPP BASED(.Lhost_id) # set host id 99 LPP BASED(.Lhost_id) # set host id
100#endif 100#endif
101 .endm 101 .endm
102 102
@@ -967,10 +967,10 @@ sie_loop:
967 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 967 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
968sie_gmap: 968sie_gmap:
969 lg %r14,__SF_EMPTY(%r15) # get control block pointer 969 lg %r14,__SF_EMPTY(%r15) # get control block pointer
970 SPP __SF_EMPTY(%r15) # set guest id 970 LPP __SF_EMPTY(%r15) # set guest id
971 sie 0(%r14) 971 sie 0(%r14)
972sie_done: 972sie_done:
973 SPP __SF_EMPTY+16(%r15) # set host id 973 LPP __SF_EMPTY+16(%r15) # set host id
974 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 974 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
975sie_exit: 975sie_exit:
976 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 976 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 6ffcd3203215..d8a6a385d048 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1414,6 +1414,16 @@ static struct kobj_attribute dump_type_attr =
1414 1414
1415static struct kset *dump_kset; 1415static struct kset *dump_kset;
1416 1416
1417static void diag308_dump(void *dump_block)
1418{
1419 diag308(DIAG308_SET, dump_block);
1420 while (1) {
1421 if (diag308(DIAG308_DUMP, NULL) != 0x302)
1422 break;
1423 udelay_simple(USEC_PER_SEC);
1424 }
1425}
1426
1417static void __dump_run(void *unused) 1427static void __dump_run(void *unused)
1418{ 1428{
1419 struct ccw_dev_id devid; 1429 struct ccw_dev_id devid;
@@ -1432,12 +1442,10 @@ static void __dump_run(void *unused)
1432 __cpcmd(buf, NULL, 0, NULL); 1442 __cpcmd(buf, NULL, 0, NULL);
1433 break; 1443 break;
1434 case DUMP_METHOD_CCW_DIAG: 1444 case DUMP_METHOD_CCW_DIAG:
1435 diag308(DIAG308_SET, dump_block_ccw); 1445 diag308_dump(dump_block_ccw);
1436 diag308(DIAG308_DUMP, NULL);
1437 break; 1446 break;
1438 case DUMP_METHOD_FCP_DIAG: 1447 case DUMP_METHOD_FCP_DIAG:
1439 diag308(DIAG308_SET, dump_block_fcp); 1448 diag308_dump(dump_block_fcp);
1440 diag308(DIAG308_DUMP, NULL);
1441 break; 1449 break;
1442 default: 1450 default:
1443 break; 1451 break;
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 4610deafd953..f750bd7bd2c2 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -65,8 +65,7 @@ void module_free(struct module *mod, void *module_region)
65 vfree(module_region); 65 vfree(module_region);
66} 66}
67 67
68static void 68static void check_rela(Elf_Rela *rela, struct module *me)
69check_rela(Elf_Rela *rela, struct module *me)
70{ 69{
71 struct mod_arch_syminfo *info; 70 struct mod_arch_syminfo *info;
72 71
@@ -115,9 +114,8 @@ check_rela(Elf_Rela *rela, struct module *me)
115 * Account for GOT and PLT relocations. We can't add sections for 114 * Account for GOT and PLT relocations. We can't add sections for
116 * got and plt but we can increase the core module size. 115 * got and plt but we can increase the core module size.
117 */ 116 */
118int 117int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
119module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, 118 char *secstrings, struct module *me)
120 char *secstrings, struct module *me)
121{ 119{
122 Elf_Shdr *symtab; 120 Elf_Shdr *symtab;
123 Elf_Sym *symbols; 121 Elf_Sym *symbols;
@@ -179,13 +177,52 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
179 return 0; 177 return 0;
180} 178}
181 179
182static int 180static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
183apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 181 int sign, int bits, int shift)
184 struct module *me) 182{
183 unsigned long umax;
184 long min, max;
185
186 if (val & ((1UL << shift) - 1))
187 return -ENOEXEC;
188 if (sign) {
189 val = (Elf_Addr)(((long) val) >> shift);
190 min = -(1L << (bits - 1));
191 max = (1L << (bits - 1)) - 1;
192 if ((long) val < min || (long) val > max)
193 return -ENOEXEC;
194 } else {
195 val >>= shift;
196 umax = ((1UL << (bits - 1)) << 1) - 1;
197 if ((unsigned long) val > umax)
198 return -ENOEXEC;
199 }
200
201 if (bits == 8)
202 *(unsigned char *) loc = val;
203 else if (bits == 12)
204 *(unsigned short *) loc = (val & 0xfff) |
205 (*(unsigned short *) loc & 0xf000);
206 else if (bits == 16)
207 *(unsigned short *) loc = val;
208 else if (bits == 20)
209 *(unsigned int *) loc = (val & 0xfff) << 16 |
210 (val & 0xff000) >> 4 |
211 (*(unsigned int *) loc & 0xf00000ff);
212 else if (bits == 32)
213 *(unsigned int *) loc = val;
214 else if (bits == 64)
215 *(unsigned long *) loc = val;
216 return 0;
217}
218
219static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
220 const char *strtab, struct module *me)
185{ 221{
186 struct mod_arch_syminfo *info; 222 struct mod_arch_syminfo *info;
187 Elf_Addr loc, val; 223 Elf_Addr loc, val;
188 int r_type, r_sym; 224 int r_type, r_sym;
225 int rc;
189 226
190 /* This is where to make the change */ 227 /* This is where to make the change */
191 loc = base + rela->r_offset; 228 loc = base + rela->r_offset;
@@ -197,6 +234,9 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
197 val = symtab[r_sym].st_value; 234 val = symtab[r_sym].st_value;
198 235
199 switch (r_type) { 236 switch (r_type) {
237 case R_390_NONE: /* No relocation. */
238 rc = 0;
239 break;
200 case R_390_8: /* Direct 8 bit. */ 240 case R_390_8: /* Direct 8 bit. */
201 case R_390_12: /* Direct 12 bit. */ 241 case R_390_12: /* Direct 12 bit. */
202 case R_390_16: /* Direct 16 bit. */ 242 case R_390_16: /* Direct 16 bit. */
@@ -205,20 +245,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
205 case R_390_64: /* Direct 64 bit. */ 245 case R_390_64: /* Direct 64 bit. */
206 val += rela->r_addend; 246 val += rela->r_addend;
207 if (r_type == R_390_8) 247 if (r_type == R_390_8)
208 *(unsigned char *) loc = val; 248 rc = apply_rela_bits(loc, val, 0, 8, 0);
209 else if (r_type == R_390_12) 249 else if (r_type == R_390_12)
210 *(unsigned short *) loc = (val & 0xfff) | 250 rc = apply_rela_bits(loc, val, 0, 12, 0);
211 (*(unsigned short *) loc & 0xf000);
212 else if (r_type == R_390_16) 251 else if (r_type == R_390_16)
213 *(unsigned short *) loc = val; 252 rc = apply_rela_bits(loc, val, 0, 16, 0);
214 else if (r_type == R_390_20) 253 else if (r_type == R_390_20)
215 *(unsigned int *) loc = 254 rc = apply_rela_bits(loc, val, 1, 20, 0);
216 (*(unsigned int *) loc & 0xf00000ff) |
217 (val & 0xfff) << 16 | (val & 0xff000) >> 4;
218 else if (r_type == R_390_32) 255 else if (r_type == R_390_32)
219 *(unsigned int *) loc = val; 256 rc = apply_rela_bits(loc, val, 0, 32, 0);
220 else if (r_type == R_390_64) 257 else if (r_type == R_390_64)
221 *(unsigned long *) loc = val; 258 rc = apply_rela_bits(loc, val, 0, 64, 0);
222 break; 259 break;
223 case R_390_PC16: /* PC relative 16 bit. */ 260 case R_390_PC16: /* PC relative 16 bit. */
224 case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ 261 case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
@@ -227,15 +264,15 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
227 case R_390_PC64: /* PC relative 64 bit. */ 264 case R_390_PC64: /* PC relative 64 bit. */
228 val += rela->r_addend - loc; 265 val += rela->r_addend - loc;
229 if (r_type == R_390_PC16) 266 if (r_type == R_390_PC16)
230 *(unsigned short *) loc = val; 267 rc = apply_rela_bits(loc, val, 1, 16, 0);
231 else if (r_type == R_390_PC16DBL) 268 else if (r_type == R_390_PC16DBL)
232 *(unsigned short *) loc = val >> 1; 269 rc = apply_rela_bits(loc, val, 1, 16, 1);
233 else if (r_type == R_390_PC32DBL) 270 else if (r_type == R_390_PC32DBL)
234 *(unsigned int *) loc = val >> 1; 271 rc = apply_rela_bits(loc, val, 1, 32, 1);
235 else if (r_type == R_390_PC32) 272 else if (r_type == R_390_PC32)
236 *(unsigned int *) loc = val; 273 rc = apply_rela_bits(loc, val, 1, 32, 0);
237 else if (r_type == R_390_PC64) 274 else if (r_type == R_390_PC64)
238 *(unsigned long *) loc = val; 275 rc = apply_rela_bits(loc, val, 1, 64, 0);
239 break; 276 break;
240 case R_390_GOT12: /* 12 bit GOT offset. */ 277 case R_390_GOT12: /* 12 bit GOT offset. */
241 case R_390_GOT16: /* 16 bit GOT offset. */ 278 case R_390_GOT16: /* 16 bit GOT offset. */
@@ -260,26 +297,24 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
260 val = info->got_offset + rela->r_addend; 297 val = info->got_offset + rela->r_addend;
261 if (r_type == R_390_GOT12 || 298 if (r_type == R_390_GOT12 ||
262 r_type == R_390_GOTPLT12) 299 r_type == R_390_GOTPLT12)
263 *(unsigned short *) loc = (val & 0xfff) | 300 rc = apply_rela_bits(loc, val, 0, 12, 0);
264 (*(unsigned short *) loc & 0xf000);
265 else if (r_type == R_390_GOT16 || 301 else if (r_type == R_390_GOT16 ||
266 r_type == R_390_GOTPLT16) 302 r_type == R_390_GOTPLT16)
267 *(unsigned short *) loc = val; 303 rc = apply_rela_bits(loc, val, 0, 16, 0);
268 else if (r_type == R_390_GOT20 || 304 else if (r_type == R_390_GOT20 ||
269 r_type == R_390_GOTPLT20) 305 r_type == R_390_GOTPLT20)
270 *(unsigned int *) loc = 306 rc = apply_rela_bits(loc, val, 1, 20, 0);
271 (*(unsigned int *) loc & 0xf00000ff) |
272 (val & 0xfff) << 16 | (val & 0xff000) >> 4;
273 else if (r_type == R_390_GOT32 || 307 else if (r_type == R_390_GOT32 ||
274 r_type == R_390_GOTPLT32) 308 r_type == R_390_GOTPLT32)
275 *(unsigned int *) loc = val; 309 rc = apply_rela_bits(loc, val, 0, 32, 0);
276 else if (r_type == R_390_GOTENT ||
277 r_type == R_390_GOTPLTENT)
278 *(unsigned int *) loc =
279 (val + (Elf_Addr) me->module_core - loc) >> 1;
280 else if (r_type == R_390_GOT64 || 310 else if (r_type == R_390_GOT64 ||
281 r_type == R_390_GOTPLT64) 311 r_type == R_390_GOTPLT64)
282 *(unsigned long *) loc = val; 312 rc = apply_rela_bits(loc, val, 0, 64, 0);
313 else if (r_type == R_390_GOTENT ||
314 r_type == R_390_GOTPLTENT) {
315 val += (Elf_Addr) me->module_core - loc;
316 rc = apply_rela_bits(loc, val, 1, 32, 1);
317 }
283 break; 318 break;
284 case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ 319 case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
285 case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */ 320 case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
@@ -321,17 +356,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
321 val += rela->r_addend - loc; 356 val += rela->r_addend - loc;
322 } 357 }
323 if (r_type == R_390_PLT16DBL) 358 if (r_type == R_390_PLT16DBL)
324 *(unsigned short *) loc = val >> 1; 359 rc = apply_rela_bits(loc, val, 1, 16, 1);
325 else if (r_type == R_390_PLTOFF16) 360 else if (r_type == R_390_PLTOFF16)
326 *(unsigned short *) loc = val; 361 rc = apply_rela_bits(loc, val, 0, 16, 0);
327 else if (r_type == R_390_PLT32DBL) 362 else if (r_type == R_390_PLT32DBL)
328 *(unsigned int *) loc = val >> 1; 363 rc = apply_rela_bits(loc, val, 1, 32, 1);
329 else if (r_type == R_390_PLT32 || 364 else if (r_type == R_390_PLT32 ||
330 r_type == R_390_PLTOFF32) 365 r_type == R_390_PLTOFF32)
331 *(unsigned int *) loc = val; 366 rc = apply_rela_bits(loc, val, 0, 32, 0);
332 else if (r_type == R_390_PLT64 || 367 else if (r_type == R_390_PLT64 ||
333 r_type == R_390_PLTOFF64) 368 r_type == R_390_PLTOFF64)
334 *(unsigned long *) loc = val; 369 rc = apply_rela_bits(loc, val, 0, 64, 0);
335 break; 370 break;
336 case R_390_GOTOFF16: /* 16 bit offset to GOT. */ 371 case R_390_GOTOFF16: /* 16 bit offset to GOT. */
337 case R_390_GOTOFF32: /* 32 bit offset to GOT. */ 372 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
@@ -339,20 +374,20 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
339 val = val + rela->r_addend - 374 val = val + rela->r_addend -
340 ((Elf_Addr) me->module_core + me->arch.got_offset); 375 ((Elf_Addr) me->module_core + me->arch.got_offset);
341 if (r_type == R_390_GOTOFF16) 376 if (r_type == R_390_GOTOFF16)
342 *(unsigned short *) loc = val; 377 rc = apply_rela_bits(loc, val, 0, 16, 0);
343 else if (r_type == R_390_GOTOFF32) 378 else if (r_type == R_390_GOTOFF32)
344 *(unsigned int *) loc = val; 379 rc = apply_rela_bits(loc, val, 0, 32, 0);
345 else if (r_type == R_390_GOTOFF64) 380 else if (r_type == R_390_GOTOFF64)
346 *(unsigned long *) loc = val; 381 rc = apply_rela_bits(loc, val, 0, 64, 0);
347 break; 382 break;
348 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ 383 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
349 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ 384 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
350 val = (Elf_Addr) me->module_core + me->arch.got_offset + 385 val = (Elf_Addr) me->module_core + me->arch.got_offset +
351 rela->r_addend - loc; 386 rela->r_addend - loc;
352 if (r_type == R_390_GOTPC) 387 if (r_type == R_390_GOTPC)
353 *(unsigned int *) loc = val; 388 rc = apply_rela_bits(loc, val, 1, 32, 0);
354 else if (r_type == R_390_GOTPCDBL) 389 else if (r_type == R_390_GOTPCDBL)
355 *(unsigned int *) loc = val >> 1; 390 rc = apply_rela_bits(loc, val, 1, 32, 1);
356 break; 391 break;
357 case R_390_COPY: 392 case R_390_COPY:
358 case R_390_GLOB_DAT: /* Create GOT entry. */ 393 case R_390_GLOB_DAT: /* Create GOT entry. */
@@ -360,19 +395,25 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
360 case R_390_RELATIVE: /* Adjust by program base. */ 395 case R_390_RELATIVE: /* Adjust by program base. */
361 /* Only needed if we want to support loading of 396 /* Only needed if we want to support loading of
362 modules linked with -shared. */ 397 modules linked with -shared. */
363 break; 398 return -ENOEXEC;
364 default: 399 default:
365 printk(KERN_ERR "module %s: Unknown relocation: %u\n", 400 printk(KERN_ERR "module %s: unknown relocation: %u\n",
366 me->name, r_type); 401 me->name, r_type);
367 return -ENOEXEC; 402 return -ENOEXEC;
368 } 403 }
404 if (rc) {
405 printk(KERN_ERR "module %s: relocation error for symbol %s "
406 "(r_type %i, value 0x%lx)\n",
407 me->name, strtab + symtab[r_sym].st_name,
408 r_type, (unsigned long) val);
409 return rc;
410 }
369 return 0; 411 return 0;
370} 412}
371 413
372int 414int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
373apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, 415 unsigned int symindex, unsigned int relsec,
374 unsigned int symindex, unsigned int relsec, 416 struct module *me)
375 struct module *me)
376{ 417{
377 Elf_Addr base; 418 Elf_Addr base;
378 Elf_Sym *symtab; 419 Elf_Sym *symtab;
@@ -388,7 +429,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
388 n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); 429 n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
389 430
390 for (i = 0; i < n; i++, rela++) { 431 for (i = 0; i < n; i++, rela++) {
391 rc = apply_rela(rela, base, symtab, me); 432 rc = apply_rela(rela, base, symtab, strtab, me);
392 if (rc) 433 if (rc)
393 return rc; 434 return rc;
394 } 435 }
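
The hunks above replace every open-coded relocation store with a single apply_rela_bits() helper which, judging by the call sites, takes the patch location, the value, a signed/PC-relative flag, the field width in bits and a right shift, and which now also reports out-of-range values instead of silently truncating them (hence the new rc handling and the per-symbol error printk). A rough user-space sketch of such a helper follows; the name and argument order mirror the calls in the hunk, but the body is an illustration and only covers the byte-aligned widths, not the split 12- and 20-bit instruction fields that the removed mask expressions handled.

#include <stdint.h>
#include <string.h>

/*
 * Illustrative only: patch a `bits`-wide relocation field at `loc` with
 * `val` shifted right by `shift`, refusing values that do not fit.
 * The kernel helper additionally merges the 12- and 20-bit forms into
 * the surrounding instruction bits and writes big-endian instruction text.
 */
static int apply_bits_sketch(void *loc, long long val, int sign,
                             int bits, int shift)
{
        unsigned long long uval;

        val >>= shift;
        uval = (unsigned long long) val;

        if (bits < 64) {                        /* overflow check */
                if (sign) {
                        long long lim = 1LL << (bits - 1);
                        if (val < -lim || val >= lim)
                                return -1;
                } else if (uval >> bits) {
                        return -1;
                }
        }

        if (bits <= 16) {
                uint16_t v = uval;
                memcpy(loc, &v, sizeof(v));
        } else if (bits <= 32) {
                uint32_t v = uval;
                memcpy(loc, &v, sizeof(v));
        } else {
                uint64_t v = uval;
                memcpy(loc, &v, sizeof(v));
        }
        return 0;
}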
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 7918fbea36bb..504175ebf8b0 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -293,7 +293,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
293 * retry this instruction. 293 * retry this instruction.
294 */ 294 */
295 spin_lock(&ipd_lock); 295 spin_lock(&ipd_lock);
296 tmp = get_clock(); 296 tmp = get_tod_clock();
297 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME) 297 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
298 ipd_count++; 298 ipd_count++;
299 else 299 else
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 86ec7447e1f5..390d9ae57bb2 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -367,13 +367,6 @@ static int __hw_perf_event_init(struct perf_event *event)
367 if (ev >= PERF_CPUM_CF_MAX_CTR) 367 if (ev >= PERF_CPUM_CF_MAX_CTR)
368 return -EINVAL; 368 return -EINVAL;
369 369
370 /* The CPU measurement counter facility does not have any interrupts
371 * to do sampling. Sampling must be provided by external means,
372 * for example, by timers.
373 */
374 if (hwc->sample_period)
375 return -EINVAL;
376
377 /* Use the hardware perf event structure to store the counter number 370 /* Use the hardware perf event structure to store the counter number
378 * in 'config' member and the counter set to which the counter belongs 371 * in 'config' member and the counter set to which the counter belongs
379 * in the 'config_base'. The counter set (config_base) is then used 372 * in the 'config_base'. The counter set (config_base) is then used
@@ -418,6 +411,12 @@ static int cpumf_pmu_event_init(struct perf_event *event)
418 case PERF_TYPE_HARDWARE: 411 case PERF_TYPE_HARDWARE:
419 case PERF_TYPE_HW_CACHE: 412 case PERF_TYPE_HW_CACHE:
420 case PERF_TYPE_RAW: 413 case PERF_TYPE_RAW:
414 /* The CPU measurement counter facility does not have overflow
415 * interrupts to do sampling. Sampling must be provided by
416 * external means, for example, by timers.
417 */
418 if (is_sampling_event(event))
419 return -ENOENT;
421 err = __hw_perf_event_init(event); 420 err = __hw_perf_event_init(event);
422 break; 421 break;
423 default: 422 default:
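
Moving the sample_period check from __hw_perf_event_init() into cpumf_pmu_event_init() and switching it from -EINVAL to -ENOENT changes the failure mode: the CPU measurement counter facility has no overflow interrupts, so it cannot sample, and -ENOENT tells the perf core that this PMU simply does not handle the event, letting it fall back to a software event instead of failing the syscall. A minimal sketch of that pattern for a counting-only PMU (my_pmu_event_init() and MY_PMU_MAX_CTR are made-up names):

#include <linux/errno.h>
#include <linux/perf_event.h>

#define MY_PMU_MAX_CTR  32      /* illustrative counter limit */

/* Counting-only PMU: decline sampling requests so the core tries the
 * next PMU in line, but reject genuinely invalid requests for good.
 */
static int my_pmu_event_init(struct perf_event *event)
{
        if (event->attr.type != PERF_TYPE_HARDWARE &&
            event->attr.type != PERF_TYPE_RAW)
                return -ENOENT;         /* not our event class */

        if (is_sampling_event(event))
                return -ENOENT;         /* no overflow IRQs: let software sample */

        if (event->attr.config >= MY_PMU_MAX_CTR)
                return -EINVAL;         /* ours, but the counter does not exist */

        return 0;
}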
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7433a2f9e5cc..549c9d173c0f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -365,16 +365,16 @@ void smp_emergency_stop(cpumask_t *cpumask)
365 u64 end; 365 u64 end;
366 int cpu; 366 int cpu;
367 367
368 end = get_clock() + (1000000UL << 12); 368 end = get_tod_clock() + (1000000UL << 12);
369 for_each_cpu(cpu, cpumask) { 369 for_each_cpu(cpu, cpumask) {
370 struct pcpu *pcpu = pcpu_devices + cpu; 370 struct pcpu *pcpu = pcpu_devices + cpu;
371 set_bit(ec_stop_cpu, &pcpu->ec_mask); 371 set_bit(ec_stop_cpu, &pcpu->ec_mask);
372 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, 372 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
373 0, NULL) == SIGP_CC_BUSY && 373 0, NULL) == SIGP_CC_BUSY &&
374 get_clock() < end) 374 get_tod_clock() < end)
375 cpu_relax(); 375 cpu_relax();
376 } 376 }
377 while (get_clock() < end) { 377 while (get_tod_clock() < end) {
378 for_each_cpu(cpu, cpumask) 378 for_each_cpu(cpu, cpumask)
379 if (pcpu_stopped(pcpu_devices + cpu)) 379 if (pcpu_stopped(pcpu_devices + cpu))
380 cpumask_clear_cpu(cpu, cpumask); 380 cpumask_clear_cpu(cpu, cpumask);
@@ -694,7 +694,7 @@ static void __init smp_detect_cpus(void)
694 */ 694 */
695static void __cpuinit smp_start_secondary(void *cpuvoid) 695static void __cpuinit smp_start_secondary(void *cpuvoid)
696{ 696{
697 S390_lowcore.last_update_clock = get_clock(); 697 S390_lowcore.last_update_clock = get_tod_clock();
698 S390_lowcore.restart_stack = (unsigned long) restart_stack; 698 S390_lowcore.restart_stack = (unsigned long) restart_stack;
699 S390_lowcore.restart_fn = (unsigned long) do_restart; 699 S390_lowcore.restart_fn = (unsigned long) do_restart;
700 S390_lowcore.restart_data = 0; 700 S390_lowcore.restart_data = 0;
@@ -947,7 +947,7 @@ static ssize_t show_idle_time(struct device *dev,
947 unsigned int sequence; 947 unsigned int sequence;
948 948
949 do { 949 do {
950 now = get_clock(); 950 now = get_tod_clock();
951 sequence = ACCESS_ONCE(idle->sequence); 951 sequence = ACCESS_ONCE(idle->sequence);
952 idle_time = ACCESS_ONCE(idle->idle_time); 952 idle_time = ACCESS_ONCE(idle->idle_time);
953 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 953 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0aa98db8a80d..876546b9cfa1 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
63 */ 63 */
64unsigned long long notrace __kprobes sched_clock(void) 64unsigned long long notrace __kprobes sched_clock(void)
65{ 65{
66 return tod_to_ns(get_clock_monotonic()); 66 return tod_to_ns(get_tod_clock_monotonic());
67} 67}
68 68
69/* 69/*
@@ -194,7 +194,7 @@ static void stp_reset(void);
194 194
195void read_persistent_clock(struct timespec *ts) 195void read_persistent_clock(struct timespec *ts)
196{ 196{
197 tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); 197 tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
198} 198}
199 199
200void read_boot_clock(struct timespec *ts) 200void read_boot_clock(struct timespec *ts)
@@ -204,7 +204,7 @@ void read_boot_clock(struct timespec *ts)
204 204
205static cycle_t read_tod_clock(struct clocksource *cs) 205static cycle_t read_tod_clock(struct clocksource *cs)
206{ 206{
207 return get_clock(); 207 return get_tod_clock();
208} 208}
209 209
210static struct clocksource clocksource_tod = { 210static struct clocksource clocksource_tod = {
@@ -342,7 +342,7 @@ int get_sync_clock(unsigned long long *clock)
342 342
343 sw_ptr = &get_cpu_var(clock_sync_word); 343 sw_ptr = &get_cpu_var(clock_sync_word);
344 sw0 = atomic_read(sw_ptr); 344 sw0 = atomic_read(sw_ptr);
345 *clock = get_clock(); 345 *clock = get_tod_clock();
346 sw1 = atomic_read(sw_ptr); 346 sw1 = atomic_read(sw_ptr);
347 put_cpu_var(clock_sync_word); 347 put_cpu_var(clock_sync_word);
348 if (sw0 == sw1 && (sw0 & 0x80000000U)) 348 if (sw0 == sw1 && (sw0 & 0x80000000U))
@@ -486,7 +486,7 @@ static void etr_reset(void)
486 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, 486 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
487 .es = 0, .sl = 0 }; 487 .es = 0, .sl = 0 };
488 if (etr_setr(&etr_eacr) == 0) { 488 if (etr_setr(&etr_eacr) == 0) {
489 etr_tolec = get_clock(); 489 etr_tolec = get_tod_clock();
490 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); 490 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
491 if (etr_port0_online && etr_port1_online) 491 if (etr_port0_online && etr_port1_online)
492 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); 492 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
@@ -768,8 +768,8 @@ static int etr_sync_clock(void *data)
768 __ctl_set_bit(14, 21); 768 __ctl_set_bit(14, 21);
769 __ctl_set_bit(0, 29); 769 __ctl_set_bit(0, 29);
770 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; 770 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
771 old_clock = get_clock(); 771 old_clock = get_tod_clock();
772 if (set_clock(clock) == 0) { 772 if (set_tod_clock(clock) == 0) {
773 __udelay(1); /* Wait for the clock to start. */ 773 __udelay(1); /* Wait for the clock to start. */
774 __ctl_clear_bit(0, 29); 774 __ctl_clear_bit(0, 29);
775 __ctl_clear_bit(14, 21); 775 __ctl_clear_bit(14, 21);
@@ -845,7 +845,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
845 * assume that this can have caused an stepping 845 * assume that this can have caused an stepping
846 * port switch. 846 * port switch.
847 */ 847 */
848 etr_tolec = get_clock(); 848 etr_tolec = get_tod_clock();
849 eacr.p0 = etr_port0_online; 849 eacr.p0 = etr_port0_online;
850 if (!eacr.p0) 850 if (!eacr.p0)
851 eacr.e0 = 0; 851 eacr.e0 = 0;
@@ -858,7 +858,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
858 * assume that this can have caused an stepping 858 * assume that this can have caused an stepping
859 * port switch. 859 * port switch.
860 */ 860 */
861 etr_tolec = get_clock(); 861 etr_tolec = get_tod_clock();
862 eacr.p1 = etr_port1_online; 862 eacr.p1 = etr_port1_online;
863 if (!eacr.p1) 863 if (!eacr.p1)
864 eacr.e1 = 0; 864 eacr.e1 = 0;
@@ -974,7 +974,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
974 etr_eacr = eacr; 974 etr_eacr = eacr;
975 etr_setr(&etr_eacr); 975 etr_setr(&etr_eacr);
976 if (dp_changed) 976 if (dp_changed)
977 etr_tolec = get_clock(); 977 etr_tolec = get_tod_clock();
978} 978}
979 979
980/* 980/*
@@ -1012,7 +1012,7 @@ static void etr_work_fn(struct work_struct *work)
1012 /* Store aib to get the current ETR status word. */ 1012 /* Store aib to get the current ETR status word. */
1013 BUG_ON(etr_stetr(&aib) != 0); 1013 BUG_ON(etr_stetr(&aib) != 0);
1014 etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */ 1014 etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
1015 now = get_clock(); 1015 now = get_tod_clock();
1016 1016
1017 /* 1017 /*
1018 * Update the port information if the last stepping port change 1018 * Update the port information if the last stepping port change
@@ -1537,10 +1537,10 @@ static int stp_sync_clock(void *data)
1537 if (stp_info.todoff[0] || stp_info.todoff[1] || 1537 if (stp_info.todoff[0] || stp_info.todoff[1] ||
1538 stp_info.todoff[2] || stp_info.todoff[3] || 1538 stp_info.todoff[2] || stp_info.todoff[3] ||
1539 stp_info.tmd != 2) { 1539 stp_info.tmd != 2) {
1540 old_clock = get_clock(); 1540 old_clock = get_tod_clock();
1541 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); 1541 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
1542 if (rc == 0) { 1542 if (rc == 0) {
1543 delta = adjust_time(old_clock, get_clock(), 0); 1543 delta = adjust_time(old_clock, get_tod_clock(), 0);
1544 fixup_clock_comparator(delta); 1544 fixup_clock_comparator(delta);
1545 rc = chsc_sstpi(stp_page, &stp_info, 1545 rc = chsc_sstpi(stp_page, &stp_info,
1546 sizeof(struct stp_sstpi)); 1546 sizeof(struct stp_sstpi));
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 79cb51adc741..35b13ed0af5f 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -75,6 +75,10 @@ SECTIONS
75 EXIT_TEXT 75 EXIT_TEXT
76 } 76 }
77 77
78 .exit.data : {
79 EXIT_DATA
80 }
81
78 /* early.c uses stsi, which requires page aligned data. */ 82 /* early.c uses stsi, which requires page aligned data. */
79 . = ALIGN(PAGE_SIZE); 83 . = ALIGN(PAGE_SIZE);
80 INIT_DATA_SECTION(0x100) 84 INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ce9cc5aa2033..a0042acbd989 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu)
191 unsigned int sequence; 191 unsigned int sequence;
192 192
193 do { 193 do {
194 now = get_clock(); 194 now = get_tod_clock();
195 sequence = ACCESS_ONCE(idle->sequence); 195 sequence = ACCESS_ONCE(idle->sequence);
196 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 196 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
197 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 197 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 82c481ddef76..87418b50f21c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -362,7 +362,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
362 } 362 }
363 363
364 if ((!rc) && (vcpu->arch.sie_block->ckc < 364 if ((!rc) && (vcpu->arch.sie_block->ckc <
365 get_clock() + vcpu->arch.sie_block->epoch)) { 365 get_tod_clock() + vcpu->arch.sie_block->epoch)) {
366 if ((!psw_extint_disabled(vcpu)) && 366 if ((!psw_extint_disabled(vcpu)) &&
367 (vcpu->arch.sie_block->gcr[0] & 0x800ul)) 367 (vcpu->arch.sie_block->gcr[0] & 0x800ul))
368 rc = 1; 368 rc = 1;
@@ -402,7 +402,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
402 goto no_timer; 402 goto no_timer;
403 } 403 }
404 404
405 now = get_clock() + vcpu->arch.sie_block->epoch; 405 now = get_tod_clock() + vcpu->arch.sie_block->epoch;
406 if (vcpu->arch.sie_block->ckc < now) { 406 if (vcpu->arch.sie_block->ckc < now) {
407 __unset_cpu_idle(vcpu); 407 __unset_cpu_idle(vcpu);
408 return 0; 408 return 0;
@@ -492,7 +492,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
492 } 492 }
493 493
494 if ((vcpu->arch.sie_block->ckc < 494 if ((vcpu->arch.sie_block->ckc <
495 get_clock() + vcpu->arch.sie_block->epoch)) 495 get_tod_clock() + vcpu->arch.sie_block->epoch))
496 __try_deliver_ckc_interrupt(vcpu); 496 __try_deliver_ckc_interrupt(vcpu);
497 497
498 if (atomic_read(&fi->active)) { 498 if (atomic_read(&fi->active)) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f090e819bf71..2923781590a6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -147,7 +147,7 @@ int kvm_dev_ioctl_check_extension(long ext)
147 r = KVM_MAX_VCPUS; 147 r = KVM_MAX_VCPUS;
148 break; 148 break;
149 case KVM_CAP_S390_COW: 149 case KVM_CAP_S390_COW:
150 r = sclp_get_fac85() & 0x2; 150 r = MACHINE_HAS_ESOP;
151 break; 151 break;
152 default: 152 default:
153 r = 0; 153 r = 0;
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 42d0cf89121d..c61b9fad43cc 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -32,7 +32,7 @@ static void __udelay_disabled(unsigned long long usecs)
32 unsigned long cr0, cr6, new; 32 unsigned long cr0, cr6, new;
33 u64 clock_saved, end; 33 u64 clock_saved, end;
34 34
35 end = get_clock() + (usecs << 12); 35 end = get_tod_clock() + (usecs << 12);
36 clock_saved = local_tick_disable(); 36 clock_saved = local_tick_disable();
37 __ctl_store(cr0, 0, 0); 37 __ctl_store(cr0, 0, 0);
38 __ctl_store(cr6, 6, 6); 38 __ctl_store(cr6, 6, 6);
@@ -45,7 +45,7 @@ static void __udelay_disabled(unsigned long long usecs)
45 set_clock_comparator(end); 45 set_clock_comparator(end);
46 vtime_stop_cpu(); 46 vtime_stop_cpu();
47 local_irq_disable(); 47 local_irq_disable();
48 } while (get_clock() < end); 48 } while (get_tod_clock() < end);
49 lockdep_on(); 49 lockdep_on();
50 __ctl_load(cr0, 0, 0); 50 __ctl_load(cr0, 0, 0);
51 __ctl_load(cr6, 6, 6); 51 __ctl_load(cr6, 6, 6);
@@ -56,7 +56,7 @@ static void __udelay_enabled(unsigned long long usecs)
56{ 56{
57 u64 clock_saved, end; 57 u64 clock_saved, end;
58 58
59 end = get_clock() + (usecs << 12); 59 end = get_tod_clock() + (usecs << 12);
60 do { 60 do {
61 clock_saved = 0; 61 clock_saved = 0;
62 if (end < S390_lowcore.clock_comparator) { 62 if (end < S390_lowcore.clock_comparator) {
@@ -67,7 +67,7 @@ static void __udelay_enabled(unsigned long long usecs)
67 local_irq_disable(); 67 local_irq_disable();
68 if (clock_saved) 68 if (clock_saved)
69 local_tick_enable(clock_saved); 69 local_tick_enable(clock_saved);
70 } while (get_clock() < end); 70 } while (get_tod_clock() < end);
71} 71}
72 72
73/* 73/*
@@ -111,8 +111,8 @@ void udelay_simple(unsigned long long usecs)
111{ 111{
112 u64 end; 112 u64 end;
113 113
114 end = get_clock() + (usecs << 12); 114 end = get_tod_clock() + (usecs << 12);
115 while (get_clock() < end) 115 while (get_tod_clock() < end)
116 cpu_relax(); 116 cpu_relax();
117} 117}
118 118
@@ -122,10 +122,10 @@ void __ndelay(unsigned long long nsecs)
122 122
123 nsecs <<= 9; 123 nsecs <<= 9;
124 do_div(nsecs, 125); 124 do_div(nsecs, 125);
125 end = get_clock() + nsecs; 125 end = get_tod_clock() + nsecs;
126 if (nsecs & ~0xfffUL) 126 if (nsecs & ~0xfffUL)
127 __udelay(nsecs >> 12); 127 __udelay(nsecs >> 12);
128 while (get_clock() < end) 128 while (get_tod_clock() < end)
129 barrier(); 129 barrier();
130} 130}
131EXPORT_SYMBOL(__ndelay); 131EXPORT_SYMBOL(__ndelay);
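
All of these call sites are a plain rename from get_clock() to get_tod_clock(); the arithmetic around them is untouched. Bit 51 of the s390 TOD clock increments once per microsecond, so a microsecond count shifted left by 12 yields TOD units, and the delay loops simply spin until the clock passes a precomputed end value. A self-contained sketch of that pattern, with the TOD read modelled by CLOCK_MONOTONIC so it compiles anywhere:

#include <stdint.h>
#include <time.h>

/* Stand-in for get_tod_clock() (the STCK value), modelled here with
 * CLOCK_MONOTONIC scaled so that 1 microsecond == 4096 "TOD" units.
 */
static uint64_t tod_clock_sketch(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ((uint64_t) ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000ULL) << 12;
}

/* The busy-wait used by udelay_simple()/__ndelay() above. */
static void udelay_sketch(unsigned long long usecs)
{
        uint64_t end = tod_clock_sketch() + (usecs << 12);

        while (tod_clock_sketch() < end)
                ;       /* the kernel calls cpu_relax() or barrier() here */
}

int main(void)
{
        udelay_sketch(100);     /* burn roughly 100 microseconds */
        return 0;
}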
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 9017a63dda3d..a70ee84c0241 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -50,7 +50,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm,
50 ptep = pte_offset_map(pmd, addr); 50 ptep = pte_offset_map(pmd, addr);
51 if (!pte_present(*ptep)) 51 if (!pte_present(*ptep))
52 return -0x11UL; 52 return -0x11UL;
53 if (write && !pte_write(*ptep)) 53 if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
54 return -0x04UL; 54 return -0x04UL;
55 55
56 return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); 56 return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
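
This one-line change belongs to the software dirty bit conversion: with the storage-key based page_test_and_clear_dirty gone, the dirty state lives purely in the PTE, so a kernel write through the page tables may only go ahead if the target is both writable and already marked dirty; a clean page has to take the fault path, where the software dirty bit gets set. The rule, restated with illustrative flag bits (these are not the s390 encodings):

#include <stdbool.h>
#include <stdint.h>

#define PTE_PRESENT     0x1ULL          /* placeholder bit layout */
#define PTE_WRITABLE    0x2ULL
#define PTE_DIRTY       0x4ULL

/* Mirror of the follow_table() check above: write access is only
 * satisfied directly when the PTE is present, writable and dirty;
 * otherwise the caller must fall back to the fault handler.
 */
static bool may_access_directly(uint64_t pte, bool write)
{
        if (!(pte & PTE_PRESENT))
                return false;                   /* -0x11UL: not mapped */
        if (write && (!(pte & PTE_WRITABLE) || !(pte & PTE_DIRTY)))
                return false;                   /* -0x04UL: protection */
        return true;
}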
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index c59a5efa58b1..06bafec00278 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,12 +101,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
101 101
102#else 102#else
103 103
104int s390_mmap_check(unsigned long addr, unsigned long len) 104int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
105{ 105{
106 int rc; 106 int rc;
107 107
108 if (!is_compat_task() && 108 if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
109 len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) { 109 return 0;
110 if (!(flags & MAP_FIXED))
111 addr = 0;
112 if ((addr + len) >= TASK_SIZE) {
110 rc = crst_table_upgrade(current->mm, 1UL << 53); 113 rc = crst_table_upgrade(current->mm, 1UL << 53);
111 if (rc) 114 if (rc)
112 return rc; 115 return rc;
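
The reworked s390_mmap_check() fixes the MAP_FIXED case: whether the page tables need an upgrade to the 8 PB (2^53) address space depends on where the mapping will end, and since addr is only a hint unless MAP_FIXED is given, it is zeroed in the non-fixed case so the decision rests on len alone. Compat tasks and tasks that already run with the full 2^53 TASK_SIZE are skipped outright. A condensed model of that decision (the helper and flag value are stand-ins, not kernel definitions):

#include <stdbool.h>

#define UPGRADED_LIMIT  (1UL << 53)     /* address space after the upgrade, 8 PB */
#define MAP_FIXED_FLAG  0x10UL          /* placeholder for MAP_FIXED */

static int upgrade_page_tables(void)
{
        return 0;       /* stands in for crst_table_upgrade(mm, 1UL << 53) */
}

static int mmap_check_sketch(unsigned long addr, unsigned long len,
                             unsigned long flags, unsigned long task_size,
                             bool compat)
{
        if (compat || task_size >= UPGRADED_LIMIT)
                return 0;                       /* nothing left to upgrade */
        if (!(flags & MAP_FIXED_FLAG))
                addr = 0;                       /* hint only: judge by len */
        if (addr + len >= task_size)
                return upgrade_page_tables();
        return 0;
}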
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 29ccee3651f4..d21040ed5e59 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -127,7 +127,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
127 pte_val(*pte) = _PAGE_TYPE_EMPTY; 127 pte_val(*pte) = _PAGE_TYPE_EMPTY;
128 continue; 128 continue;
129 } 129 }
130 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW)); 130 pte_val(*pte) = __pa(address);
131 } 131 }
132} 132}
133 133
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 6ed1426d27c5..79699f46a443 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -85,11 +85,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
85 pud_t *pu_dir; 85 pud_t *pu_dir;
86 pmd_t *pm_dir; 86 pmd_t *pm_dir;
87 pte_t *pt_dir; 87 pte_t *pt_dir;
88 pte_t pte;
89 int ret = -ENOMEM; 88 int ret = -ENOMEM;
90 89
91 while (address < end) { 90 while (address < end) {
92 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
93 pg_dir = pgd_offset_k(address); 91 pg_dir = pgd_offset_k(address);
94 if (pgd_none(*pg_dir)) { 92 if (pgd_none(*pg_dir)) {
95 pu_dir = vmem_pud_alloc(); 93 pu_dir = vmem_pud_alloc();
@@ -101,9 +99,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
101#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 99#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
102 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && 100 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
103 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { 101 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
104 pte_val(pte) |= _REGION3_ENTRY_LARGE; 102 pud_val(*pu_dir) = __pa(address) |
105 pte_val(pte) |= _REGION_ENTRY_TYPE_R3; 103 _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
106 pud_val(*pu_dir) = pte_val(pte); 104 (ro ? _REGION_ENTRY_RO : 0);
107 address += PUD_SIZE; 105 address += PUD_SIZE;
108 continue; 106 continue;
109 } 107 }
@@ -118,8 +116,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
118#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 116#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
119 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && 117 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
120 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { 118 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
121 pte_val(pte) |= _SEGMENT_ENTRY_LARGE; 119 pmd_val(*pm_dir) = __pa(address) |
122 pmd_val(*pm_dir) = pte_val(pte); 120 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
121 (ro ? _SEGMENT_ENTRY_RO : 0);
123 address += PMD_SIZE; 122 address += PMD_SIZE;
124 continue; 123 continue;
125 } 124 }
@@ -132,7 +131,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
132 } 131 }
133 132
134 pt_dir = pte_offset_kernel(pm_dir, address); 133 pt_dir = pte_offset_kernel(pm_dir, address);
135 *pt_dir = pte; 134 pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
136 address += PAGE_SIZE; 135 address += PAGE_SIZE;
137 } 136 }
138 ret = 0; 137 ret = 0;
@@ -199,7 +198,6 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
199 pud_t *pu_dir; 198 pud_t *pu_dir;
200 pmd_t *pm_dir; 199 pmd_t *pm_dir;
201 pte_t *pt_dir; 200 pte_t *pt_dir;
202 pte_t pte;
203 int ret = -ENOMEM; 201 int ret = -ENOMEM;
204 202
205 start_addr = (unsigned long) start; 203 start_addr = (unsigned long) start;
@@ -237,9 +235,8 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
237 new_page = vmemmap_alloc_block(PMD_SIZE, node); 235 new_page = vmemmap_alloc_block(PMD_SIZE, node);
238 if (!new_page) 236 if (!new_page)
239 goto out; 237 goto out;
240 pte = mk_pte_phys(__pa(new_page), PAGE_RW); 238 pmd_val(*pm_dir) = __pa(new_page) |
241 pte_val(pte) |= _SEGMENT_ENTRY_LARGE; 239 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
242 pmd_val(*pm_dir) = pte_val(pte);
243 address = (address + PMD_SIZE) & PMD_MASK; 240 address = (address + PMD_SIZE) & PMD_MASK;
244 continue; 241 continue;
245 } 242 }
@@ -260,8 +257,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
260 new_page =__pa(vmem_alloc_pages(0)); 257 new_page =__pa(vmem_alloc_pages(0));
261 if (!new_page) 258 if (!new_page)
262 goto out; 259 goto out;
263 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); 260 pte_val(*pt_dir) = __pa(new_page);
264 *pt_dir = pte;
265 } 261 }
266 address += PAGE_SIZE; 262 address += PAGE_SIZE;
267 } 263 }
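
With the pte intermediary removed, the huge mappings are written as region- and segment-table entries directly: the physical address or'ed with the entry type, the LARGE bit, and RO when a read-only mapping was requested. In schematic form (flag values are placeholders, not the s390 bit definitions):

#include <stdint.h>

#define ENTRY_TYPE_R3   0x4ULL          /* placeholder: region-third-table entry */
#define ENTRY_LARGE     0x400ULL        /* placeholder: large-mapping bit */
#define ENTRY_RO        0x200ULL        /* placeholder: write protection */

/* Schematic of the pud_val()/pmd_val() assignments above: build the
 * table entry for a large mapping straight from the physical address.
 */
static uint64_t large_entry_sketch(uint64_t phys, int read_only)
{
        return phys | ENTRY_TYPE_R3 | ENTRY_LARGE |
               (read_only ? ENTRY_RO : 0);
}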
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bb284419b0fd..0972e91cced2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -7,6 +7,7 @@
7 */ 7 */
8#include <linux/moduleloader.h> 8#include <linux/moduleloader.h>
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/if_vlan.h>
10#include <linux/filter.h> 11#include <linux/filter.h>
11#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
12#include <asm/processor.h> 13#include <asm/processor.h>
@@ -254,6 +255,8 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
254 case BPF_S_ANC_HATYPE: 255 case BPF_S_ANC_HATYPE:
255 case BPF_S_ANC_RXHASH: 256 case BPF_S_ANC_RXHASH:
256 case BPF_S_ANC_CPU: 257 case BPF_S_ANC_CPU:
258 case BPF_S_ANC_VLAN_TAG:
259 case BPF_S_ANC_VLAN_TAG_PRESENT:
257 case BPF_S_RET_K: 260 case BPF_S_RET_K:
258 /* first instruction sets A register */ 261 /* first instruction sets A register */
259 break; 262 break;
@@ -699,6 +702,24 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
699 /* l %r5,<d(rxhash)>(%r2) */ 702 /* l %r5,<d(rxhash)>(%r2) */
700 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash)); 703 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
701 break; 704 break;
705 case BPF_S_ANC_VLAN_TAG:
706 case BPF_S_ANC_VLAN_TAG_PRESENT:
707 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
708 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
709 /* lhi %r5,0 */
710 EMIT4(0xa7580000);
711 /* icm %r5,3,<d(vlan_tci)>(%r2) */
712 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
713 if (filter->code == BPF_S_ANC_VLAN_TAG) {
714 /* nill %r5,0xefff */
715 EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
716 } else {
717 /* nill %r5,0x1000 */
718 EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
719 /* srl %r5,12 */
720 EMIT4_DISP(0x88500000, 12);
721 }
722 break;
702 case BPF_S_ANC_CPU: /* A = smp_processor_id() */ 723 case BPF_S_ANC_CPU: /* A = smp_processor_id() */
703#ifdef CONFIG_SMP 724#ifdef CONFIG_SMP
704 /* l %r5,<d(cpu_nr)> */ 725 /* l %r5,<d(cpu_nr)> */
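
The two new ancillary loads are computed from skb->vlan_tci: BPF_S_ANC_VLAN_TAG strips the VLAN_TAG_PRESENT bit (0x1000, which the BUILD_BUG_ON pins down) and returns the tag itself, while BPF_S_ANC_VLAN_TAG_PRESENT isolates that bit and shifts it down to 0 or 1, which is exactly what the emitted lhi/icm/nill/srl sequence does. In plain C the two results are:

#include <stdint.h>

#define VLAN_TAG_PRESENT        0x1000  /* same value the BUILD_BUG_ON asserts */

/* BPF_S_ANC_VLAN_TAG: A = vlan_tci with the "present" bit stripped. */
static uint32_t anc_vlan_tag(uint16_t vlan_tci)
{
        return vlan_tci & ~VLAN_TAG_PRESENT;
}

/* BPF_S_ANC_VLAN_TAG_PRESENT: A = 1 if a VLAN tag is present, else 0. */
static uint32_t anc_vlan_tag_present(uint16_t vlan_tci)
{
        return (vlan_tci & VLAN_TAG_PRESENT) >> 12;
}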
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 60e0372545d2..27b4c17855b9 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -51,8 +51,7 @@ EXPORT_SYMBOL_GPL(zpci_list);
51DEFINE_MUTEX(zpci_list_lock); 51DEFINE_MUTEX(zpci_list_lock);
52EXPORT_SYMBOL_GPL(zpci_list_lock); 52EXPORT_SYMBOL_GPL(zpci_list_lock);
53 53
54struct pci_hp_callback_ops hotplug_ops; 54static struct pci_hp_callback_ops *hotplug_ops;
55EXPORT_SYMBOL_GPL(hotplug_ops);
56 55
57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 56static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
58static DEFINE_SPINLOCK(zpci_domain_lock); 57static DEFINE_SPINLOCK(zpci_domain_lock);
@@ -974,8 +973,8 @@ int zpci_create_device(struct zpci_dev *zdev)
974 973
975 mutex_lock(&zpci_list_lock); 974 mutex_lock(&zpci_list_lock);
976 list_add_tail(&zdev->entry, &zpci_list); 975 list_add_tail(&zdev->entry, &zpci_list);
977 if (hotplug_ops.create_slot) 976 if (hotplug_ops)
978 hotplug_ops.create_slot(zdev); 977 hotplug_ops->create_slot(zdev);
979 mutex_unlock(&zpci_list_lock); 978 mutex_unlock(&zpci_list_lock);
980 979
981 if (zdev->state == ZPCI_FN_STATE_STANDBY) 980 if (zdev->state == ZPCI_FN_STATE_STANDBY)
@@ -989,8 +988,8 @@ int zpci_create_device(struct zpci_dev *zdev)
989out_start: 988out_start:
990 mutex_lock(&zpci_list_lock); 989 mutex_lock(&zpci_list_lock);
991 list_del(&zdev->entry); 990 list_del(&zdev->entry);
992 if (hotplug_ops.remove_slot) 991 if (hotplug_ops)
993 hotplug_ops.remove_slot(zdev); 992 hotplug_ops->remove_slot(zdev);
994 mutex_unlock(&zpci_list_lock); 993 mutex_unlock(&zpci_list_lock);
995out_bus: 994out_bus:
996 zpci_free_domain(zdev); 995 zpci_free_domain(zdev);
@@ -1072,13 +1071,29 @@ static void zpci_mem_exit(void)
1072 kmem_cache_destroy(zdev_fmb_cache); 1071 kmem_cache_destroy(zdev_fmb_cache);
1073} 1072}
1074 1073
1075unsigned int pci_probe = 1; 1074void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
1076EXPORT_SYMBOL_GPL(pci_probe); 1075{
1076 mutex_lock(&zpci_list_lock);
1077 hotplug_ops = ops;
1078 mutex_unlock(&zpci_list_lock);
1079}
1080EXPORT_SYMBOL_GPL(zpci_register_hp_ops);
1081
1082void zpci_deregister_hp_ops(void)
1083{
1084 mutex_lock(&zpci_list_lock);
1085 hotplug_ops = NULL;
1086 mutex_unlock(&zpci_list_lock);
1087}
1088EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
1089
1090unsigned int s390_pci_probe = 1;
1091EXPORT_SYMBOL_GPL(s390_pci_probe);
1077 1092
1078char * __init pcibios_setup(char *str) 1093char * __init pcibios_setup(char *str)
1079{ 1094{
1080 if (!strcmp(str, "off")) { 1095 if (!strcmp(str, "off")) {
1081 pci_probe = 0; 1096 s390_pci_probe = 0;
1082 return NULL; 1097 return NULL;
1083 } 1098 }
1084 return str; 1099 return str;
@@ -1088,7 +1103,7 @@ static int __init pci_base_init(void)
1088{ 1103{
1089 int rc; 1104 int rc;
1090 1105
1091 if (!pci_probe) 1106 if (!s390_pci_probe)
1092 return 0; 1107 return 0;
1093 1108
1094 if (!test_facility(2) || !test_facility(69) 1109 if (!test_facility(2) || !test_facility(69)
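
The exported hotplug_ops structure becomes a private pointer plus zpci_register_hp_ops()/zpci_deregister_hp_ops(), so the hotplug driver installs and removes its callbacks under zpci_list_lock instead of writing into an exported global whose individual members then had to be tested. The shape of that interface, reduced to its essentials (the ops members are reconstructed from the calls in the hunk and kept as void returns for brevity):

#include <linux/mutex.h>

struct zpci_dev;

struct pci_hp_callback_ops {    /* member types approximated from the hunk */
        void (*create_slot)(struct zpci_dev *zdev);
        void (*remove_slot)(struct zpci_dev *zdev);
};

static DEFINE_MUTEX(hp_lock);
static struct pci_hp_callback_ops *hp_ops;

void register_hp_ops_sketch(struct pci_hp_callback_ops *ops)
{
        mutex_lock(&hp_lock);
        hp_ops = ops;
        mutex_unlock(&hp_lock);
}

void deregister_hp_ops_sketch(void)
{
        mutex_lock(&hp_lock);
        hp_ops = NULL;
        mutex_unlock(&hp_lock);
}

/* Call sites test the pointer under the same lock, as zpci_create_device()
 * does above with zpci_list_lock.
 */
static void notify_create_slot(struct zpci_dev *zdev)
{
        mutex_lock(&hp_lock);
        if (hp_ops)
                hp_ops->create_slot(zdev);
        mutex_unlock(&hp_lock);
}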
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 2c847143cbd1..f339fe2feb15 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -19,25 +19,25 @@
19 * Call Logical Processor 19 * Call Logical Processor
20 * Retry logic is handled by the caller. 20 * Retry logic is handled by the caller.
21 */ 21 */
22static inline u8 clp_instr(void *req) 22static inline u8 clp_instr(void *data)
23{ 23{
24 u64 ilpm; 24 struct { u8 _[CLP_BLK_SIZE]; } *req = data;
25 u64 ignored;
25 u8 cc; 26 u8 cc;
26 27
27 asm volatile ( 28 asm volatile (
28 " .insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n" 29 " .insn rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
29 " ipm %[cc]\n" 30 " ipm %[cc]\n"
30 " srl %[cc],28\n" 31 " srl %[cc],28\n"
31 : [cc] "=d" (cc), [ilpm] "=d" (ilpm) 32 : [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
32 : [req] "a" (req) 33 : [req] "a" (req)
33 : "cc", "memory"); 34 : "cc");
34 return cc; 35 return cc;
35} 36}
36 37
37static void *clp_alloc_block(void) 38static void *clp_alloc_block(void)
38{ 39{
39 struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE)); 40 return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
40 return (page) ? page_address(page) : NULL;
41} 41}
42 42
43static void clp_free_block(void *ptr) 43static void clp_free_block(void *ptr)
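
The inline assembly cleanup swaps the blanket "memory" clobber for a precise operand: casting the request to a CLP_BLK_SIZE-sized struct and passing it as "+m" tells the compiler exactly which bytes the CLP instruction may read and write, nothing more. The allocation cleanup next to it is equivalent code, since __get_free_pages() already returns the mapped address (or NULL), so the alloc_pages()/page_address() pair collapses into a single call. The constraint trick in generic form (empty asm body, illustration only):

#define BLK_SIZE 4096   /* stands in for CLP_BLK_SIZE */

/* Wrapping the buffer in a fixed-size struct lets one "+m" operand cover
 * the whole block: the compiler must flush pending stores to it before
 * the asm and reload from it afterwards, without a global "memory"
 * clobber. The asm body is intentionally empty; only the constraint
 * matters for the illustration.
 */
static void touch_block_sketch(void *data)
{
        struct { unsigned char _[BLK_SIZE]; } *req = data;

        asm volatile("" : "+m" (*req));
}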
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cdadce23e66d..4cfb7200260d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -14,7 +14,7 @@ menuconfig ATA
14 tristate "Serial ATA and Parallel ATA drivers" 14 tristate "Serial ATA and Parallel ATA drivers"
15 depends on HAS_IOMEM 15 depends on HAS_IOMEM
16 depends on BLOCK 16 depends on BLOCK
17 depends on !(M32R || M68K) || BROKEN 17 depends on !(M32R || M68K || S390) || BROKEN
18 select SCSI 18 select SCSI
19 ---help--- 19 ---help---
20 If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or 20 If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1855a6fd2b0a..b89d250f56e7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -277,7 +277,7 @@ config GPIO_ICH
277 277
278config GPIO_VX855 278config GPIO_VX855
279 tristate "VIA VX855/VX875 GPIO" 279 tristate "VIA VX855/VX875 GPIO"
280 depends on PCI 280 depends on PCI && GENERIC_HARDIRQS
281 select MFD_CORE 281 select MFD_CORE
282 select MFD_VX855 282 select MFD_VX855
283 help 283 help
@@ -599,7 +599,7 @@ config GPIO_TIMBERDALE
599 599
600config GPIO_RDC321X 600config GPIO_RDC321X
601 tristate "RDC R-321x GPIO support" 601 tristate "RDC R-321x GPIO support"
602 depends on PCI 602 depends on PCI && GENERIC_HARDIRQS
603 select MFD_CORE 603 select MFD_CORE
604 select MFD_RDC321X 604 select MFD_RDC321X
605 help 605 help
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 8090b87b3066..9e580166161a 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -180,7 +180,7 @@ config RADIO_TIMBERDALE
180 180
181config RADIO_WL1273 181config RADIO_WL1273
182 tristate "Texas Instruments WL1273 I2C FM Radio" 182 tristate "Texas Instruments WL1273 I2C FM Radio"
183 depends on I2C && VIDEO_V4L2 183 depends on I2C && VIDEO_V4L2 && GENERIC_HARDIRQS
184 select MFD_CORE 184 select MFD_CORE
185 select MFD_WL1273_CORE 185 select MFD_WL1273_CORE
186 select FW_LOADER 186 select FW_LOADER
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index ceb0de0cf62c..1194446f859a 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,6 +22,7 @@ if NET_CADENCE
22 22
23config ARM_AT91_ETHER 23config ARM_AT91_ETHER
24 tristate "AT91RM9200 Ethernet support" 24 tristate "AT91RM9200 Ethernet support"
25 depends on GENERIC_HARDIRQS
25 select NET_CORE 26 select NET_CORE
26 select MACB 27 select MACB
27 ---help--- 28 ---help---
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 961f0b293913..450345261bd3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -4,7 +4,6 @@
4 4
5menuconfig PHYLIB 5menuconfig PHYLIB
6 tristate "PHY Device support and infrastructure" 6 tristate "PHY Device support and infrastructure"
7 depends on !S390
8 depends on NETDEVICES 7 depends on NETDEVICES
9 help 8 help
10 Ethernet controllers are usually attached to PHY 9 Ethernet controllers are usually attached to PHY
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 0e60438ebe30..24e12d4d1769 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -35,7 +35,7 @@ if PARPORT
35 35
36config PARPORT_PC 36config PARPORT_PC
37 tristate "PC-style hardware" 37 tristate "PC-style hardware"
38 depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \ 38 depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
39 (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA 39 (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && !XTENSA
40 ---help--- 40 ---help---
41 You should say Y here if you have a PC-style parallel port. All 41 You should say Y here if you have a PC-style parallel port. All
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index dee68e0698e1..7db249a25016 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -172,25 +172,6 @@ error:
172 return -ENOMEM; 172 return -ENOMEM;
173} 173}
174 174
175static int __init init_pci_slots(void)
176{
177 struct zpci_dev *zdev;
178 int device = 0;
179
180 /*
181 * Create a structure for each slot, and register that slot
182 * with the pci_hotplug subsystem.
183 */
184 mutex_lock(&zpci_list_lock);
185 list_for_each_entry(zdev, &zpci_list, entry) {
186 init_pci_slot(zdev);
187 device++;
188 }
189
190 mutex_unlock(&zpci_list_lock);
191 return (device) ? 0 : -ENODEV;
192}
193
194static void exit_pci_slot(struct zpci_dev *zdev) 175static void exit_pci_slot(struct zpci_dev *zdev)
195{ 176{
196 struct list_head *tmp, *n; 177 struct list_head *tmp, *n;
@@ -205,6 +186,26 @@ static void exit_pci_slot(struct zpci_dev *zdev)
205 } 186 }
206} 187}
207 188
189static struct pci_hp_callback_ops hp_ops = {
190 .create_slot = init_pci_slot,
191 .remove_slot = exit_pci_slot,
192};
193
194static void __init init_pci_slots(void)
195{
196 struct zpci_dev *zdev;
197
198 /*
199 * Create a structure for each slot, and register that slot
200 * with the pci_hotplug subsystem.
201 */
202 mutex_lock(&zpci_list_lock);
203 list_for_each_entry(zdev, &zpci_list, entry) {
204 init_pci_slot(zdev);
205 }
206 mutex_unlock(&zpci_list_lock);
207}
208
208static void __exit exit_pci_slots(void) 209static void __exit exit_pci_slots(void)
209{ 210{
210 struct list_head *tmp, *n; 211 struct list_head *tmp, *n;
@@ -224,28 +225,19 @@ static void __exit exit_pci_slots(void)
224 225
225static int __init pci_hotplug_s390_init(void) 226static int __init pci_hotplug_s390_init(void)
226{ 227{
227 /* 228 if (!s390_pci_probe)
228 * Do specific initialization stuff for your driver here
229 * like initializing your controller hardware (if any) and
230 * determining the number of slots you have in the system
231 * right now.
232 */
233
234 if (!pci_probe)
235 return -EOPNOTSUPP; 229 return -EOPNOTSUPP;
236 230
237 /* register callbacks for slot handling from arch code */ 231 zpci_register_hp_ops(&hp_ops);
238 mutex_lock(&zpci_list_lock); 232 init_pci_slots();
239 hotplug_ops.create_slot = init_pci_slot; 233
240 hotplug_ops.remove_slot = exit_pci_slot; 234 return 0;
241 mutex_unlock(&zpci_list_lock);
242 pr_info("registered hotplug slot callbacks\n");
243 return init_pci_slots();
244} 235}
245 236
246static void __exit pci_hotplug_s390_exit(void) 237static void __exit pci_hotplug_s390_exit(void)
247{ 238{
248 exit_pci_slots(); 239 exit_pci_slots();
240 zpci_deregister_hp_ops();
249} 241}
250 242
251module_init(pci_hotplug_s390_init); 243module_init(pci_hotplug_s390_init);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 29225e1c159c..f1b7fdc58a5f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1352,7 +1352,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
1352 switch (rc) { 1352 switch (rc) {
1353 case 0: /* termination successful */ 1353 case 0: /* termination successful */
1354 cqr->status = DASD_CQR_CLEAR_PENDING; 1354 cqr->status = DASD_CQR_CLEAR_PENDING;
1355 cqr->stopclk = get_clock(); 1355 cqr->stopclk = get_tod_clock();
1356 cqr->starttime = 0; 1356 cqr->starttime = 0;
1357 DBF_DEV_EVENT(DBF_DEBUG, device, 1357 DBF_DEV_EVENT(DBF_DEBUG, device,
1358 "terminate cqr %p successful", 1358 "terminate cqr %p successful",
@@ -1420,7 +1420,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
1420 cqr->status = DASD_CQR_ERROR; 1420 cqr->status = DASD_CQR_ERROR;
1421 return -EIO; 1421 return -EIO;
1422 } 1422 }
1423 cqr->startclk = get_clock(); 1423 cqr->startclk = get_tod_clock();
1424 cqr->starttime = jiffies; 1424 cqr->starttime = jiffies;
1425 cqr->retries--; 1425 cqr->retries--;
1426 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1426 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
@@ -1623,7 +1623,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1623 return; 1623 return;
1624 } 1624 }
1625 1625
1626 now = get_clock(); 1626 now = get_tod_clock();
1627 cqr = (struct dasd_ccw_req *) intparm; 1627 cqr = (struct dasd_ccw_req *) intparm;
1628 /* check for conditions that should be handled immediately */ 1628 /* check for conditions that should be handled immediately */
1629 if (!cqr || 1629 if (!cqr ||
@@ -1963,7 +1963,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
1963 } 1963 }
1964 break; 1964 break;
1965 case DASD_CQR_QUEUED: 1965 case DASD_CQR_QUEUED:
1966 cqr->stopclk = get_clock(); 1966 cqr->stopclk = get_tod_clock();
1967 cqr->status = DASD_CQR_CLEARED; 1967 cqr->status = DASD_CQR_CLEARED;
1968 break; 1968 break;
1969 default: /* no need to modify the others */ 1969 default: /* no need to modify the others */
@@ -2210,7 +2210,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2210 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2210 wait_event(generic_waitq, _wait_for_wakeup(cqr));
2211 } 2211 }
2212 2212
2213 maincqr->endclk = get_clock(); 2213 maincqr->endclk = get_tod_clock();
2214 if ((maincqr->status != DASD_CQR_DONE) && 2214 if ((maincqr->status != DASD_CQR_DONE) &&
2215 (maincqr->intrc != -ERESTARTSYS)) 2215 (maincqr->intrc != -ERESTARTSYS))
2216 dasd_log_sense(maincqr, &maincqr->irb); 2216 dasd_log_sense(maincqr, &maincqr->irb);
@@ -2340,7 +2340,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
2340 "Cancelling request %p failed with rc=%d\n", 2340 "Cancelling request %p failed with rc=%d\n",
2341 cqr, rc); 2341 cqr, rc);
2342 } else { 2342 } else {
2343 cqr->stopclk = get_clock(); 2343 cqr->stopclk = get_tod_clock();
2344 } 2344 }
2345 break; 2345 break;
2346 default: /* already finished or clear pending - do nothing */ 2346 default: /* already finished or clear pending - do nothing */
@@ -2568,7 +2568,7 @@ restart:
2568 } 2568 }
2569 2569
2570 /* Rechain finished requests to final queue */ 2570 /* Rechain finished requests to final queue */
2571 cqr->endclk = get_clock(); 2571 cqr->endclk = get_tod_clock();
2572 list_move_tail(&cqr->blocklist, final_queue); 2572 list_move_tail(&cqr->blocklist, final_queue);
2573 } 2573 }
2574} 2574}
@@ -2711,7 +2711,7 @@ restart_cb:
2711 } 2711 }
2712 /* call the callback function */ 2712 /* call the callback function */
2713 spin_lock_irq(&block->request_queue_lock); 2713 spin_lock_irq(&block->request_queue_lock);
2714 cqr->endclk = get_clock(); 2714 cqr->endclk = get_tod_clock();
2715 list_del_init(&cqr->blocklist); 2715 list_del_init(&cqr->blocklist);
2716 __dasd_cleanup_cqr(cqr); 2716 __dasd_cleanup_cqr(cqr);
2717 spin_unlock_irq(&block->request_queue_lock); 2717 spin_unlock_irq(&block->request_queue_lock);
@@ -3042,12 +3042,15 @@ void dasd_generic_remove(struct ccw_device *cdev)
3042 cdev->handler = NULL; 3042 cdev->handler = NULL;
3043 3043
3044 device = dasd_device_from_cdev(cdev); 3044 device = dasd_device_from_cdev(cdev);
3045 if (IS_ERR(device)) 3045 if (IS_ERR(device)) {
3046 dasd_remove_sysfs_files(cdev);
3046 return; 3047 return;
3048 }
3047 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3049 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3048 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3050 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3049 /* Already doing offline processing */ 3051 /* Already doing offline processing */
3050 dasd_put_device(device); 3052 dasd_put_device(device);
3053 dasd_remove_sysfs_files(cdev);
3051 return; 3054 return;
3052 } 3055 }
3053 /* 3056 /*
@@ -3504,7 +3507,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
3504 cqr->memdev = device; 3507 cqr->memdev = device;
3505 cqr->expires = 10*HZ; 3508 cqr->expires = 10*HZ;
3506 cqr->retries = 256; 3509 cqr->retries = 256;
3507 cqr->buildclk = get_clock(); 3510 cqr->buildclk = get_tod_clock();
3508 cqr->status = DASD_CQR_FILLED; 3511 cqr->status = DASD_CQR_FILLED;
3509 return cqr; 3512 return cqr;
3510} 3513}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index f8212d54013a..d26134713682 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -229,7 +229,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
229 dctl_cqr->expires = 5 * 60 * HZ; 229 dctl_cqr->expires = 5 * 60 * HZ;
230 dctl_cqr->retries = 2; 230 dctl_cqr->retries = 2;
231 231
232 dctl_cqr->buildclk = get_clock(); 232 dctl_cqr->buildclk = get_tod_clock();
233 233
234 dctl_cqr->status = DASD_CQR_FILLED; 234 dctl_cqr->status = DASD_CQR_FILLED;
235 235
@@ -1719,7 +1719,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1719 erp->magic = default_erp->magic; 1719 erp->magic = default_erp->magic;
1720 erp->expires = default_erp->expires; 1720 erp->expires = default_erp->expires;
1721 erp->retries = 256; 1721 erp->retries = 256;
1722 erp->buildclk = get_clock(); 1722 erp->buildclk = get_tod_clock();
1723 erp->status = DASD_CQR_FILLED; 1723 erp->status = DASD_CQR_FILLED;
1724 1724
1725 /* remove the default erp */ 1725 /* remove the default erp */
@@ -2322,7 +2322,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
2322 DBF_DEV_EVENT(DBF_ERR, device, "%s", 2322 DBF_DEV_EVENT(DBF_ERR, device, "%s",
2323 "Unable to allocate ERP request"); 2323 "Unable to allocate ERP request");
2324 cqr->status = DASD_CQR_FAILED; 2324 cqr->status = DASD_CQR_FAILED;
2325 cqr->stopclk = get_clock (); 2325 cqr->stopclk = get_tod_clock();
2326 } else { 2326 } else {
2327 DBF_DEV_EVENT(DBF_ERR, device, 2327 DBF_DEV_EVENT(DBF_ERR, device,
2328 "Unable to allocate ERP request " 2328 "Unable to allocate ERP request "
@@ -2364,7 +2364,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
2364 erp->magic = cqr->magic; 2364 erp->magic = cqr->magic;
2365 erp->expires = cqr->expires; 2365 erp->expires = cqr->expires;
2366 erp->retries = 256; 2366 erp->retries = 256;
2367 erp->buildclk = get_clock(); 2367 erp->buildclk = get_tod_clock();
2368 erp->status = DASD_CQR_FILLED; 2368 erp->status = DASD_CQR_FILLED;
2369 2369
2370 return erp; 2370 return erp;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 6b556995bb33..a2597e683e79 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -448,7 +448,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
448 ccw->count = sizeof(*(lcu->uac)); 448 ccw->count = sizeof(*(lcu->uac));
449 ccw->cda = (__u32)(addr_t) lcu->uac; 449 ccw->cda = (__u32)(addr_t) lcu->uac;
450 450
451 cqr->buildclk = get_clock(); 451 cqr->buildclk = get_tod_clock();
452 cqr->status = DASD_CQR_FILLED; 452 cqr->status = DASD_CQR_FILLED;
453 453
454 /* need to unset flag here to detect race with summary unit check */ 454 /* need to unset flag here to detect race with summary unit check */
@@ -733,7 +733,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
733 cqr->memdev = device; 733 cqr->memdev = device;
734 cqr->block = NULL; 734 cqr->block = NULL;
735 cqr->expires = 5 * HZ; 735 cqr->expires = 5 * HZ;
736 cqr->buildclk = get_clock(); 736 cqr->buildclk = get_tod_clock();
737 cqr->status = DASD_CQR_FILLED; 737 cqr->status = DASD_CQR_FILLED;
738 738
739 rc = dasd_sleep_on_immediatly(cqr); 739 rc = dasd_sleep_on_immediatly(cqr);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 704488d0f819..cc0603358522 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -184,14 +184,14 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
184 private->iob.bio_list = dreq->bio; 184 private->iob.bio_list = dreq->bio;
185 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; 185 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
186 186
187 cqr->startclk = get_clock(); 187 cqr->startclk = get_tod_clock();
188 cqr->starttime = jiffies; 188 cqr->starttime = jiffies;
189 cqr->retries--; 189 cqr->retries--;
190 190
191 rc = dia250(&private->iob, RW_BIO); 191 rc = dia250(&private->iob, RW_BIO);
192 switch (rc) { 192 switch (rc) {
193 case 0: /* Synchronous I/O finished successfully */ 193 case 0: /* Synchronous I/O finished successfully */
194 cqr->stopclk = get_clock(); 194 cqr->stopclk = get_tod_clock();
195 cqr->status = DASD_CQR_SUCCESS; 195 cqr->status = DASD_CQR_SUCCESS;
196 /* Indicate to calling function that only a dasd_schedule_bh() 196 /* Indicate to calling function that only a dasd_schedule_bh()
197 and no timer is needed */ 197 and no timer is needed */
@@ -222,7 +222,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
222 mdsk_term_io(device); 222 mdsk_term_io(device);
223 mdsk_init_io(device, device->block->bp_block, 0, NULL); 223 mdsk_init_io(device, device->block->bp_block, 0, NULL);
224 cqr->status = DASD_CQR_CLEAR_PENDING; 224 cqr->status = DASD_CQR_CLEAR_PENDING;
225 cqr->stopclk = get_clock(); 225 cqr->stopclk = get_tod_clock();
226 dasd_schedule_device_bh(device); 226 dasd_schedule_device_bh(device);
227 return 0; 227 return 0;
228} 228}
@@ -276,7 +276,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
276 return; 276 return;
277 } 277 }
278 278
279 cqr->stopclk = get_clock(); 279 cqr->stopclk = get_tod_clock();
280 280
281 expires = 0; 281 expires = 0;
282 if ((ext_code.subcode & 0xff) == 0) { 282 if ((ext_code.subcode & 0xff) == 0) {
@@ -556,7 +556,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
556 } 556 }
557 } 557 }
558 cqr->retries = DIAG_MAX_RETRIES; 558 cqr->retries = DIAG_MAX_RETRIES;
559 cqr->buildclk = get_clock(); 559 cqr->buildclk = get_tod_clock();
560 if (blk_noretry_request(req) || 560 if (blk_noretry_request(req) ||
561 block->base->features & DASD_FEATURE_FAILFAST) 561 block->base->features & DASD_FEATURE_FAILFAST)
562 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 562 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index e37bc1620d14..33f26bfa62f2 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -862,7 +862,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
862 cqr->expires = 10*HZ; 862 cqr->expires = 10*HZ;
863 cqr->lpm = lpm; 863 cqr->lpm = lpm;
864 cqr->retries = 256; 864 cqr->retries = 256;
865 cqr->buildclk = get_clock(); 865 cqr->buildclk = get_tod_clock();
866 cqr->status = DASD_CQR_FILLED; 866 cqr->status = DASD_CQR_FILLED;
867 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); 867 set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
868} 868}
@@ -1449,7 +1449,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
1449 ccw->count = sizeof(struct dasd_rssd_features); 1449 ccw->count = sizeof(struct dasd_rssd_features);
1450 ccw->cda = (__u32)(addr_t) features; 1450 ccw->cda = (__u32)(addr_t) features;
1451 1451
1452 cqr->buildclk = get_clock(); 1452 cqr->buildclk = get_tod_clock();
1453 cqr->status = DASD_CQR_FILLED; 1453 cqr->status = DASD_CQR_FILLED;
1454 rc = dasd_sleep_on(cqr); 1454 rc = dasd_sleep_on(cqr);
1455 if (rc == 0) { 1455 if (rc == 0) {
@@ -1501,7 +1501,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1501 cqr->block = NULL; 1501 cqr->block = NULL;
1502 cqr->retries = 256; 1502 cqr->retries = 256;
1503 cqr->expires = 10*HZ; 1503 cqr->expires = 10*HZ;
1504 cqr->buildclk = get_clock(); 1504 cqr->buildclk = get_tod_clock();
1505 cqr->status = DASD_CQR_FILLED; 1505 cqr->status = DASD_CQR_FILLED;
1506 return cqr; 1506 return cqr;
1507} 1507}
@@ -1841,7 +1841,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
1841 cqr->startdev = device; 1841 cqr->startdev = device;
1842 cqr->memdev = device; 1842 cqr->memdev = device;
1843 cqr->retries = 255; 1843 cqr->retries = 255;
1844 cqr->buildclk = get_clock(); 1844 cqr->buildclk = get_tod_clock();
1845 cqr->status = DASD_CQR_FILLED; 1845 cqr->status = DASD_CQR_FILLED;
1846 return cqr; 1846 return cqr;
1847} 1847}
@@ -2241,7 +2241,7 @@ dasd_eckd_format_device(struct dasd_device * device,
2241 fcp->startdev = device; 2241 fcp->startdev = device;
2242 fcp->memdev = device; 2242 fcp->memdev = device;
2243 fcp->retries = 256; 2243 fcp->retries = 256;
2244 fcp->buildclk = get_clock(); 2244 fcp->buildclk = get_tod_clock();
2245 fcp->status = DASD_CQR_FILLED; 2245 fcp->status = DASD_CQR_FILLED;
2246 return fcp; 2246 return fcp;
2247} 2247}
@@ -2530,7 +2530,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2530 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2530 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2531 cqr->lpm = startdev->path_data.ppm; 2531 cqr->lpm = startdev->path_data.ppm;
2532 cqr->retries = 256; 2532 cqr->retries = 256;
2533 cqr->buildclk = get_clock(); 2533 cqr->buildclk = get_tod_clock();
2534 cqr->status = DASD_CQR_FILLED; 2534 cqr->status = DASD_CQR_FILLED;
2535 return cqr; 2535 return cqr;
2536} 2536}
@@ -2705,7 +2705,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2705 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2705 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2706 cqr->lpm = startdev->path_data.ppm; 2706 cqr->lpm = startdev->path_data.ppm;
2707 cqr->retries = 256; 2707 cqr->retries = 256;
2708 cqr->buildclk = get_clock(); 2708 cqr->buildclk = get_tod_clock();
2709 cqr->status = DASD_CQR_FILLED; 2709 cqr->status = DASD_CQR_FILLED;
2710 return cqr; 2710 return cqr;
2711} 2711}
@@ -2998,7 +2998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2998 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 2998 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
2999 cqr->lpm = startdev->path_data.ppm; 2999 cqr->lpm = startdev->path_data.ppm;
3000 cqr->retries = 256; 3000 cqr->retries = 256;
3001 cqr->buildclk = get_clock(); 3001 cqr->buildclk = get_tod_clock();
3002 cqr->status = DASD_CQR_FILLED; 3002 cqr->status = DASD_CQR_FILLED;
3003 return cqr; 3003 return cqr;
3004out_error: 3004out_error:
@@ -3201,7 +3201,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3201 cqr->expires = startdev->default_expires * HZ; 3201 cqr->expires = startdev->default_expires * HZ;
3202 cqr->lpm = startdev->path_data.ppm; 3202 cqr->lpm = startdev->path_data.ppm;
3203 cqr->retries = 256; 3203 cqr->retries = 256;
3204 cqr->buildclk = get_clock(); 3204 cqr->buildclk = get_tod_clock();
3205 cqr->status = DASD_CQR_FILLED; 3205 cqr->status = DASD_CQR_FILLED;
3206 3206
3207 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 3207 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
@@ -3402,7 +3402,7 @@ dasd_eckd_release(struct dasd_device *device)
3402 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3402 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3403 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3403 cqr->retries = 2; /* set retry counter to enable basic ERP */
3404 cqr->expires = 2 * HZ; 3404 cqr->expires = 2 * HZ;
3405 cqr->buildclk = get_clock(); 3405 cqr->buildclk = get_tod_clock();
3406 cqr->status = DASD_CQR_FILLED; 3406 cqr->status = DASD_CQR_FILLED;
3407 3407
3408 rc = dasd_sleep_on_immediatly(cqr); 3408 rc = dasd_sleep_on_immediatly(cqr);
@@ -3457,7 +3457,7 @@ dasd_eckd_reserve(struct dasd_device *device)
3457 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3457 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3458 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3458 cqr->retries = 2; /* set retry counter to enable basic ERP */
3459 cqr->expires = 2 * HZ; 3459 cqr->expires = 2 * HZ;
3460 cqr->buildclk = get_clock(); 3460 cqr->buildclk = get_tod_clock();
3461 cqr->status = DASD_CQR_FILLED; 3461 cqr->status = DASD_CQR_FILLED;
3462 3462
3463 rc = dasd_sleep_on_immediatly(cqr); 3463 rc = dasd_sleep_on_immediatly(cqr);
@@ -3511,7 +3511,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
3511 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 3511 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3512 cqr->retries = 2; /* set retry counter to enable basic ERP */ 3512 cqr->retries = 2; /* set retry counter to enable basic ERP */
3513 cqr->expires = 2 * HZ; 3513 cqr->expires = 2 * HZ;
3514 cqr->buildclk = get_clock(); 3514 cqr->buildclk = get_tod_clock();
3515 cqr->status = DASD_CQR_FILLED; 3515 cqr->status = DASD_CQR_FILLED;
3516 3516
3517 rc = dasd_sleep_on_immediatly(cqr); 3517 rc = dasd_sleep_on_immediatly(cqr);
@@ -3572,7 +3572,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
3572 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 3572 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
3573 cqr->retries = 5; 3573 cqr->retries = 5;
3574 cqr->expires = 10 * HZ; 3574 cqr->expires = 10 * HZ;
3575 cqr->buildclk = get_clock(); 3575 cqr->buildclk = get_tod_clock();
3576 cqr->status = DASD_CQR_FILLED; 3576 cqr->status = DASD_CQR_FILLED;
3577 cqr->lpm = usrparm.path_mask; 3577 cqr->lpm = usrparm.path_mask;
3578 3578
@@ -3642,7 +3642,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
3642 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 3642 ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
3643 ccw->cda = (__u32)(addr_t) stats; 3643 ccw->cda = (__u32)(addr_t) stats;
3644 3644
3645 cqr->buildclk = get_clock(); 3645 cqr->buildclk = get_tod_clock();
3646 cqr->status = DASD_CQR_FILLED; 3646 cqr->status = DASD_CQR_FILLED;
3647 rc = dasd_sleep_on(cqr); 3647 rc = dasd_sleep_on(cqr);
3648 if (rc == 0) { 3648 if (rc == 0) {
@@ -3768,7 +3768,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
3768 cqr->memdev = device; 3768 cqr->memdev = device;
3769 cqr->retries = 3; 3769 cqr->retries = 3;
3770 cqr->expires = 10 * HZ; 3770 cqr->expires = 10 * HZ;
3771 cqr->buildclk = get_clock(); 3771 cqr->buildclk = get_tod_clock();
3772 cqr->status = DASD_CQR_FILLED; 3772 cqr->status = DASD_CQR_FILLED;
3773 3773
3774 /* Build the ccws */ 3774 /* Build the ccws */
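
All of the dasd hunks above are one mechanical rename: every timestamp taken from the s390 TOD clock now uses get_tod_clock() instead of the old get_clock() name. A minimal sketch of the stamping pattern as it recurs through the driver; only the struct fields and get_tod_clock() itself come from the patch, the helper is illustrative:

#include <asm/timex.h>		/* get_tod_clock() on s390 */
#include "dasd_int.h"		/* struct dasd_ccw_req, DASD_CQR_FILLED */

/* Illustrative helper, not part of the patch: stamp a freshly built
 * channel request with the current TOD clock value. The start/stop/end
 * timestamps taken later use the same 64-bit TOD format, so request
 * latencies are simple subtractions. */
static void dasd_example_stamp(struct dasd_ccw_req *cqr)
{
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
}
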
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index ff901b5509c1..21ef63cf0960 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -481,7 +481,7 @@ int dasd_eer_enable(struct dasd_device *device)
481 ccw->flags = 0; 481 ccw->flags = 0;
482 ccw->cda = (__u32)(addr_t) cqr->data; 482 ccw->cda = (__u32)(addr_t) cqr->data;
483 483
484 cqr->buildclk = get_clock(); 484 cqr->buildclk = get_tod_clock();
485 cqr->status = DASD_CQR_FILLED; 485 cqr->status = DASD_CQR_FILLED;
486 cqr->callback = dasd_eer_snss_cb; 486 cqr->callback = dasd_eer_snss_cb;
487 487
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d01ef82f8757..3250cb471f78 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -102,7 +102,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
102 pr_err("%s: default ERP has run out of retries and failed\n", 102 pr_err("%s: default ERP has run out of retries and failed\n",
103 dev_name(&device->cdev->dev)); 103 dev_name(&device->cdev->dev));
104 cqr->status = DASD_CQR_FAILED; 104 cqr->status = DASD_CQR_FAILED;
105 cqr->stopclk = get_clock(); 105 cqr->stopclk = get_tod_clock();
106 } 106 }
107 return cqr; 107 return cqr;
108} /* end dasd_default_erp_action */ 108} /* end dasd_default_erp_action */
@@ -146,7 +146,7 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
146 cqr->status = DASD_CQR_DONE; 146 cqr->status = DASD_CQR_DONE;
147 else { 147 else {
148 cqr->status = DASD_CQR_FAILED; 148 cqr->status = DASD_CQR_FAILED;
149 cqr->stopclk = get_clock(); 149 cqr->stopclk = get_tod_clock();
150 } 150 }
151 151
152 return cqr; 152 return cqr;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 414698584344..4dd0e2f6047e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -370,7 +370,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
370 cqr->block = block; 370 cqr->block = block;
371 cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ 371 cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
372 cqr->retries = 32; 372 cqr->retries = 32;
373 cqr->buildclk = get_clock(); 373 cqr->buildclk = get_tod_clock();
374 cqr->status = DASD_CQR_FILLED; 374 cqr->status = DASD_CQR_FILLED;
375 return cqr; 375 return cqr;
376} 376}
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 7ac6bad919ef..3c1ccf494647 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -68,19 +68,34 @@ void scm_initiate_cluster_request(struct scm_request *);
68void scm_cluster_request_irq(struct scm_request *); 68void scm_cluster_request_irq(struct scm_request *);
69bool scm_test_cluster_request(struct scm_request *); 69bool scm_test_cluster_request(struct scm_request *);
70bool scm_cluster_size_valid(void); 70bool scm_cluster_size_valid(void);
71#else 71#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
72#define __scm_free_rq_cluster(scmrq) {} 72static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
73#define __scm_alloc_rq_cluster(scmrq) 0 73static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
74#define scm_request_cluster_init(scmrq) {} 74{
75#define scm_reserve_cluster(scmrq) true 75 return 0;
76#define scm_release_cluster(scmrq) {} 76}
77#define scm_blk_dev_cluster_setup(bdev) {} 77static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
78#define scm_need_cluster_request(scmrq) false 78static inline bool scm_reserve_cluster(struct scm_request *scmrq)
79#define scm_initiate_cluster_request(scmrq) {} 79{
80#define scm_cluster_request_irq(scmrq) {} 80 return true;
81#define scm_test_cluster_request(scmrq) false 81}
82#define scm_cluster_size_valid() true 82static inline void scm_release_cluster(struct scm_request *scmrq) {}
83#endif 83static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
84static inline bool scm_need_cluster_request(struct scm_request *scmrq)
85{
86 return false;
87}
88static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
89static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
90static inline bool scm_test_cluster_request(struct scm_request *scmrq)
91{
92 return false;
93}
94static inline bool scm_cluster_size_valid(void)
95{
96 return true;
97}
98#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
84 99
85extern debug_info_t *scm_debug; 100extern debug_info_t *scm_debug;
86 101
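
The !CONFIG_SCM_BLOCK_CLUSTER_WRITE stubs above switch from object-like macros to static inline functions: behaviour is unchanged and the compiler still emits no code, but the arguments stay type-checked even when the clustering support is compiled out. A self-contained illustration of the difference, using hypothetical foo_* names rather than anything from the driver:

#include <stdbool.h>
#include <stdio.h>

struct foo_request { int id; };

/* Old style stub: the argument is never looked at, so a wrong type or a
 * misspelled variable in the compiled-out configuration goes unnoticed.
 *
 *   #define foo_reserve(rq) true
 *
 * New style stub: still trivially true, but 'rq' must really be a
 * struct foo_request pointer. */
static inline bool foo_reserve(struct foo_request *rq)
{
	return true;
}

int main(void)
{
	struct foo_request rq = { .id = 1 };

	/* Passing e.g. &rq.id here is now diagnosed at compile time. */
	printf("reserved: %d\n", foo_reserve(&rq));
	return 0;
}
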
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 911704571b9c..230697aac94b 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -443,7 +443,7 @@ fs3270_open(struct inode *inode, struct file *filp)
443 tty_kref_put(tty); 443 tty_kref_put(tty);
444 return -ENODEV; 444 return -ENODEV;
445 } 445 }
446 minor = tty->index + RAW3270_FIRSTMINOR; 446 minor = tty->index;
447 tty_kref_put(tty); 447 tty_kref_put(tty);
448 } 448 }
449 mutex_lock(&fs3270_mutex); 449 mutex_lock(&fs3270_mutex);
@@ -524,6 +524,25 @@ static const struct file_operations fs3270_fops = {
524 .llseek = no_llseek, 524 .llseek = no_llseek,
525}; 525};
526 526
527void fs3270_create_cb(int minor)
528{
529 __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
530 device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
531 NULL, "3270/tub%d", minor);
532}
533
534void fs3270_destroy_cb(int minor)
535{
536 device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
537 __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
538}
539
540struct raw3270_notifier fs3270_notifier =
541{
542 .create = fs3270_create_cb,
543 .destroy = fs3270_destroy_cb,
544};
545
527/* 546/*
528 * 3270 fullscreen driver initialization. 547 * 3270 fullscreen driver initialization.
529 */ 548 */
@@ -532,16 +551,20 @@ fs3270_init(void)
532{ 551{
533 int rc; 552 int rc;
534 553
535 rc = register_chrdev(IBM_FS3270_MAJOR, "fs3270", &fs3270_fops); 554 rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
536 if (rc) 555 if (rc)
537 return rc; 556 return rc;
557 device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
558 NULL, "3270/tub");
559 raw3270_register_notifier(&fs3270_notifier);
538 return 0; 560 return 0;
539} 561}
540 562
541static void __exit 563static void __exit
542fs3270_exit(void) 564fs3270_exit(void)
543{ 565{
544 unregister_chrdev(IBM_FS3270_MAJOR, "fs3270"); 566 raw3270_unregister_notifier(&fs3270_notifier);
567 __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
545} 568}
546 569
547MODULE_LICENSE("GPL"); 570MODULE_LICENSE("GPL");
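
fs3270 no longer claims the whole IBM_FS3270_MAJOR at module load: init registers only minor 0 and its "3270/tub" node, and the new raw3270 notifier creates and destroys one character device per 3270 minor as devices appear and disappear. The enabler is __register_chrdev(), which claims just [baseminor, baseminor + count) of a major, whereas the old register_chrdev() call is shorthand for claiming all 256 minors. A short sketch of single-minor registration with hypothetical example_* names:

#include <linux/fs.h>

static const struct file_operations example_fops;	/* assumed elsewhere */

/* Claim exactly one minor of 'major' instead of the whole 0..255 range;
 * register_chrdev(major, name, fops) == __register_chrdev(major, 0, 256, ...). */
static int example_claim_minor(unsigned int major, unsigned int minor)
{
	return __register_chrdev(major, minor, 1, "example", &example_fops);
}

static void example_release_minor(unsigned int major, unsigned int minor)
{
	__unregister_chrdev(major, minor, 1, "example");
}
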
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 9a6c140c5f07..4c9030a5b9f2 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -28,7 +28,7 @@
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30 30
31static struct class *class3270; 31struct class *class3270;
32 32
33/* The main 3270 data structure. */ 33/* The main 3270 data structure. */
34struct raw3270 { 34struct raw3270 {
@@ -37,6 +37,7 @@ struct raw3270 {
37 int minor; 37 int minor;
38 38
39 short model, rows, cols; 39 short model, rows, cols;
40 unsigned int state;
40 unsigned long flags; 41 unsigned long flags;
41 42
42 struct list_head req_queue; /* Request queue. */ 43 struct list_head req_queue; /* Request queue. */
@@ -46,20 +47,26 @@ struct raw3270 {
46 struct timer_list timer; /* Device timer. */ 47 struct timer_list timer; /* Device timer. */
47 48
48 unsigned char *ascebc; /* ascii -> ebcdic table */ 49 unsigned char *ascebc; /* ascii -> ebcdic table */
49 struct device *clttydev; /* 3270-class tty device ptr */
50 struct device *cltubdev; /* 3270-class tub device ptr */
51 50
52 struct raw3270_request init_request; 51 struct raw3270_view init_view;
52 struct raw3270_request init_reset;
53 struct raw3270_request init_readpart;
54 struct raw3270_request init_readmod;
53 unsigned char init_data[256]; 55 unsigned char init_data[256];
54}; 56};
55 57
58/* raw3270->state */
59#define RAW3270_STATE_INIT 0 /* Initial state */
60#define RAW3270_STATE_RESET 1 /* Reset command is pending */
61#define RAW3270_STATE_W4ATTN 2 /* Wait for attention interrupt */
62#define RAW3270_STATE_READMOD 3 /* Read partition is pending */
63#define RAW3270_STATE_READY 4 /* Device is usable by views */
64
56/* raw3270->flags */ 65/* raw3270->flags */
57#define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */ 66#define RAW3270_FLAGS_14BITADDR 0 /* 14-bit buffer addresses */
58#define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */ 67#define RAW3270_FLAGS_BUSY 1 /* Device busy, leave it alone */
59#define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */ 68#define RAW3270_FLAGS_CONSOLE 2 /* Device is the console. */
60#define RAW3270_FLAGS_READY 4 /* Device is useable by views */ 69#define RAW3270_FLAGS_FROZEN 3 /* set if 3270 is frozen for suspend */
61#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
62#define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */
63 70
64/* Semaphore to protect global data of raw3270 (devices, views, etc). */ 71/* Semaphore to protect global data of raw3270 (devices, views, etc). */
65static DEFINE_MUTEX(raw3270_mutex); 72static DEFINE_MUTEX(raw3270_mutex);
@@ -97,6 +104,17 @@ static unsigned char raw3270_ebcgraf[64] = {
97 0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f 104 0xf8, 0xf9, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f
98}; 105};
99 106
107static inline int raw3270_state_ready(struct raw3270 *rp)
108{
109 return rp->state == RAW3270_STATE_READY;
110}
111
112static inline int raw3270_state_final(struct raw3270 *rp)
113{
114 return rp->state == RAW3270_STATE_INIT ||
115 rp->state == RAW3270_STATE_READY;
116}
117
100void 118void
101raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr) 119raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
102{ 120{
@@ -214,7 +232,7 @@ raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
214 * Stop running ccw. 232 * Stop running ccw.
215 */ 233 */
216static int 234static int
217raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq) 235__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
218{ 236{
219 int retries; 237 int retries;
220 int rc; 238 int rc;
@@ -233,18 +251,6 @@ raw3270_halt_io_nolock(struct raw3270 *rp, struct raw3270_request *rq)
233 return rc; 251 return rc;
234} 252}
235 253
236static int
237raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
238{
239 unsigned long flags;
240 int rc;
241
242 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
243 rc = raw3270_halt_io_nolock(rp, rq);
244 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
245 return rc;
246}
247
248/* 254/*
249 * Add the request to the request queue, try to start it if the 255 * Add the request to the request queue, try to start it if the
250 * 3270 device is idle. Return without waiting for end of i/o. 256 * 3270 device is idle. Return without waiting for end of i/o.
@@ -281,8 +287,8 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
281 if (!rp || rp->view != view || 287 if (!rp || rp->view != view ||
282 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 288 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
283 rc = -EACCES; 289 rc = -EACCES;
284 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 290 else if (!raw3270_state_ready(rp))
285 rc = -ENODEV; 291 rc = -EBUSY;
286 else 292 else
287 rc = __raw3270_start(rp, view, rq); 293 rc = __raw3270_start(rp, view, rq);
288 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); 294 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
@@ -299,8 +305,8 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
299 if (!rp || rp->view != view || 305 if (!rp || rp->view != view ||
300 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 306 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
301 rc = -EACCES; 307 rc = -EACCES;
302 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 308 else if (!raw3270_state_ready(rp))
303 rc = -ENODEV; 309 rc = -EBUSY;
304 else 310 else
305 rc = __raw3270_start(rp, view, rq); 311 rc = __raw3270_start(rp, view, rq);
306 return rc; 312 return rc;
@@ -378,7 +384,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
378 case RAW3270_IO_STOP: 384 case RAW3270_IO_STOP:
379 if (!rq) 385 if (!rq)
380 break; 386 break;
381 raw3270_halt_io_nolock(rp, rq); 387 __raw3270_halt_io(rp, rq);
382 rq->rc = -EIO; 388 rq->rc = -EIO;
383 break; 389 break;
384 default: 390 default:
@@ -413,9 +419,14 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
413} 419}
414 420
415/* 421/*
416 * Size sensing. 422 * To determine the size of the 3270 device we need to do:
423 * 1) send a 'read partition' data stream to the device
424 * 2) wait for the attn interrupt that precedes the query reply
425 * 3) do a read modified to get the query reply
426 * To make things worse we have to cope with intervention
427 * required (3270 device switched to 'stand-by') and command
428 * rejects (old devices that can't do 'read partition').
417 */ 429 */
418
419struct raw3270_ua { /* Query Reply structure for Usable Area */ 430struct raw3270_ua { /* Query Reply structure for Usable Area */
420 struct { /* Usable Area Query Reply Base */ 431 struct { /* Usable Area Query Reply Base */
421 short l; /* Length of this structured field */ 432 short l; /* Length of this structured field */
@@ -451,117 +462,21 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */
451 } __attribute__ ((packed)) aua; 462 } __attribute__ ((packed)) aua;
452} __attribute__ ((packed)); 463} __attribute__ ((packed));
453 464
454static struct diag210 raw3270_init_diag210;
455static DEFINE_MUTEX(raw3270_init_mutex);
456
457static int
458raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
459 struct irb *irb)
460{
461 /*
462 * Unit-Check Processing:
463 * Expect Command Reject or Intervention Required.
464 */
465 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
466 /* Request finished abnormally. */
467 if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
468 set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
469 return RAW3270_IO_BUSY;
470 }
471 }
472 if (rq) {
473 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
474 if (irb->ecw[0] & SNS0_CMD_REJECT)
475 rq->rc = -EOPNOTSUPP;
476 else
477 rq->rc = -EIO;
478 } else
479 /* Request finished normally. Copy residual count. */
480 rq->rescnt = irb->scsw.cmd.count;
481 }
482 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
483 set_bit(RAW3270_FLAGS_ATTN, &view->dev->flags);
484 wake_up(&raw3270_wait_queue);
485 }
486 return RAW3270_IO_DONE;
487}
488
489static struct raw3270_fn raw3270_init_fn = {
490 .intv = raw3270_init_irq
491};
492
493static struct raw3270_view raw3270_init_view = {
494 .fn = &raw3270_init_fn
495};
496
497/*
498 * raw3270_wait/raw3270_wait_interruptible/__raw3270_wakeup
499 * Wait for end of request. The request must have been started
500 * with raw3270_start, rc = 0. The device lock may NOT have been
501 * released between calling raw3270_start and raw3270_wait.
502 */
503static void 465static void
504raw3270_wake_init(struct raw3270_request *rq, void *data) 466raw3270_size_device_vm(struct raw3270 *rp)
505{
506 wake_up((wait_queue_head_t *) data);
507}
508
509/*
510 * Special wait function that can cope with console initialization.
511 */
512static int
513raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
514 struct raw3270_request *rq)
515{
516 unsigned long flags;
517 int rc;
518
519#ifdef CONFIG_TN3270_CONSOLE
520 if (raw3270_registered == 0) {
521 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
522 rq->callback = NULL;
523 rc = __raw3270_start(rp, view, rq);
524 if (rc == 0)
525 while (!raw3270_request_final(rq)) {
526 wait_cons_dev();
527 barrier();
528 }
529 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
530 return rq->rc;
531 }
532#endif
533 rq->callback = raw3270_wake_init;
534 rq->callback_data = &raw3270_wait_queue;
535 spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
536 rc = __raw3270_start(rp, view, rq);
537 spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
538 if (rc)
539 return rc;
540 /* Now wait for the completion. */
541 rc = wait_event_interruptible(raw3270_wait_queue,
542 raw3270_request_final(rq));
543 if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */
544 raw3270_halt_io(view->dev, rq);
545 /* No wait for the halt to complete. */
546 wait_event(raw3270_wait_queue, raw3270_request_final(rq));
547 return -ERESTARTSYS;
548 }
549 return rq->rc;
550}
551
552static int
553__raw3270_size_device_vm(struct raw3270 *rp)
554{ 467{
555 int rc, model; 468 int rc, model;
556 struct ccw_dev_id dev_id; 469 struct ccw_dev_id dev_id;
470 struct diag210 diag_data;
557 471
558 ccw_device_get_id(rp->cdev, &dev_id); 472 ccw_device_get_id(rp->cdev, &dev_id);
559 raw3270_init_diag210.vrdcdvno = dev_id.devno; 473 diag_data.vrdcdvno = dev_id.devno;
560 raw3270_init_diag210.vrdclen = sizeof(struct diag210); 474 diag_data.vrdclen = sizeof(struct diag210);
561 rc = diag210(&raw3270_init_diag210); 475 rc = diag210(&diag_data);
562 if (rc) 476 model = diag_data.vrdccrmd;
563 return rc; 477 /* Use default model 2 if the size could not be detected */
564 model = raw3270_init_diag210.vrdccrmd; 478 if (rc || model < 2 || model > 5)
479 model = 2;
565 switch (model) { 480 switch (model) {
566 case 2: 481 case 2:
567 rp->model = model; 482 rp->model = model;
@@ -583,77 +498,25 @@ __raw3270_size_device_vm(struct raw3270 *rp)
583 rp->rows = 27; 498 rp->rows = 27;
584 rp->cols = 132; 499 rp->cols = 132;
585 break; 500 break;
586 default:
587 rc = -EOPNOTSUPP;
588 break;
589 } 501 }
590 return rc;
591} 502}
592 503
593static int 504static void
594__raw3270_size_device(struct raw3270 *rp) 505raw3270_size_device(struct raw3270 *rp)
595{ 506{
596 static const unsigned char wbuf[] =
597 { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
598 struct raw3270_ua *uap; 507 struct raw3270_ua *uap;
599 int rc;
600 508
601 /*
602 * To determine the size of the 3270 device we need to do:
603 * 1) send a 'read partition' data stream to the device
604 * 2) wait for the attn interrupt that precedes the query reply
605 * 3) do a read modified to get the query reply
606 * To make things worse we have to cope with intervention
607 * required (3270 device switched to 'stand-by') and command
608 * rejects (old devices that can't do 'read partition').
609 */
610 memset(&rp->init_request, 0, sizeof(rp->init_request));
611 memset(&rp->init_data, 0, 256);
612 /* Store 'read partition' data stream to init_data */
613 memcpy(&rp->init_data, wbuf, sizeof(wbuf));
614 INIT_LIST_HEAD(&rp->init_request.list);
615 rp->init_request.ccw.cmd_code = TC_WRITESF;
616 rp->init_request.ccw.flags = CCW_FLAG_SLI;
617 rp->init_request.ccw.count = sizeof(wbuf);
618 rp->init_request.ccw.cda = (__u32) __pa(&rp->init_data);
619
620 rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
621 if (rc)
622 /* Check error cases: -ERESTARTSYS, -EIO and -EOPNOTSUPP */
623 return rc;
624
625 /* Wait for attention interrupt. */
626#ifdef CONFIG_TN3270_CONSOLE
627 if (raw3270_registered == 0) {
628 unsigned long flags;
629
630 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
631 while (!test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags))
632 wait_cons_dev();
633 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
634 } else
635#endif
636 rc = wait_event_interruptible(raw3270_wait_queue,
637 test_and_clear_bit(RAW3270_FLAGS_ATTN, &rp->flags));
638 if (rc)
639 return rc;
640
641 /*
642 * The device accepted the 'read partition' command. Now
643 * set up a read ccw and issue it.
644 */
645 rp->init_request.ccw.cmd_code = TC_READMOD;
646 rp->init_request.ccw.flags = CCW_FLAG_SLI;
647 rp->init_request.ccw.count = sizeof(rp->init_data);
648 rp->init_request.ccw.cda = (__u32) __pa(rp->init_data);
649 rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request);
650 if (rc)
651 return rc;
652 /* Got a Query Reply */ 509 /* Got a Query Reply */
653 uap = (struct raw3270_ua *) (rp->init_data + 1); 510 uap = (struct raw3270_ua *) (rp->init_data + 1);
654 /* Paranoia check. */ 511 /* Paranoia check. */
655 if (rp->init_data[0] != 0x88 || uap->uab.qcode != 0x81) 512 if (rp->init_readmod.rc || rp->init_data[0] != 0x88 ||
656 return -EOPNOTSUPP; 513 uap->uab.qcode != 0x81) {
514 /* Couldn't detect size. Use default model 2. */
515 rp->model = 2;
516 rp->rows = 24;
517 rp->cols = 80;
518 return;
519 }
657 /* Copy rows/columns of default Usable Area */ 520 /* Copy rows/columns of default Usable Area */
658 rp->rows = uap->uab.h; 521 rp->rows = uap->uab.h;
659 rp->cols = uap->uab.w; 522 rp->cols = uap->uab.w;
@@ -666,66 +529,131 @@ __raw3270_size_device(struct raw3270 *rp)
666 rp->rows = uap->aua.hauai; 529 rp->rows = uap->aua.hauai;
667 rp->cols = uap->aua.wauai; 530 rp->cols = uap->aua.wauai;
668 } 531 }
669 return 0; 532 /* Try to find a model. */
533 rp->model = 0;
534 if (rp->rows == 24 && rp->cols == 80)
535 rp->model = 2;
536 if (rp->rows == 32 && rp->cols == 80)
537 rp->model = 3;
538 if (rp->rows == 43 && rp->cols == 80)
539 rp->model = 4;
540 if (rp->rows == 27 && rp->cols == 132)
541 rp->model = 5;
670} 542}
671 543
672static int 544static void
673raw3270_size_device(struct raw3270 *rp) 545raw3270_size_device_done(struct raw3270 *rp)
674{ 546{
675 int rc; 547 struct raw3270_view *view;
676 548
677 mutex_lock(&raw3270_init_mutex);
678 rp->view = &raw3270_init_view;
679 raw3270_init_view.dev = rp;
680 if (MACHINE_IS_VM)
681 rc = __raw3270_size_device_vm(rp);
682 else
683 rc = __raw3270_size_device(rp);
684 raw3270_init_view.dev = NULL;
685 rp->view = NULL; 549 rp->view = NULL;
686 mutex_unlock(&raw3270_init_mutex); 550 rp->state = RAW3270_STATE_READY;
687 if (rc == 0) { /* Found something. */ 551 /* Notify views about new size */
688 /* Try to find a model. */ 552 list_for_each_entry(view, &rp->view_list, list)
689 rp->model = 0; 553 if (view->fn->resize)
690 if (rp->rows == 24 && rp->cols == 80) 554 view->fn->resize(view, rp->model, rp->rows, rp->cols);
691 rp->model = 2; 555 /* Setup processing done, now activate a view */
692 if (rp->rows == 32 && rp->cols == 80) 556 list_for_each_entry(view, &rp->view_list, list) {
693 rp->model = 3; 557 rp->view = view;
694 if (rp->rows == 43 && rp->cols == 80) 558 if (view->fn->activate(view) == 0)
695 rp->model = 4; 559 break;
696 if (rp->rows == 27 && rp->cols == 132) 560 rp->view = NULL;
697 rp->model = 5;
698 } else {
699 /* Couldn't detect size. Use default model 2. */
700 rp->model = 2;
701 rp->rows = 24;
702 rp->cols = 80;
703 return 0;
704 } 561 }
705 return rc; 562}
563
564static void
565raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
566{
567 struct raw3270 *rp = rq->view->dev;
568
569 raw3270_size_device(rp);
570 raw3270_size_device_done(rp);
571}
572
573static void
574raw3270_read_modified(struct raw3270 *rp)
575{
576 if (rp->state != RAW3270_STATE_W4ATTN)
577 return;
578 /* Use 'read modified' to get the result of a read partition. */
579 memset(&rp->init_readmod, 0, sizeof(rp->init_readmod));
580 memset(&rp->init_data, 0, sizeof(rp->init_data));
581 rp->init_readmod.ccw.cmd_code = TC_READMOD;
582 rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
583 rp->init_readmod.ccw.count = sizeof(rp->init_data);
584 rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data);
585 rp->init_readmod.callback = raw3270_read_modified_cb;
586 rp->state = RAW3270_STATE_READMOD;
587 raw3270_start_irq(&rp->init_view, &rp->init_readmod);
588}
589
590static void
591raw3270_writesf_readpart(struct raw3270 *rp)
592{
593 static const unsigned char wbuf[] =
594 { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
595
596 /* Store 'read partition' data stream to init_data */
597 memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
598 memset(&rp->init_data, 0, sizeof(rp->init_data));
599 memcpy(&rp->init_data, wbuf, sizeof(wbuf));
600 rp->init_readpart.ccw.cmd_code = TC_WRITESF;
601 rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
602 rp->init_readpart.ccw.count = sizeof(wbuf);
603 rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data);
604 rp->state = RAW3270_STATE_W4ATTN;
605 raw3270_start_irq(&rp->init_view, &rp->init_readpart);
606}
607
608/*
609 * Device reset
610 */
611static void
612raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
613{
614 struct raw3270 *rp = rq->view->dev;
615
616 if (rp->state != RAW3270_STATE_RESET)
617 return;
618 if (rq && rq->rc) {
619 /* Reset command failed. */
620 rp->state = RAW3270_STATE_INIT;
621 } else if (0 && MACHINE_IS_VM) {
622 raw3270_size_device_vm(rp);
623 raw3270_size_device_done(rp);
624 } else
625 raw3270_writesf_readpart(rp);
706} 626}
707 627
708static int 628static int
709raw3270_reset_device(struct raw3270 *rp) 629__raw3270_reset_device(struct raw3270 *rp)
710{ 630{
711 int rc; 631 int rc;
712 632
713 mutex_lock(&raw3270_init_mutex); 633 /* Store reset data stream to init_data/init_reset */
714 memset(&rp->init_request, 0, sizeof(rp->init_request)); 634 memset(&rp->init_reset, 0, sizeof(rp->init_reset));
715 memset(&rp->init_data, 0, sizeof(rp->init_data)); 635 memset(&rp->init_data, 0, sizeof(rp->init_data));
716 /* Store reset data stream to init_data/init_request */
717 rp->init_data[0] = TW_KR; 636 rp->init_data[0] = TW_KR;
718 INIT_LIST_HEAD(&rp->init_request.list); 637 rp->init_reset.ccw.cmd_code = TC_EWRITEA;
719 rp->init_request.ccw.cmd_code = TC_EWRITEA; 638 rp->init_reset.ccw.flags = CCW_FLAG_SLI;
720 rp->init_request.ccw.flags = CCW_FLAG_SLI; 639 rp->init_reset.ccw.count = 1;
721 rp->init_request.ccw.count = 1; 640 rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data);
722 rp->init_request.ccw.cda = (__u32) __pa(rp->init_data); 641 rp->init_reset.callback = raw3270_reset_device_cb;
723 rp->view = &raw3270_init_view; 642 rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
724 raw3270_init_view.dev = rp; 643 if (rc == 0 && rp->state == RAW3270_STATE_INIT)
725 rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); 644 rp->state = RAW3270_STATE_RESET;
726 raw3270_init_view.dev = NULL; 645 return rc;
727 rp->view = NULL; 646}
728 mutex_unlock(&raw3270_init_mutex); 647
648static int
649raw3270_reset_device(struct raw3270 *rp)
650{
651 unsigned long flags;
652 int rc;
653
654 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
655 rc = __raw3270_reset_device(rp);
656 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
729 return rc; 657 return rc;
730} 658}
731 659
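
The block above replaces the old synchronous sizing code (raw3270_start_init(), the shared init view and its mutex) with a per-device state machine driven entirely from request callbacks under the ccw device lock: the EWRITEA reset moves the device to RAW3270_STATE_RESET, its callback writes the 'read partition' query and enters RAW3270_STATE_W4ATTN, the attention interrupt issues 'read modified' (RAW3270_STATE_READMOD), and the final callback parses the query reply, stores rows/cols and marks the device RAW3270_STATE_READY. A self-contained model of that callback-chaining pattern; every name below is an illustrative stand-in, none of it is driver code:

#include <stdio.h>

struct dev;
struct req {
	int rc;
	void (*callback)(struct dev *dev, struct req *rq);
};
struct dev {
	int state;		/* 0=INIT 1=RESET 3=READMOD 4=READY */
	int rows, cols;
	struct req reset, readmod;
};

/* Stand-in for starting channel I/O; the "hardware" completes at once. */
static void start(struct dev *dev, struct req *rq)
{
	rq->rc = 0;
	rq->callback(dev, rq);
}

static void readmod_cb(struct dev *dev, struct req *rq)
{
	dev->rows = 43;		/* would be parsed from the query reply */
	dev->cols = 80;
	dev->state = 4;		/* READY: views can be resized/activated */
}

static void reset_cb(struct dev *dev, struct req *rq)
{
	if (rq->rc) {
		dev->state = 0;	/* reset failed, fall back to INIT */
		return;
	}
	/* 'read partition' written; the attention interrupt would then
	 * kick off the 'read modified' request. */
	dev->state = 3;
	dev->readmod.callback = readmod_cb;
	start(dev, &dev->readmod);
}

int main(void)
{
	struct dev dev = { .state = 1 };	/* reset request issued */

	dev.reset.callback = reset_cb;
	start(&dev, &dev.reset);
	printf("state=%d size=%dx%d\n", dev.state, dev.rows, dev.cols);
	return 0;
}
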
@@ -739,13 +667,50 @@ raw3270_reset(struct raw3270_view *view)
739 if (!rp || rp->view != view || 667 if (!rp || rp->view != view ||
740 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 668 test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
741 rc = -EACCES; 669 rc = -EACCES;
742 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 670 else if (!raw3270_state_ready(rp))
743 rc = -ENODEV; 671 rc = -EBUSY;
744 else 672 else
745 rc = raw3270_reset_device(view->dev); 673 rc = raw3270_reset_device(view->dev);
746 return rc; 674 return rc;
747} 675}
748 676
677static int
678raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
679 struct irb *irb)
680{
681 struct raw3270 *rp;
682
683 /*
684 * Unit-Check Processing:
685 * Expect Command Reject or Intervention Required.
686 */
687 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
688 /* Request finished abnormally. */
689 if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
690 set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
691 return RAW3270_IO_BUSY;
692 }
693 }
694 if (rq) {
695 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
696 if (irb->ecw[0] & SNS0_CMD_REJECT)
697 rq->rc = -EOPNOTSUPP;
698 else
699 rq->rc = -EIO;
700 }
701 }
702 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
703 /* Queue read modified after attention interrupt */
704 rp = view->dev;
705 raw3270_read_modified(rp);
706 }
707 return RAW3270_IO_DONE;
708}
709
710static struct raw3270_fn raw3270_init_fn = {
711 .intv = raw3270_init_irq
712};
713
749/* 714/*
750 * Setup new 3270 device. 715 * Setup new 3270 device.
751 */ 716 */
@@ -774,6 +739,10 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
774 INIT_LIST_HEAD(&rp->req_queue); 739 INIT_LIST_HEAD(&rp->req_queue);
775 INIT_LIST_HEAD(&rp->view_list); 740 INIT_LIST_HEAD(&rp->view_list);
776 741
742 rp->init_view.dev = rp;
743 rp->init_view.fn = &raw3270_init_fn;
744 rp->view = &rp->init_view;
745
777 /* 746 /*
778 * Add device to list and find the smallest unused minor 747 * Add device to list and find the smallest unused minor
779 * number for it. Note: there is no device with minor 0, 748 * number for it. Note: there is no device with minor 0,
@@ -812,6 +781,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
812 */ 781 */
813struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) 782struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
814{ 783{
784 unsigned long flags;
815 struct raw3270 *rp; 785 struct raw3270 *rp;
816 char *ascebc; 786 char *ascebc;
817 int rc; 787 int rc;
@@ -822,16 +792,15 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
822 if (rc) 792 if (rc)
823 return ERR_PTR(rc); 793 return ERR_PTR(rc);
824 set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags); 794 set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
825 rc = raw3270_reset_device(rp); 795 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
826 if (rc) 796 do {
827 return ERR_PTR(rc); 797 __raw3270_reset_device(rp);
828 rc = raw3270_size_device(rp); 798 while (!raw3270_state_final(rp)) {
829 if (rc) 799 wait_cons_dev();
830 return ERR_PTR(rc); 800 barrier();
831 rc = raw3270_reset_device(rp); 801 }
832 if (rc) 802 } while (rp->state != RAW3270_STATE_READY);
833 return ERR_PTR(rc); 803 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
834 set_bit(RAW3270_FLAGS_READY, &rp->flags);
835 return rp; 804 return rp;
836} 805}
837 806
@@ -893,13 +862,13 @@ raw3270_activate_view(struct raw3270_view *view)
893 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 862 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
894 if (rp->view == view) 863 if (rp->view == view)
895 rc = 0; 864 rc = 0;
896 else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) 865 else if (!raw3270_state_ready(rp))
897 rc = -ENODEV; 866 rc = -EBUSY;
898 else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 867 else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
899 rc = -EACCES; 868 rc = -EACCES;
900 else { 869 else {
901 oldview = NULL; 870 oldview = NULL;
902 if (rp->view) { 871 if (rp->view && rp->view->fn->deactivate) {
903 oldview = rp->view; 872 oldview = rp->view;
904 oldview->fn->deactivate(oldview); 873 oldview->fn->deactivate(oldview);
905 } 874 }
@@ -944,7 +913,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
944 list_del_init(&view->list); 913 list_del_init(&view->list);
945 list_add_tail(&view->list, &rp->view_list); 914 list_add_tail(&view->list, &rp->view_list);
946 /* Try to activate another view. */ 915 /* Try to activate another view. */
947 if (test_bit(RAW3270_FLAGS_READY, &rp->flags) && 916 if (raw3270_state_ready(rp) &&
948 !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { 917 !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
949 list_for_each_entry(view, &rp->view_list, list) { 918 list_for_each_entry(view, &rp->view_list, list) {
950 rp->view = view; 919 rp->view = view;
@@ -975,18 +944,16 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
975 if (rp->minor != minor) 944 if (rp->minor != minor)
976 continue; 945 continue;
977 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 946 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
978 if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { 947 atomic_set(&view->ref_count, 2);
979 atomic_set(&view->ref_count, 2); 948 view->dev = rp;
980 view->dev = rp; 949 view->fn = fn;
981 view->fn = fn; 950 view->model = rp->model;
982 view->model = rp->model; 951 view->rows = rp->rows;
983 view->rows = rp->rows; 952 view->cols = rp->cols;
984 view->cols = rp->cols; 953 view->ascebc = rp->ascebc;
985 view->ascebc = rp->ascebc; 954 spin_lock_init(&view->lock);
986 spin_lock_init(&view->lock); 955 list_add(&view->list, &rp->view_list);
987 list_add(&view->list, &rp->view_list); 956 rc = 0;
988 rc = 0;
989 }
990 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 957 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
991 break; 958 break;
992 } 959 }
@@ -1010,14 +977,11 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
1010 if (rp->minor != minor) 977 if (rp->minor != minor)
1011 continue; 978 continue;
1012 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 979 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
1013 if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { 980 list_for_each_entry(tmp, &rp->view_list, list) {
1014 view = ERR_PTR(-ENOENT); 981 if (tmp->fn == fn) {
1015 list_for_each_entry(tmp, &rp->view_list, list) { 982 raw3270_get_view(tmp);
1016 if (tmp->fn == fn) { 983 view = tmp;
1017 raw3270_get_view(tmp); 984 break;
1018 view = tmp;
1019 break;
1020 }
1021 } 985 }
1022 } 986 }
1023 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 987 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
@@ -1044,7 +1008,7 @@ raw3270_del_view(struct raw3270_view *view)
1044 rp->view = NULL; 1008 rp->view = NULL;
1045 } 1009 }
1046 list_del_init(&view->list); 1010 list_del_init(&view->list);
1047 if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) && 1011 if (!rp->view && raw3270_state_ready(rp) &&
1048 !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { 1012 !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) {
1049 /* Try to activate another view. */ 1013 /* Try to activate another view. */
1050 list_for_each_entry(nv, &rp->view_list, list) { 1014 list_for_each_entry(nv, &rp->view_list, list) {
@@ -1072,10 +1036,6 @@ raw3270_delete_device(struct raw3270 *rp)
1072 1036
1073 /* Remove from device chain. */ 1037 /* Remove from device chain. */
1074 mutex_lock(&raw3270_mutex); 1038 mutex_lock(&raw3270_mutex);
1075 if (rp->clttydev && !IS_ERR(rp->clttydev))
1076 device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1077 if (rp->cltubdev && !IS_ERR(rp->cltubdev))
1078 device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, rp->minor));
1079 list_del_init(&rp->list); 1039 list_del_init(&rp->list);
1080 mutex_unlock(&raw3270_mutex); 1040 mutex_unlock(&raw3270_mutex);
1081 1041
@@ -1139,75 +1099,34 @@ static struct attribute_group raw3270_attr_group = {
1139 1099
1140static int raw3270_create_attributes(struct raw3270 *rp) 1100static int raw3270_create_attributes(struct raw3270 *rp)
1141{ 1101{
1142 int rc; 1102 return sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1143
1144 rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1145 if (rc)
1146 goto out;
1147
1148 rp->clttydev = device_create(class3270, &rp->cdev->dev,
1149 MKDEV(IBM_TTY3270_MAJOR, rp->minor), NULL,
1150 "tty%s", dev_name(&rp->cdev->dev));
1151 if (IS_ERR(rp->clttydev)) {
1152 rc = PTR_ERR(rp->clttydev);
1153 goto out_ttydev;
1154 }
1155
1156 rp->cltubdev = device_create(class3270, &rp->cdev->dev,
1157 MKDEV(IBM_FS3270_MAJOR, rp->minor), NULL,
1158 "tub%s", dev_name(&rp->cdev->dev));
1159 if (!IS_ERR(rp->cltubdev))
1160 goto out;
1161
1162 rc = PTR_ERR(rp->cltubdev);
1163 device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor));
1164
1165out_ttydev:
1166 sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group);
1167out:
1168 return rc;
1169} 1103}
1170 1104
1171/* 1105/*
1172 * Notifier for device addition/removal 1106 * Notifier for device addition/removal
1173 */ 1107 */
1174struct raw3270_notifier {
1175 struct list_head list;
1176 void (*notifier)(int, int);
1177};
1178
1179static LIST_HEAD(raw3270_notifier); 1108static LIST_HEAD(raw3270_notifier);
1180 1109
1181int raw3270_register_notifier(void (*notifier)(int, int)) 1110int raw3270_register_notifier(struct raw3270_notifier *notifier)
1182{ 1111{
1183 struct raw3270_notifier *np;
1184 struct raw3270 *rp; 1112 struct raw3270 *rp;
1185 1113
1186 np = kmalloc(sizeof(struct raw3270_notifier), GFP_KERNEL);
1187 if (!np)
1188 return -ENOMEM;
1189 np->notifier = notifier;
1190 mutex_lock(&raw3270_mutex); 1114 mutex_lock(&raw3270_mutex);
1191 list_add_tail(&np->list, &raw3270_notifier); 1115 list_add_tail(&notifier->list, &raw3270_notifier);
1192 list_for_each_entry(rp, &raw3270_devices, list) { 1116 list_for_each_entry(rp, &raw3270_devices, list)
1193 get_device(&rp->cdev->dev); 1117 notifier->create(rp->minor);
1194 notifier(rp->minor, 1);
1195 }
1196 mutex_unlock(&raw3270_mutex); 1118 mutex_unlock(&raw3270_mutex);
1197 return 0; 1119 return 0;
1198} 1120}
1199 1121
1200void raw3270_unregister_notifier(void (*notifier)(int, int)) 1122void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
1201{ 1123{
1202 struct raw3270_notifier *np; 1124 struct raw3270 *rp;
1203 1125
1204 mutex_lock(&raw3270_mutex); 1126 mutex_lock(&raw3270_mutex);
1205 list_for_each_entry(np, &raw3270_notifier, list) 1127 list_for_each_entry(rp, &raw3270_devices, list)
1206 if (np->notifier == notifier) { 1128 notifier->destroy(rp->minor);
1207 list_del(&np->list); 1129 list_del(&notifier->list);
1208 kfree(np);
1209 break;
1210 }
1211 mutex_unlock(&raw3270_mutex); 1130 mutex_unlock(&raw3270_mutex);
1212} 1131}
1213 1132
@@ -1217,29 +1136,20 @@ void raw3270_unregister_notifier(void (*notifier)(int, int))
1217static int 1136static int
1218raw3270_set_online (struct ccw_device *cdev) 1137raw3270_set_online (struct ccw_device *cdev)
1219{ 1138{
1220 struct raw3270 *rp;
1221 struct raw3270_notifier *np; 1139 struct raw3270_notifier *np;
1140 struct raw3270 *rp;
1222 int rc; 1141 int rc;
1223 1142
1224 rp = raw3270_create_device(cdev); 1143 rp = raw3270_create_device(cdev);
1225 if (IS_ERR(rp)) 1144 if (IS_ERR(rp))
1226 return PTR_ERR(rp); 1145 return PTR_ERR(rp);
1227 rc = raw3270_reset_device(rp);
1228 if (rc)
1229 goto failure;
1230 rc = raw3270_size_device(rp);
1231 if (rc)
1232 goto failure;
1233 rc = raw3270_reset_device(rp);
1234 if (rc)
1235 goto failure;
1236 rc = raw3270_create_attributes(rp); 1146 rc = raw3270_create_attributes(rp);
1237 if (rc) 1147 if (rc)
1238 goto failure; 1148 goto failure;
1239 set_bit(RAW3270_FLAGS_READY, &rp->flags); 1149 raw3270_reset_device(rp);
1240 mutex_lock(&raw3270_mutex); 1150 mutex_lock(&raw3270_mutex);
1241 list_for_each_entry(np, &raw3270_notifier, list) 1151 list_for_each_entry(np, &raw3270_notifier, list)
1242 np->notifier(rp->minor, 1); 1152 np->create(rp->minor);
1243 mutex_unlock(&raw3270_mutex); 1153 mutex_unlock(&raw3270_mutex);
1244 return 0; 1154 return 0;
1245 1155
@@ -1268,14 +1178,14 @@ raw3270_remove (struct ccw_device *cdev)
1268 */ 1178 */
1269 if (rp == NULL) 1179 if (rp == NULL)
1270 return; 1180 return;
1271 clear_bit(RAW3270_FLAGS_READY, &rp->flags);
1272 1181
1273 sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group); 1182 sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
1274 1183
1275 /* Deactivate current view and remove all views. */ 1184 /* Deactivate current view and remove all views. */
1276 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1185 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1277 if (rp->view) { 1186 if (rp->view) {
1278 rp->view->fn->deactivate(rp->view); 1187 if (rp->view->fn->deactivate)
1188 rp->view->fn->deactivate(rp->view);
1279 rp->view = NULL; 1189 rp->view = NULL;
1280 } 1190 }
1281 while (!list_empty(&rp->view_list)) { 1191 while (!list_empty(&rp->view_list)) {
@@ -1290,7 +1200,7 @@ raw3270_remove (struct ccw_device *cdev)
1290 1200
1291 mutex_lock(&raw3270_mutex); 1201 mutex_lock(&raw3270_mutex);
1292 list_for_each_entry(np, &raw3270_notifier, list) 1202 list_for_each_entry(np, &raw3270_notifier, list)
1293 np->notifier(rp->minor, 0); 1203 np->destroy(rp->minor);
1294 mutex_unlock(&raw3270_mutex); 1204 mutex_unlock(&raw3270_mutex);
1295 1205
1296 /* Reset 3270 device. */ 1206 /* Reset 3270 device. */
@@ -1324,7 +1234,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev)
1324 if (!rp) 1234 if (!rp)
1325 return 0; 1235 return 0;
1326 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 1236 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
1327 if (rp->view) 1237 if (rp->view && rp->view->fn->deactivate)
1328 rp->view->fn->deactivate(rp->view); 1238 rp->view->fn->deactivate(rp->view);
1329 if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) { 1239 if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) {
1330 /* 1240 /*
@@ -1351,7 +1261,7 @@ static int raw3270_pm_start(struct ccw_device *cdev)
1351 return 0; 1261 return 0;
1352 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); 1262 spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
1353 clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags); 1263 clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
1354 if (rp->view) 1264 if (rp->view && rp->view->fn->activate)
1355 rp->view->fn->activate(rp->view); 1265 rp->view->fn->activate(rp->view);
1356 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 1266 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
1357 return 0; 1267 return 0;
@@ -1434,6 +1344,7 @@ MODULE_LICENSE("GPL");
1434module_init(raw3270_init); 1344module_init(raw3270_init);
1435module_exit(raw3270_exit); 1345module_exit(raw3270_exit);
1436 1346
1347EXPORT_SYMBOL(class3270);
1437EXPORT_SYMBOL(raw3270_request_alloc); 1348EXPORT_SYMBOL(raw3270_request_alloc);
1438EXPORT_SYMBOL(raw3270_request_free); 1349EXPORT_SYMBOL(raw3270_request_free);
1439EXPORT_SYMBOL(raw3270_request_reset); 1350EXPORT_SYMBOL(raw3270_request_reset);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index ed34eb2199cc..7b73ff8c1bd7 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -91,6 +91,7 @@ struct raw3270_iocb {
91 91
92struct raw3270; 92struct raw3270;
93struct raw3270_view; 93struct raw3270_view;
94extern struct class *class3270;
94 95
95/* 3270 CCW request */ 96/* 3270 CCW request */
96struct raw3270_request { 97struct raw3270_request {
@@ -140,6 +141,7 @@ struct raw3270_fn {
140 struct raw3270_request *, struct irb *); 141 struct raw3270_request *, struct irb *);
141 void (*release)(struct raw3270_view *); 142 void (*release)(struct raw3270_view *);
142 void (*free)(struct raw3270_view *); 143 void (*free)(struct raw3270_view *);
144 void (*resize)(struct raw3270_view *, int, int, int);
143}; 145};
144 146
145/* 147/*
@@ -192,8 +194,14 @@ struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
192void raw3270_wait_cons_dev(struct raw3270 *); 194void raw3270_wait_cons_dev(struct raw3270 *);
193 195
194/* Notifier for device addition/removal */ 196/* Notifier for device addition/removal */
195int raw3270_register_notifier(void (*notifier)(int, int)); 197struct raw3270_notifier {
196void raw3270_unregister_notifier(void (*notifier)(int, int)); 198 struct list_head list;
199 void (*create)(int minor);
200 void (*destroy)(int minor);
201};
202
203int raw3270_register_notifier(struct raw3270_notifier *);
204void raw3270_unregister_notifier(struct raw3270_notifier *);
197void raw3270_pm_unfreeze(struct raw3270_view *); 205void raw3270_pm_unfreeze(struct raw3270_view *);
198 206
199/* 207/*
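
The notifier interface changes from a bare function pointer taking (minor, online) to a caller-embedded struct raw3270_notifier with separate create/destroy callbacks, so registration no longer needs to allocate and cannot fail with -ENOMEM. As the raw3270.c hunk shows, registering replays ->create for every already-known device and unregistering replays ->destroy, which is what lets fs3270 and tty3270 manage their device nodes lazily. A minimal consumer sketch against the new interface; the example_* bodies are placeholders:

#include <linux/init.h>
#include <linux/module.h>
#include "raw3270.h"

static void example_create(int minor)
{
	/* create the per-device node / tty for this minor */
}

static void example_destroy(int minor)
{
	/* undo whatever example_create() set up */
}

static struct raw3270_notifier example_notifier = {
	.create		= example_create,
	.destroy	= example_destroy,
};

static int __init example_init(void)
{
	/* also invokes .create for each 3270 device that already exists */
	return raw3270_register_notifier(&example_notifier);
}

static void __exit example_exit(void)
{
	/* invokes .destroy for each remaining device, then unlinks */
	raw3270_unregister_notifier(&example_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
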
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 12c16a65dd25..bd6871bf545a 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -450,7 +450,7 @@ sclp_sync_wait(void)
450 timeout = 0; 450 timeout = 0;
451 if (timer_pending(&sclp_request_timer)) { 451 if (timer_pending(&sclp_request_timer)) {
452 /* Get timeout TOD value */ 452 /* Get timeout TOD value */
453 timeout = get_clock() + 453 timeout = get_tod_clock() +
454 sclp_tod_from_jiffies(sclp_request_timer.expires - 454 sclp_tod_from_jiffies(sclp_request_timer.expires -
455 jiffies); 455 jiffies);
456 } 456 }
@@ -472,7 +472,7 @@ sclp_sync_wait(void)
472 while (sclp_running_state != sclp_running_state_idle) { 472 while (sclp_running_state != sclp_running_state_idle) {
473 /* Check for expired request timer */ 473 /* Check for expired request timer */
474 if (timer_pending(&sclp_request_timer) && 474 if (timer_pending(&sclp_request_timer) &&
475 get_clock() > timeout && 475 get_tod_clock() > timeout &&
476 del_timer(&sclp_request_timer)) 476 del_timer(&sclp_request_timer))
477 sclp_request_timer.function(sclp_request_timer.data); 477 sclp_request_timer.function(sclp_request_timer.data);
478 cpu_relax(); 478 cpu_relax();
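
sclp_sync_wait() polls with a deadline expressed in TOD clock units: the remaining jiffies of the pending request timer are converted to TOD ticks, added to the current get_tod_clock() value, and the timer function is called by hand once the clock passes that deadline. Since the s390 TOD clock is a monotonically increasing 64-bit counter advancing 4096 units per microsecond, the comparison is plain unsigned arithmetic. A small sketch of the same idea; the conversion helper is illustrative, the real code uses its own sclp_tod_from_jiffies():

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <asm/timex.h>
#include <asm/processor.h>

/* Illustrative: one jiffy is (USEC_PER_SEC / HZ) microseconds, and each
 * microsecond is 4096 TOD clock units. */
static inline u64 example_tod_from_jiffies(unsigned long j)
{
	return (u64)j * (USEC_PER_SEC / HZ) * 4096ULL;
}

static void example_poll_for(unsigned long timeout_jiffies)
{
	u64 deadline = get_tod_clock() +
		       example_tod_from_jiffies(timeout_jiffies);

	while (get_tod_clock() <= deadline)
		cpu_relax();	/* interrupts keep doing the real work */
}
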
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index c44d13f607bc..30a2255389e5 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -56,7 +56,6 @@ static int __initdata early_read_info_sccb_valid;
56 56
57u64 sclp_facilities; 57u64 sclp_facilities;
58static u8 sclp_fac84; 58static u8 sclp_fac84;
59static u8 sclp_fac85;
60static unsigned long long rzm; 59static unsigned long long rzm;
61static unsigned long long rnmax; 60static unsigned long long rnmax;
62 61
@@ -131,7 +130,8 @@ void __init sclp_facilities_detect(void)
131 sccb = &early_read_info_sccb; 130 sccb = &early_read_info_sccb;
132 sclp_facilities = sccb->facilities; 131 sclp_facilities = sccb->facilities;
133 sclp_fac84 = sccb->fac84; 132 sclp_fac84 = sccb->fac84;
134 sclp_fac85 = sccb->fac85; 133 if (sccb->fac85 & 0x02)
134 S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
135 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; 135 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
136 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; 136 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
137 rzm <<= 20; 137 rzm <<= 20;
@@ -171,12 +171,6 @@ unsigned long long sclp_get_rzm(void)
171 return rzm; 171 return rzm;
172} 172}
173 173
174u8 sclp_get_fac85(void)
175{
176 return sclp_fac85;
177}
178EXPORT_SYMBOL_GPL(sclp_get_fac85);
179
180/* 174/*
181 * This function will be called after sclp_facilities_detect(), which gets 175 * This function will be called after sclp_facilities_detect(), which gets
182 * called from early.c code. Therefore the sccb should have valid contents. 176 * called from early.c code. Therefore the sccb should have valid contents.
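
Rather than exporting a sclp_get_fac85() accessor, the facility bit is folded into the machine flags during early facility detection, so later code can test a lowcore flag without calling back into the sclp layer. A sketch of the consumer side; MACHINE_FLAG_ESOP comes from the hunk above, while wrapping the test in a MACHINE_HAS_ESOP-style macro in asm/setup.h is the usual pattern and is only assumed here:

#include <linux/types.h>
#include <asm/lowcore.h>
#include <asm/setup.h>

static bool example_has_esop(void)
{
	/* set once in sclp_facilities_detect() when fac85 bit 0x02 is on */
	return S390_lowcore.machine_flags & MACHINE_FLAG_ESOP;
}
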
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3860e796b65f..b907dba24025 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/console.h> 16#include <linux/console.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/workqueue.h>
18 19
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/bootmem.h> 21#include <linux/bootmem.h>
@@ -80,6 +81,8 @@ struct tty3270 {
80 unsigned int highlight; /* Blink/reverse/underscore */ 81 unsigned int highlight; /* Blink/reverse/underscore */
81 unsigned int f_color; /* Foreground color */ 82 unsigned int f_color; /* Foreground color */
82 struct tty3270_line *screen; 83 struct tty3270_line *screen;
84 unsigned int n_model, n_cols, n_rows; /* New model & size */
85 struct work_struct resize_work;
83 86
84 /* Input stuff. */ 87 /* Input stuff. */
85 struct string *prompt; /* Output string for input area. */ 88 struct string *prompt; /* Output string for input area. */
@@ -115,6 +118,7 @@ struct tty3270 {
115#define TTY_UPDATE_ALL 16 /* Recreate screen. */ 118#define TTY_UPDATE_ALL 16 /* Recreate screen. */
116 119
117static void tty3270_update(struct tty3270 *); 120static void tty3270_update(struct tty3270 *);
121static void tty3270_resize_work(struct work_struct *work);
118 122
119/* 123/*
120 * Setup timeout for a device. On timeout trigger an update. 124 * Setup timeout for a device. On timeout trigger an update.
@@ -683,12 +687,6 @@ tty3270_alloc_view(void)
683 INIT_LIST_HEAD(&tp->update); 687 INIT_LIST_HEAD(&tp->update);
684 INIT_LIST_HEAD(&tp->rcl_lines); 688 INIT_LIST_HEAD(&tp->rcl_lines);
685 tp->rcl_max = 20; 689 tp->rcl_max = 20;
686 tty_port_init(&tp->port);
687 setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
688 (unsigned long) tp);
689 tasklet_init(&tp->readlet,
690 (void (*)(unsigned long)) tty3270_read_tasklet,
691 (unsigned long) tp->read);
692 690
693 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) { 691 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
694 tp->freemem_pages[pages] = (void *) 692 tp->freemem_pages[pages] = (void *)
@@ -710,6 +708,15 @@ tty3270_alloc_view(void)
710 tp->kbd = kbd_alloc(); 708 tp->kbd = kbd_alloc();
711 if (!tp->kbd) 709 if (!tp->kbd)
712 goto out_reset; 710 goto out_reset;
711
712 tty_port_init(&tp->port);
713 setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
714 (unsigned long) tp);
715 tasklet_init(&tp->readlet,
716 (void (*)(unsigned long)) tty3270_read_tasklet,
717 (unsigned long) tp->read);
718 INIT_WORK(&tp->resize_work, tty3270_resize_work);
719
713 return tp; 720 return tp;
714 721
715out_reset: 722out_reset:
@@ -752,42 +759,96 @@ tty3270_free_view(struct tty3270 *tp)
752/* 759/*
753 * Allocate tty3270 screen. 760 * Allocate tty3270 screen.
754 */ 761 */
755static int 762static struct tty3270_line *
756tty3270_alloc_screen(struct tty3270 *tp) 763tty3270_alloc_screen(unsigned int rows, unsigned int cols)
757{ 764{
765 struct tty3270_line *screen;
758 unsigned long size; 766 unsigned long size;
759 int lines; 767 int lines;
760 768
761 size = sizeof(struct tty3270_line) * (tp->view.rows - 2); 769 size = sizeof(struct tty3270_line) * (rows - 2);
762 tp->screen = kzalloc(size, GFP_KERNEL); 770 screen = kzalloc(size, GFP_KERNEL);
763 if (!tp->screen) 771 if (!screen)
764 goto out_err; 772 goto out_err;
765 for (lines = 0; lines < tp->view.rows - 2; lines++) { 773 for (lines = 0; lines < rows - 2; lines++) {
766 size = sizeof(struct tty3270_cell) * tp->view.cols; 774 size = sizeof(struct tty3270_cell) * cols;
767 tp->screen[lines].cells = kzalloc(size, GFP_KERNEL); 775 screen[lines].cells = kzalloc(size, GFP_KERNEL);
768 if (!tp->screen[lines].cells) 776 if (!screen[lines].cells)
769 goto out_screen; 777 goto out_screen;
770 } 778 }
771 return 0; 779 return screen;
772out_screen: 780out_screen:
773 while (lines--) 781 while (lines--)
774 kfree(tp->screen[lines].cells); 782 kfree(screen[lines].cells);
775 kfree(tp->screen); 783 kfree(screen);
776out_err: 784out_err:
777 return -ENOMEM; 785 return ERR_PTR(-ENOMEM);
778} 786}
779 787
780/* 788/*
781 * Free tty3270 screen. 789 * Free tty3270 screen.
782 */ 790 */
783static void 791static void
784tty3270_free_screen(struct tty3270 *tp) 792tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
785{ 793{
786 int lines; 794 int lines;
787 795
788 for (lines = 0; lines < tp->view.rows - 2; lines++) 796 for (lines = 0; lines < rows - 2; lines++)
789 kfree(tp->screen[lines].cells); 797 kfree(screen[lines].cells);
790 kfree(tp->screen); 798 kfree(screen);
799}
800
801/*
802 * Resize tty3270 screen
803 */
804static void tty3270_resize_work(struct work_struct *work)
805{
806 struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
807 struct tty3270_line *screen, *oscreen;
808 struct tty_struct *tty;
809 unsigned int orows;
810 struct winsize ws;
811
812 screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
 813	if (IS_ERR(screen))
814 return;
815 /* Switch to new output size */
816 spin_lock_bh(&tp->view.lock);
817 oscreen = tp->screen;
818 orows = tp->view.rows;
819 tp->view.model = tp->n_model;
820 tp->view.rows = tp->n_rows;
821 tp->view.cols = tp->n_cols;
822 tp->screen = screen;
823 free_string(&tp->freemem, tp->prompt);
824 free_string(&tp->freemem, tp->status);
825 tty3270_create_prompt(tp);
826 tty3270_create_status(tp);
827 tp->nr_up = 0;
828 while (tp->nr_lines < tp->view.rows - 2)
829 tty3270_blank_line(tp);
830 tp->update_flags = TTY_UPDATE_ALL;
831 spin_unlock_bh(&tp->view.lock);
832 tty3270_free_screen(oscreen, orows);
833 tty3270_set_timer(tp, 1);
 834	/* Inform tty layer about new size */
835 tty = tty_port_tty_get(&tp->port);
836 if (!tty)
837 return;
838 ws.ws_row = tp->view.rows - 2;
839 ws.ws_col = tp->view.cols;
840 tty_do_resize(tty, &ws);
841}
842
843static void
844tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
845{
846 struct tty3270 *tp = container_of(view, struct tty3270, view);
847
848 tp->n_model = model;
849 tp->n_rows = rows;
850 tp->n_cols = cols;
851 schedule_work(&tp->resize_work);
791} 852}
792 853
793/* 854/*
@@ -815,7 +876,8 @@ static void
815tty3270_free(struct raw3270_view *view) 876tty3270_free(struct raw3270_view *view)
816{ 877{
817 struct tty3270 *tp = container_of(view, struct tty3270, view); 878 struct tty3270 *tp = container_of(view, struct tty3270, view);
818 tty3270_free_screen(tp); 879
880 tty3270_free_screen(tp->screen, tp->view.rows);
819 tty3270_free_view(tp); 881 tty3270_free_view(tp);
820} 882}
821 883
@@ -827,9 +889,8 @@ tty3270_del_views(void)
827{ 889{
828 int i; 890 int i;
829 891
830 for (i = 0; i < tty3270_max_index; i++) { 892 for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
831 struct raw3270_view *view = 893 struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
832 raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR);
833 if (!IS_ERR(view)) 894 if (!IS_ERR(view))
834 raw3270_del_view(view); 895 raw3270_del_view(view);
835 } 896 }
@@ -840,7 +901,8 @@ static struct raw3270_fn tty3270_fn = {
840 .deactivate = tty3270_deactivate, 901 .deactivate = tty3270_deactivate,
841 .intv = (void *) tty3270_irq, 902 .intv = (void *) tty3270_irq,
842 .release = tty3270_release, 903 .release = tty3270_release,
843 .free = tty3270_free 904 .free = tty3270_free,
905 .resize = tty3270_resize
844}; 906};
845 907
846/* 908/*
@@ -853,8 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
853 int i, rc; 915 int i, rc;
854 916
855 /* Check if the tty3270 is already there. */ 917 /* Check if the tty3270 is already there. */
856 view = raw3270_find_view(&tty3270_fn, 918 view = raw3270_find_view(&tty3270_fn, tty->index);
857 tty->index + RAW3270_FIRSTMINOR);
858 if (!IS_ERR(view)) { 919 if (!IS_ERR(view)) {
859 tp = container_of(view, struct tty3270, view); 920 tp = container_of(view, struct tty3270, view);
860 tty->driver_data = tp; 921 tty->driver_data = tp;
@@ -866,29 +927,26 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
866 tp->inattr = TF_INPUT; 927 tp->inattr = TF_INPUT;
867 return tty_port_install(&tp->port, driver, tty); 928 return tty_port_install(&tp->port, driver, tty);
868 } 929 }
869 if (tty3270_max_index < tty->index + 1) 930 if (tty3270_max_index < tty->index)
870 tty3270_max_index = tty->index + 1; 931 tty3270_max_index = tty->index;
871
872 /* Quick exit if there is no device for tty->index. */
873 if (PTR_ERR(view) == -ENODEV)
874 return -ENODEV;
875 932
876 /* Allocate tty3270 structure on first open. */ 933 /* Allocate tty3270 structure on first open. */
877 tp = tty3270_alloc_view(); 934 tp = tty3270_alloc_view();
878 if (IS_ERR(tp)) 935 if (IS_ERR(tp))
879 return PTR_ERR(tp); 936 return PTR_ERR(tp);
880 937
881 rc = raw3270_add_view(&tp->view, &tty3270_fn, 938 rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
882 tty->index + RAW3270_FIRSTMINOR);
883 if (rc) { 939 if (rc) {
884 tty3270_free_view(tp); 940 tty3270_free_view(tp);
885 return rc; 941 return rc;
886 } 942 }
887 943
888 rc = tty3270_alloc_screen(tp); 944 tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
889 if (rc) { 945 if (IS_ERR(tp->screen)) {
946 rc = PTR_ERR(tp->screen);
890 raw3270_put_view(&tp->view); 947 raw3270_put_view(&tp->view);
891 raw3270_del_view(&tp->view); 948 raw3270_del_view(&tp->view);
949 tty3270_free_view(tp);
892 return rc; 950 return rc;
893 } 951 }
894 952
@@ -926,6 +984,20 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
926} 984}
927 985
928/* 986/*
987 * This routine is called whenever a 3270 tty is opened.
988 */
989static int
990tty3270_open(struct tty_struct *tty, struct file *filp)
991{
992 struct tty3270 *tp = tty->driver_data;
993 struct tty_port *port = &tp->port;
994
995 port->count++;
996 tty_port_tty_set(port, tty);
997 return 0;
998}
999
1000/*
929 * This routine is called when the 3270 tty is closed. We wait 1001 * This routine is called when the 3270 tty is closed. We wait
930 * for the remaining request to be completed. Then we clean up. 1002 * for the remaining request to be completed. Then we clean up.
931 */ 1003 */
@@ -1753,6 +1825,7 @@ static long tty3270_compat_ioctl(struct tty_struct *tty,
1753static const struct tty_operations tty3270_ops = { 1825static const struct tty_operations tty3270_ops = {
1754 .install = tty3270_install, 1826 .install = tty3270_install,
1755 .cleanup = tty3270_cleanup, 1827 .cleanup = tty3270_cleanup,
1828 .open = tty3270_open,
1756 .close = tty3270_close, 1829 .close = tty3270_close,
1757 .write = tty3270_write, 1830 .write = tty3270_write,
1758 .put_char = tty3270_put_char, 1831 .put_char = tty3270_put_char,
@@ -1771,6 +1844,22 @@ static const struct tty_operations tty3270_ops = {
1771 .set_termios = tty3270_set_termios 1844 .set_termios = tty3270_set_termios
1772}; 1845};
1773 1846
1847void tty3270_create_cb(int minor)
1848{
1849 tty_register_device(tty3270_driver, minor, NULL);
1850}
1851
1852void tty3270_destroy_cb(int minor)
1853{
1854 tty_unregister_device(tty3270_driver, minor);
1855}
1856
1857struct raw3270_notifier tty3270_notifier =
1858{
1859 .create = tty3270_create_cb,
1860 .destroy = tty3270_destroy_cb,
1861};
1862
1774/* 1863/*
1775 * 3270 tty registration code called from tty_init(). 1864 * 3270 tty registration code called from tty_init().
1776 * Most kernel services (incl. kmalloc) are available at this point. 1865
@@ -1780,23 +1869,25 @@ static int __init tty3270_init(void)
1780 struct tty_driver *driver; 1869 struct tty_driver *driver;
1781 int ret; 1870 int ret;
1782 1871
1783 driver = alloc_tty_driver(RAW3270_MAXDEVS); 1872 driver = tty_alloc_driver(RAW3270_MAXDEVS,
1784 if (!driver) 1873 TTY_DRIVER_REAL_RAW |
1785 return -ENOMEM; 1874 TTY_DRIVER_DYNAMIC_DEV |
1875 TTY_DRIVER_RESET_TERMIOS);
1876 if (IS_ERR(driver))
1877 return PTR_ERR(driver);
1786 1878
1787 /* 1879 /*
1788 * Initialize the tty_driver structure 1880 * Initialize the tty_driver structure
1789 * Entries in tty3270_driver that are NOT initialized: 1881 * Entries in tty3270_driver that are NOT initialized:
1790 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc 1882 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
1791 */ 1883 */
1792 driver->driver_name = "ttyTUB"; 1884 driver->driver_name = "tty3270";
1793 driver->name = "ttyTUB"; 1885 driver->name = "3270/tty";
1794 driver->major = IBM_TTY3270_MAJOR; 1886 driver->major = IBM_TTY3270_MAJOR;
1795 driver->minor_start = RAW3270_FIRSTMINOR; 1887 driver->minor_start = 0;
1796 driver->type = TTY_DRIVER_TYPE_SYSTEM; 1888 driver->type = TTY_DRIVER_TYPE_SYSTEM;
1797 driver->subtype = SYSTEM_TYPE_TTY; 1889 driver->subtype = SYSTEM_TYPE_TTY;
1798 driver->init_termios = tty_std_termios; 1890 driver->init_termios = tty_std_termios;
1799 driver->flags = TTY_DRIVER_RESET_TERMIOS;
1800 tty_set_operations(driver, &tty3270_ops); 1891 tty_set_operations(driver, &tty3270_ops);
1801 ret = tty_register_driver(driver); 1892 ret = tty_register_driver(driver);
1802 if (ret) { 1893 if (ret) {
@@ -1804,6 +1895,7 @@ static int __init tty3270_init(void)
1804 return ret; 1895 return ret;
1805 } 1896 }
1806 tty3270_driver = driver; 1897 tty3270_driver = driver;
1898 raw3270_register_notifier(&tty3270_notifier);
1807 return 0; 1899 return 0;
1808} 1900}
1809 1901
@@ -1812,6 +1904,7 @@ tty3270_exit(void)
1812{ 1904{
1813 struct tty_driver *driver; 1905 struct tty_driver *driver;
1814 1906
1907 raw3270_unregister_notifier(&tty3270_notifier);
1815 driver = tty3270_driver; 1908 driver = tty3270_driver;
1816 tty3270_driver = NULL; 1909 tty3270_driver = NULL;
1817 tty_unregister_driver(driver); 1910 tty_unregister_driver(driver);
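
The tty3270 resize path above reduces to a small, reusable pattern: look up the tty attached to a tty_port, fill in a struct winsize, and hand it to the newly exported tty_do_resize(). A minimal sketch of that pattern follows; the port pointer and geometry values are placeholders, not part of the patch:

#include <linux/tty.h>

/* Illustrative only: tell the tty layer that the terminal geometry changed. */
static void example_notify_resize(struct tty_port *port, int rows, int cols)
{
	struct tty_struct *tty;
	struct winsize ws = { };

	tty = tty_port_tty_get(port);	/* takes a tty reference, may return NULL */
	if (!tty)
		return;
	ws.ws_row = rows;
	ws.ws_col = cols;
	tty_do_resize(tty, &ws);	/* exported by the tty_io.c hunk further below */
	tty_kref_put(tty);		/* drop the reference from tty_port_tty_get() */
}
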
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index e3b9308b0fe3..1d61a01576d2 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -62,6 +62,7 @@ static struct dentry *zcore_dir;
62static struct dentry *zcore_file; 62static struct dentry *zcore_file;
63static struct dentry *zcore_memmap_file; 63static struct dentry *zcore_memmap_file;
64static struct dentry *zcore_reipl_file; 64static struct dentry *zcore_reipl_file;
65static struct dentry *zcore_hsa_file;
65static struct ipl_parameter_block *ipl_block; 66static struct ipl_parameter_block *ipl_block;
66 67
67/* 68/*
@@ -77,6 +78,8 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
77 int offs, blk_num; 78 int offs, blk_num;
78 static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); 79 static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
79 80
81 if (!hsa_available)
82 return -ENODATA;
80 if (count == 0) 83 if (count == 0)
81 return 0; 84 return 0;
82 85
@@ -278,6 +281,15 @@ next:
278} 281}
279 282
280/* 283/*
284 * Release the HSA
285 */
286static void release_hsa(void)
287{
288 diag308(DIAG308_REL_HSA, NULL);
289 hsa_available = 0;
290}
291
292/*
281 * Read routine for zcore character device 293 * Read routine for zcore character device
282 * First 4K are dump header 294 * First 4K are dump header
283 * Next 32MB are HSA Memory 295 * Next 32MB are HSA Memory
@@ -363,8 +375,8 @@ static int zcore_open(struct inode *inode, struct file *filp)
363 375
364static int zcore_release(struct inode *inode, struct file *filep) 376static int zcore_release(struct inode *inode, struct file *filep)
365{ 377{
366 diag308(DIAG308_REL_HSA, NULL); 378 if (hsa_available)
367 hsa_available = 0; 379 release_hsa();
368 return 0; 380 return 0;
369} 381}
370 382
@@ -474,6 +486,41 @@ static const struct file_operations zcore_reipl_fops = {
474 .llseek = no_llseek, 486 .llseek = no_llseek,
475}; 487};
476 488
489static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
490 size_t count, loff_t *ppos)
491{
492 static char str[18];
493
494 if (hsa_available)
495 snprintf(str, sizeof(str), "%lx\n", ZFCPDUMP_HSA_SIZE);
496 else
497 snprintf(str, sizeof(str), "0\n");
498 return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
499}
500
501static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
502 size_t count, loff_t *ppos)
503{
504 char value;
505
506 if (*ppos != 0)
507 return -EPIPE;
508 if (copy_from_user(&value, buf, 1))
509 return -EFAULT;
510 if (value != '0')
511 return -EINVAL;
512 release_hsa();
513 return count;
514}
515
516static const struct file_operations zcore_hsa_fops = {
517 .owner = THIS_MODULE,
518 .write = zcore_hsa_write,
519 .read = zcore_hsa_read,
520 .open = nonseekable_open,
521 .llseek = no_llseek,
522};
523
477#ifdef CONFIG_32BIT 524#ifdef CONFIG_32BIT
478 525
479static void __init set_lc_mask(struct save_area *map) 526static void __init set_lc_mask(struct save_area *map)
@@ -590,7 +637,7 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr)
590 hdr->rmem_size = memory; 637 hdr->rmem_size = memory;
591 hdr->mem_end = sys_info.mem_size; 638 hdr->mem_end = sys_info.mem_size;
592 hdr->num_pages = memory / PAGE_SIZE; 639 hdr->num_pages = memory / PAGE_SIZE;
593 hdr->tod = get_clock(); 640 hdr->tod = get_tod_clock();
594 get_cpu_id(&hdr->cpu_id); 641 get_cpu_id(&hdr->cpu_id);
595 for (i = 0; zfcpdump_save_areas[i]; i++) { 642 for (i = 0; zfcpdump_save_areas[i]; i++) {
596 prefix = zfcpdump_save_areas[i]->pref_reg; 643 prefix = zfcpdump_save_areas[i]->pref_reg;
@@ -658,6 +705,7 @@ static int __init zcore_init(void)
658 rc = check_sdias(); 705 rc = check_sdias();
659 if (rc) 706 if (rc)
660 goto fail; 707 goto fail;
708 hsa_available = 1;
661 709
662 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); 710 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
663 if (rc) 711 if (rc)
@@ -714,9 +762,16 @@ static int __init zcore_init(void)
714 rc = -ENOMEM; 762 rc = -ENOMEM;
715 goto fail_memmap_file; 763 goto fail_memmap_file;
716 } 764 }
717 hsa_available = 1; 765 zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
766 NULL, &zcore_hsa_fops);
767 if (!zcore_hsa_file) {
768 rc = -ENOMEM;
769 goto fail_reipl_file;
770 }
718 return 0; 771 return 0;
719 772
773fail_reipl_file:
774 debugfs_remove(zcore_reipl_file);
720fail_memmap_file: 775fail_memmap_file:
721 debugfs_remove(zcore_memmap_file); 776 debugfs_remove(zcore_memmap_file);
722fail_file: 777fail_file:
@@ -733,6 +788,7 @@ static void __exit zcore_exit(void)
733 debug_unregister(zcore_dbf); 788 debug_unregister(zcore_dbf);
734 sclp_sdias_exit(); 789 sclp_sdias_exit();
735 free_page((unsigned long) ipl_block); 790 free_page((unsigned long) ipl_block);
791 debugfs_remove(zcore_hsa_file);
736 debugfs_remove(zcore_reipl_file); 792 debugfs_remove(zcore_reipl_file);
737 debugfs_remove(zcore_memmap_file); 793 debugfs_remove(zcore_memmap_file);
738 debugfs_remove(zcore_file); 794 debugfs_remove(zcore_file);
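
The new debugfs "hsa" file gives user space a way to query the HSA size and to release the HSA early; writing '0' triggers the same release_hsa() call that zcore_release() performs. A small illustrative user-space reader/writer is shown below. The mount point /sys/kernel/debug and the directory name "zcore" are assumptions, since the directory name is not visible in this hunk:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/zcore/hsa", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);	/* HSA size in hex, or "0" once released */
	if (n > 0) {
		buf[n] = '\0';
		printf("HSA size: %s", buf);
	}
	if (write(fd, "0", 1) != 1)		/* writing '0' releases the HSA */
		perror("write");
	close(fd);
	return 0;
}
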
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 10729bbceced..31ceef1beb8b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -435,7 +435,6 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
435 435
436static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 436static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
437{ 437{
438#ifdef CONFIG_PCI
439 switch (sei_area->cc) { 438 switch (sei_area->cc) {
440 case 1: 439 case 1:
441 zpci_event_error(sei_area->ccdf); 440 zpci_event_error(sei_area->ccdf);
@@ -444,11 +443,10 @@ static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
444 zpci_event_availability(sei_area->ccdf); 443 zpci_event_availability(sei_area->ccdf);
445 break; 444 break;
446 default: 445 default:
447 CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n", 446 CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
448 sei_area->cc); 447 sei_area->cc);
449 break; 448 break;
450 } 449 }
451#endif
452} 450}
453 451
454static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) 452static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
@@ -471,13 +469,19 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
471 chsc_process_sei_scm_change(sei_area); 469 chsc_process_sei_scm_change(sei_area);
472 break; 470 break;
473 default: /* other stuff */ 471 default: /* other stuff */
474 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", 472 CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
475 sei_area->cc); 473 sei_area->cc);
476 break; 474 break;
477 } 475 }
476
477 /* Check if we might have lost some information. */
478 if (sei_area->flags & 0x40) {
479 CIO_CRW_EVENT(2, "chsc: event overflow\n");
480 css_schedule_eval_all();
481 }
478} 482}
479 483
480static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm) 484static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
481{ 485{
482 do { 486 do {
483 memset(sei, 0, sizeof(*sei)); 487 memset(sei, 0, sizeof(*sei));
@@ -488,40 +492,37 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
488 if (chsc(sei)) 492 if (chsc(sei))
489 break; 493 break;
490 494
491 if (sei->response.code == 0x0001) { 495 if (sei->response.code != 0x0001) {
492 CIO_CRW_EVENT(2, "chsc: sei successful\n");
493
494 /* Check if we might have lost some information. */
495 if (sei->u.nt0_area.flags & 0x40) {
496 CIO_CRW_EVENT(2, "chsc: event overflow\n");
497 css_schedule_eval_all();
498 }
499
500 switch (sei->nt) {
501 case 0:
502 chsc_process_sei_nt0(&sei->u.nt0_area);
503 break;
504 case 2:
505 chsc_process_sei_nt2(&sei->u.nt2_area);
506 break;
507 default:
508 CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n",
509 sei->nt);
510 break;
511 }
512 } else {
513 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 496 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
514 sei->response.code); 497 sei->response.code);
515 break; 498 break;
516 } 499 }
517 } while (sei->u.nt0_area.flags & 0x80);
518 500
519 return 0; 501 CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
502 switch (sei->nt) {
503 case 0:
504 chsc_process_sei_nt0(&sei->u.nt0_area);
505 break;
506 case 2:
507 chsc_process_sei_nt2(&sei->u.nt2_area);
508 break;
509 default:
510 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
511 break;
512 }
513 } while (sei->u.nt0_area.flags & 0x80);
520} 514}
521 515
516/*
517 * Handle channel subsystem related CRWs.
518 * Use store event information to find out what's going on.
519 *
520 * Note: Access to sei_page is serialized through machine check handler
521 * thread, so no need for locking.
522 */
522static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 523static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
523{ 524{
524 struct chsc_sei *sei; 525 struct chsc_sei *sei = sei_page;
525 526
526 if (overflow) { 527 if (overflow) {
527 css_schedule_eval_all(); 528 css_schedule_eval_all();
@@ -531,14 +532,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
531 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 532 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
532 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, 533 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
533 crw0->erc, crw0->rsid); 534 crw0->erc, crw0->rsid);
534 if (!sei_page)
535 return;
536 /* Access to sei_page is serialized through machine check handler
537 * thread, so no need for locking. */
538 sei = sei_page;
539 535
540 CIO_TRACE_EVENT(2, "prcss"); 536 CIO_TRACE_EVENT(2, "prcss");
541 __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); 537 chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
542} 538}
543 539
544void chsc_chp_online(struct chp_id chpid) 540void chsc_chp_online(struct chp_id chpid)
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 662dab4b93e6..227e05f674b3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -157,7 +157,7 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
157#ifdef CONFIG_SCM_BUS 157#ifdef CONFIG_SCM_BUS
158int scm_update_information(void); 158int scm_update_information(void);
159#else /* CONFIG_SCM_BUS */ 159#else /* CONFIG_SCM_BUS */
160#define scm_update_information() 0 160static inline int scm_update_information(void) { return 0; }
161#endif /* CONFIG_SCM_BUS */ 161#endif /* CONFIG_SCM_BUS */
162 162
163 163
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index c8faf6230b0f..986ef6a92a41 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -962,9 +962,9 @@ static void css_reset(void)
962 atomic_inc(&chpid_reset_count); 962 atomic_inc(&chpid_reset_count);
963 } 963 }
964 /* Wait for machine check for all channel paths. */ 964 /* Wait for machine check for all channel paths. */
965 timeout = get_clock() + (RCHP_TIMEOUT << 12); 965 timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
966 while (atomic_read(&chpid_reset_count) != 0) { 966 while (atomic_read(&chpid_reset_count) != 0) {
967 if (get_clock() > timeout) 967 if (get_tod_clock() > timeout)
968 break; 968 break;
969 cpu_relax(); 969 cpu_relax();
970 } 970 }
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index c9fc61c0a866..4495e0627a40 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -33,7 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/timex.h> /* get_clock() */ 36#include <linux/timex.h> /* get_tod_clock() */
37 37
38#include <asm/ccwdev.h> 38#include <asm/ccwdev.h>
39#include <asm/cio.h> 39#include <asm/cio.h>
@@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
326 memcpy(cmb_data->last_block, hw_block, cmb_data->size); 326 memcpy(cmb_data->last_block, hw_block, cmb_data->size);
327 memcpy(reference_buf, hw_block, cmb_data->size); 327 memcpy(reference_buf, hw_block, cmb_data->size);
328 } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); 328 } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
329 cmb_data->last_update = get_clock(); 329 cmb_data->last_update = get_tod_clock();
330 kfree(reference_buf); 330 kfree(reference_buf);
331 return 0; 331 return 0;
332} 332}
@@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
428 memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); 428 memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size);
429 cmb_data->last_update = 0; 429 cmb_data->last_update = 0;
430 } 430 }
431 cdev->private->cmb_start_time = get_clock(); 431 cdev->private->cmb_start_time = get_tod_clock();
432 spin_unlock_irq(cdev->ccwlock); 432 spin_unlock_irq(cdev->ccwlock);
433} 433}
434 434
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fd00afd8b850..a239237d43f3 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -780,7 +780,7 @@ static int __init setup_css(int nr)
780 css->cssid = nr; 780 css->cssid = nr;
781 dev_set_name(&css->device, "css%x", nr); 781 dev_set_name(&css->device, "css%x", nr);
782 css->device.release = channel_subsystem_release; 782 css->device.release = channel_subsystem_release;
783 tod_high = (u32) (get_clock() >> 32); 783 tod_high = (u32) (get_tod_clock() >> 32);
784 css_generate_pgid(css, tod_high); 784 css_generate_pgid(css, tod_high);
785 return 0; 785 return 0;
786} 786}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7cd5c6812ac7..c6767f5a58b2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -632,6 +632,14 @@ initiate_logging(struct device *dev, struct device_attribute *attr,
632 return count; 632 return count;
633} 633}
634 634
635static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
636 char *buf)
637{
638 struct subchannel *sch = to_subchannel(dev);
639
640 return sprintf(buf, "%02x\n", sch->vpm);
641}
642
635static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); 643static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
636static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); 644static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
637static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); 645static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
@@ -640,11 +648,13 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
640static DEVICE_ATTR(online, 0644, online_show, online_store); 648static DEVICE_ATTR(online, 0644, online_show, online_store);
641static DEVICE_ATTR(availability, 0444, available_show, NULL); 649static DEVICE_ATTR(availability, 0444, available_show, NULL);
642static DEVICE_ATTR(logging, 0200, NULL, initiate_logging); 650static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
651static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
643 652
644static struct attribute *io_subchannel_attrs[] = { 653static struct attribute *io_subchannel_attrs[] = {
645 &dev_attr_chpids.attr, 654 &dev_attr_chpids.attr,
646 &dev_attr_pimpampom.attr, 655 &dev_attr_pimpampom.attr,
647 &dev_attr_logging.attr, 656 &dev_attr_logging.attr,
657 &dev_attr_vpm.attr,
648 NULL, 658 NULL,
649}; 659};
650 660
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1bb1d00095af..c7638c543250 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
47 cc = stsch_err(sch->schid, &schib); 47 cc = stsch_err(sch->schid, &schib);
48 48
49 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 49 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
50 "device information:\n", get_clock()); 50 "device information:\n", get_tod_clock());
51 printk(KERN_WARNING "cio: orb:\n"); 51 printk(KERN_WARNING "cio: orb:\n");
52 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 52 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
53 orb, sizeof(*orb), 0); 53 orb, sizeof(*orb), 0);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287f66c1..37ada05e82a5 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -23,6 +23,8 @@
23#define PGID_RETRIES 256 23#define PGID_RETRIES 256
24#define PGID_TIMEOUT (10 * HZ) 24#define PGID_TIMEOUT (10 * HZ)
25 25
26static void verify_start(struct ccw_device *cdev);
27
26/* 28/*
27 * Process path verification data and report result. 29 * Process path verification data and report result.
28 */ 30 */
@@ -70,8 +72,8 @@ static void nop_do(struct ccw_device *cdev)
70 struct subchannel *sch = to_subchannel(cdev->dev.parent); 72 struct subchannel *sch = to_subchannel(cdev->dev.parent);
71 struct ccw_request *req = &cdev->private->req; 73 struct ccw_request *req = &cdev->private->req;
72 74
73 /* Adjust lpm. */ 75 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
74 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm); 76 ~cdev->private->path_noirq_mask);
75 if (!req->lpm) 77 if (!req->lpm)
76 goto out_nopath; 78 goto out_nopath;
77 nop_build_cp(cdev); 79 nop_build_cp(cdev);
@@ -102,10 +104,20 @@ static void nop_callback(struct ccw_device *cdev, void *data, int rc)
102 struct subchannel *sch = to_subchannel(cdev->dev.parent); 104 struct subchannel *sch = to_subchannel(cdev->dev.parent);
103 struct ccw_request *req = &cdev->private->req; 105 struct ccw_request *req = &cdev->private->req;
104 106
105 if (rc == 0) 107 switch (rc) {
108 case 0:
106 sch->vpm |= req->lpm; 109 sch->vpm |= req->lpm;
107 else if (rc != -EACCES) 110 break;
111 case -ETIME:
112 cdev->private->path_noirq_mask |= req->lpm;
113 break;
114 case -EACCES:
115 cdev->private->path_notoper_mask |= req->lpm;
116 break;
117 default:
108 goto err; 118 goto err;
119 }
120 /* Continue on the next path. */
109 req->lpm >>= 1; 121 req->lpm >>= 1;
110 nop_do(cdev); 122 nop_do(cdev);
111 return; 123 return;
@@ -132,6 +144,48 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
132 req->cp = cp; 144 req->cp = cp;
133} 145}
134 146
147static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
148{
149 if (rc) {
150 /* We don't know the path groups' state. Abort. */
151 verify_done(cdev, rc);
152 return;
153 }
154 /*
155 * Path groups have been reset. Restart path verification but
156 * leave paths in path_noirq_mask out.
157 */
158 cdev->private->flags.pgid_unknown = 0;
159 verify_start(cdev);
160}
161
162/*
163 * Reset pathgroups and restart path verification, leave unusable paths out.
164 */
165static void pgid_wipeout_start(struct ccw_device *cdev)
166{
167 struct subchannel *sch = to_subchannel(cdev->dev.parent);
168 struct ccw_dev_id *id = &cdev->private->dev_id;
169 struct ccw_request *req = &cdev->private->req;
170 u8 fn;
171
172 CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
173 id->ssid, id->devno, cdev->private->pgid_valid_mask,
174 cdev->private->path_noirq_mask);
175
176 /* Initialize request data. */
177 memset(req, 0, sizeof(*req));
178 req->timeout = PGID_TIMEOUT;
179 req->maxretries = PGID_RETRIES;
180 req->lpm = sch->schib.pmcw.pam;
181 req->callback = pgid_wipeout_callback;
182 fn = SPID_FUNC_DISBAND;
183 if (cdev->private->flags.mpath)
184 fn |= SPID_FUNC_MULTI_PATH;
185 spid_build_cp(cdev, fn);
186 ccw_request_start(cdev);
187}
188
135/* 189/*
136 * Perform establish/resign SET PGID on a single path. 190 * Perform establish/resign SET PGID on a single path.
137 */ 191 */
@@ -157,11 +211,14 @@ static void spid_do(struct ccw_device *cdev)
157 return; 211 return;
158 212
159out_nopath: 213out_nopath:
214 if (cdev->private->flags.pgid_unknown) {
215 /* At least one SPID could be partially done. */
216 pgid_wipeout_start(cdev);
217 return;
218 }
160 verify_done(cdev, sch->vpm ? 0 : -EACCES); 219 verify_done(cdev, sch->vpm ? 0 : -EACCES);
161} 220}
162 221
163static void verify_start(struct ccw_device *cdev);
164
165/* 222/*
166 * Process SET PGID request result for a single path. 223 * Process SET PGID request result for a single path.
167 */ 224 */
@@ -174,7 +231,12 @@ static void spid_callback(struct ccw_device *cdev, void *data, int rc)
174 case 0: 231 case 0:
175 sch->vpm |= req->lpm & sch->opm; 232 sch->vpm |= req->lpm & sch->opm;
176 break; 233 break;
234 case -ETIME:
235 cdev->private->flags.pgid_unknown = 1;
236 cdev->private->path_noirq_mask |= req->lpm;
237 break;
177 case -EACCES: 238 case -EACCES:
239 cdev->private->path_notoper_mask |= req->lpm;
178 break; 240 break;
179 case -EOPNOTSUPP: 241 case -EOPNOTSUPP:
180 if (cdev->private->flags.mpath) { 242 if (cdev->private->flags.mpath) {
@@ -330,8 +392,9 @@ static void snid_done(struct ccw_device *cdev, int rc)
330 else { 392 else {
331 donepm = pgid_to_donepm(cdev); 393 donepm = pgid_to_donepm(cdev);
332 sch->vpm = donepm & sch->opm; 394 sch->vpm = donepm & sch->opm;
333 cdev->private->pgid_todo_mask &= ~donepm;
334 cdev->private->pgid_reset_mask |= reset; 395 cdev->private->pgid_reset_mask |= reset;
396 cdev->private->pgid_todo_mask &=
397 ~(donepm | cdev->private->path_noirq_mask);
335 pgid_fill(cdev, pgid); 398 pgid_fill(cdev, pgid);
336 } 399 }
337out: 400out:
@@ -341,6 +404,10 @@ out:
341 cdev->private->pgid_todo_mask, mismatch, reserved, reset); 404 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
342 switch (rc) { 405 switch (rc) {
343 case 0: 406 case 0:
407 if (cdev->private->flags.pgid_unknown) {
408 pgid_wipeout_start(cdev);
409 return;
410 }
344 /* Anything left to do? */ 411 /* Anything left to do? */
345 if (cdev->private->pgid_todo_mask == 0) { 412 if (cdev->private->pgid_todo_mask == 0) {
346 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); 413 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
@@ -384,9 +451,10 @@ static void snid_do(struct ccw_device *cdev)
384{ 451{
385 struct subchannel *sch = to_subchannel(cdev->dev.parent); 452 struct subchannel *sch = to_subchannel(cdev->dev.parent);
386 struct ccw_request *req = &cdev->private->req; 453 struct ccw_request *req = &cdev->private->req;
454 int ret;
387 455
388 /* Adjust lpm if paths are not set in pam. */ 456 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
389 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam); 457 ~cdev->private->path_noirq_mask);
390 if (!req->lpm) 458 if (!req->lpm)
391 goto out_nopath; 459 goto out_nopath;
392 snid_build_cp(cdev); 460 snid_build_cp(cdev);
@@ -394,7 +462,13 @@ static void snid_do(struct ccw_device *cdev)
394 return; 462 return;
395 463
396out_nopath: 464out_nopath:
397 snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES); 465 if (cdev->private->pgid_valid_mask)
466 ret = 0;
467 else if (cdev->private->path_noirq_mask)
468 ret = -ETIME;
469 else
470 ret = -EACCES;
471 snid_done(cdev, ret);
398} 472}
399 473
400/* 474/*
@@ -404,10 +478,21 @@ static void snid_callback(struct ccw_device *cdev, void *data, int rc)
404{ 478{
405 struct ccw_request *req = &cdev->private->req; 479 struct ccw_request *req = &cdev->private->req;
406 480
407 if (rc == 0) 481 switch (rc) {
482 case 0:
408 cdev->private->pgid_valid_mask |= req->lpm; 483 cdev->private->pgid_valid_mask |= req->lpm;
409 else if (rc != -EACCES) 484 break;
485 case -ETIME:
486 cdev->private->flags.pgid_unknown = 1;
487 cdev->private->path_noirq_mask |= req->lpm;
488 break;
489 case -EACCES:
490 cdev->private->path_notoper_mask |= req->lpm;
491 break;
492 default:
410 goto err; 493 goto err;
494 }
495 /* Continue on the next path. */
411 req->lpm >>= 1; 496 req->lpm >>= 1;
412 snid_do(cdev); 497 snid_do(cdev);
413 return; 498 return;
@@ -427,6 +512,13 @@ static void verify_start(struct ccw_device *cdev)
427 512
428 sch->vpm = 0; 513 sch->vpm = 0;
429 sch->lpm = sch->schib.pmcw.pam; 514 sch->lpm = sch->schib.pmcw.pam;
515
516 /* Initialize PGID data. */
517 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
518 cdev->private->pgid_valid_mask = 0;
519 cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
520 cdev->private->path_notoper_mask = 0;
521
430 /* Initialize request data. */ 522 /* Initialize request data. */
431 memset(req, 0, sizeof(*req)); 523 memset(req, 0, sizeof(*req));
432 req->timeout = PGID_TIMEOUT; 524 req->timeout = PGID_TIMEOUT;
@@ -459,14 +551,8 @@ static void verify_start(struct ccw_device *cdev)
459 */ 551 */
460void ccw_device_verify_start(struct ccw_device *cdev) 552void ccw_device_verify_start(struct ccw_device *cdev)
461{ 553{
462 struct subchannel *sch = to_subchannel(cdev->dev.parent);
463
464 CIO_TRACE_EVENT(4, "vrfy"); 554 CIO_TRACE_EVENT(4, "vrfy");
465 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); 555 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
466 /* Initialize PGID data. */
467 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
468 cdev->private->pgid_valid_mask = 0;
469 cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
470 /* 556 /*
471 * Initialize pathgroup and multipath state with target values. 557 * Initialize pathgroup and multipath state with target values.
472 * They may change in the course of path verification. 558 * They may change in the course of path verification.
@@ -474,6 +560,7 @@ void ccw_device_verify_start(struct ccw_device *cdev)
474 cdev->private->flags.pgroup = cdev->private->options.pgroup; 560 cdev->private->flags.pgroup = cdev->private->options.pgroup;
475 cdev->private->flags.mpath = cdev->private->options.mpath; 561 cdev->private->flags.mpath = cdev->private->options.mpath;
476 cdev->private->flags.doverify = 0; 562 cdev->private->flags.doverify = 0;
563 cdev->private->path_noirq_mask = 0;
477 verify_start(cdev); 564 verify_start(cdev);
478} 565}
479 566
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 76253dfcc1be..b108f4a5c7dd 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -126,6 +126,10 @@ struct ccw_device_private {
126 u8 pgid_valid_mask; /* mask of valid PGIDs */ 126 u8 pgid_valid_mask; /* mask of valid PGIDs */
127 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ 127 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
128 u8 pgid_reset_mask; /* mask of PGIDs which were reset */ 128 u8 pgid_reset_mask; /* mask of PGIDs which were reset */
129 u8 path_noirq_mask; /* mask of paths for which no irq was
130 received */
131 u8 path_notoper_mask; /* mask of paths which were found
132 not operable */
129 u8 path_gone_mask; /* mask of paths, that became unavailable */ 133 u8 path_gone_mask; /* mask of paths, that became unavailable */
130 u8 path_new_mask; /* mask of paths, that became available */ 134 u8 path_new_mask; /* mask of paths, that became available */
131 struct { 135 struct {
@@ -145,6 +149,7 @@ struct ccw_device_private {
145 unsigned int resuming:1; /* recognition while resume */ 149 unsigned int resuming:1; /* recognition while resume */
146 unsigned int pgroup:1; /* pathgroup is set up */ 150 unsigned int pgroup:1; /* pathgroup is set up */
147 unsigned int mpath:1; /* multipathing is set up */ 151 unsigned int mpath:1; /* multipathing is set up */
152 unsigned int pgid_unknown:1;/* unknown pgid state */
148 unsigned int initialized:1; /* set if initial reference held */ 153 unsigned int initialized:1; /* set if initial reference held */
149 } __attribute__((packed)) flags; 154 } __attribute__((packed)) flags;
150 unsigned long intparm; /* user interruption parameter */ 155 unsigned long intparm; /* user interruption parameter */
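
The new path_noirq_mask and path_notoper_mask fields feed the mask arithmetic in the device_pgid.c hunks above: each channel path is one bit in an 8-bit mask, timed-out paths are filtered out of the candidate set, and the remaining bits are visited from the highest bit down (req->lpm >>= 1). Below is a standalone sketch of that bookkeeping; next_path() is a hypothetical helper for illustration, not the cio lpm_adjust() implementation:

#include <stdio.h>

typedef unsigned char u8;

/* Return the highest path bit in 'lpm' that is also set in 'mask'. */
static u8 next_path(u8 lpm, u8 mask)
{
	while (lpm && !(lpm & mask))
		lpm >>= 1;
	return lpm;
}

int main(void)
{
	u8 pam = 0xf0;		/* installed paths (path available mask) */
	u8 noirq = 0x20;	/* a path that never answered */
	u8 lpm;

	/* Walks 0x80, 0x40, 0x10 and skips the 0x20 path. */
	for (lpm = 0x80; (lpm = next_path(lpm, pam & ~noirq)); lpm >>= 1)
		printf("verify path mask 0x%02x\n", lpm);
	return 0;
}
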
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 1671d3461f29..abc550e5dd35 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
338 retries++; 338 retries++;
339 339
340 if (!start_time) { 340 if (!start_time) {
341 start_time = get_clock(); 341 start_time = get_tod_clock();
342 goto again; 342 goto again;
343 } 343 }
344 if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 344 if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
345 goto again; 345 goto again;
346 } 346 }
347 if (retries) { 347 if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
504 int count, stop; 504 int count, stop;
505 unsigned char state = 0; 505 unsigned char state = 0;
506 506
507 q->timestamp = get_clock(); 507 q->timestamp = get_tod_clock();
508 508
509 /* 509 /*
510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 510 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -563,7 +563,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
563 if (bufnr != q->last_move) { 563 if (bufnr != q->last_move) {
564 q->last_move = bufnr; 564 q->last_move = bufnr;
565 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) 565 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
566 q->u.in.timestamp = get_clock(); 566 q->u.in.timestamp = get_tod_clock();
567 return 1; 567 return 1;
568 } else 568 } else
569 return 0; 569 return 0;
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
595 * At this point we know, that inbound first_to_check 595 * At this point we know, that inbound first_to_check
596 * has (probably) not moved (see qdio_inbound_processing). 596 * has (probably) not moved (see qdio_inbound_processing).
597 */ 597 */
598 if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 598 if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 599 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
600 q->first_to_check); 600 q->first_to_check);
601 return 1; 601 return 1;
@@ -772,7 +772,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
772 int count, stop; 772 int count, stop;
773 unsigned char state = 0; 773 unsigned char state = 0;
774 774
775 q->timestamp = get_clock(); 775 q->timestamp = get_tod_clock();
776 776
777 if (need_siga_sync(q)) 777 if (need_siga_sync(q))
778 if (((queue_type(q) != QDIO_IQDIO_QFMT) && 778 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d690b33846cc..d87961d4c0de 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -818,7 +818,7 @@ static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
818 818
819static inline int qeth_get_micros(void) 819static inline int qeth_get_micros(void)
820{ 820{
821 return (int) (get_clock() >> 12); 821 return (int) (get_tod_clock() >> 12);
822} 822}
823 823
824static inline int qeth_get_ip_version(struct sk_buff *skb) 824static inline int qeth_get_ip_version(struct sk_buff *skb)
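
Several hunks above shift TOD clock values by 12 bits (qeth_get_micros(), the RCHP_TIMEOUT << 12 in css_reset(), the >> 12 in zfcp_qdio_account()). The s390 TOD clock is defined so that bit 51, i.e. the 2^12 position of the 64-bit value, advances once per microsecond, so a 12-bit shift converts between raw TOD units and microseconds. A trivial standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long tod_delta = 5ULL << 12;	/* 5 microseconds in TOD clock units */

	printf("%llu us\n", tod_delta >> 12);		/* prints "5 us" */
	return 0;
}
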
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c96320d79fbc..c7e148f33b2a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -727,7 +727,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
727 zfcp_reqlist_add(adapter->req_list, req); 727 zfcp_reqlist_add(adapter->req_list, req);
728 728
729 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); 729 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
730 req->issued = get_clock(); 730 req->issued = get_tod_clock();
731 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 731 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
732 del_timer(&req->timer); 732 del_timer(&req->timer);
733 /* lookup request again, list might have changed */ 733 /* lookup request again, list might have changed */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 50b5615848f6..665e3cfaaf85 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -68,7 +68,7 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
68 unsigned long long now, span; 68 unsigned long long now, span;
69 int used; 69 int used;
70 70
71 now = get_clock_monotonic(); 71 now = get_tod_clock_monotonic();
72 span = (now - qdio->req_q_time) >> 12; 72 span = (now - qdio->req_q_time) >> 12;
73 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); 73 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
74 qdio->req_q_util += used * span; 74 qdio->req_q_util += used * span;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 60e48a11b66c..fd473639ab70 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2199,6 +2199,7 @@ done:
2199 mutex_unlock(&tty->termios_mutex); 2199 mutex_unlock(&tty->termios_mutex);
2200 return 0; 2200 return 0;
2201} 2201}
2202EXPORT_SYMBOL(tty_do_resize);
2202 2203
2203/** 2204/**
2204 * tiocswinsz - implement window size set ioctl 2205 * tiocswinsz - implement window size set ioctl
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index f56d185790ea..e92eeaf251fe 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -1,6 +1,5 @@
1menuconfig UIO 1menuconfig UIO
2 tristate "Userspace I/O drivers" 2 tristate "Userspace I/O drivers"
3 depends on !S390
4 help 3 help
5 Enable this to allow the userspace driver core code to be 4 Enable this to allow the userspace driver core code to be
6 built. This code allows userspace programs easy access to 5 built. This code allows userspace programs easy access to
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 33bbbae4ddc6..8e260cf01351 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -53,8 +53,18 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
53#endif 53#endif
54 54
55#define readb __raw_readb 55#define readb __raw_readb
56#define readw(addr) __le16_to_cpu(__raw_readw(addr)) 56
57#define readl(addr) __le32_to_cpu(__raw_readl(addr)) 57#define readw readw
58static inline u16 readw(const volatile void __iomem *addr)
59{
60 return __le16_to_cpu(__raw_readw(addr));
61}
62
63#define readl readl
64static inline u32 readl(const volatile void __iomem *addr)
65{
66 return __le32_to_cpu(__raw_readl(addr));
67}
58 68
59#ifndef __raw_writeb 69#ifndef __raw_writeb
60static inline void __raw_writeb(u8 b, volatile void __iomem *addr) 70static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
@@ -89,7 +99,11 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
89} 99}
90#endif 100#endif
91 101
92#define readq(addr) __le64_to_cpu(__raw_readq(addr)) 102#define readq readq
103static inline u64 readq(const volatile void __iomem *addr)
104{
105 return __le64_to_cpu(__raw_readq(addr));
106}
93 107
94#ifndef __raw_writeq 108#ifndef __raw_writeq
95static inline void __raw_writeq(u64 b, volatile void __iomem *addr) 109static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
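
Turning the readw()/readl()/readq() macros above into static inlines gives the accessors a real prototype: the address argument is type-checked and evaluated exactly once, while the "#define readw readw" idiom still lets other code test for the accessor's presence with #ifdef. A simplified standalone comparison, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

#define READ16_MACRO(addr) (*(volatile uint16_t *)(addr))

static inline uint16_t read16_inline(const volatile void *addr)
{
	return *(const volatile uint16_t *)addr;
}

int main(void)
{
	uint16_t reg = 0x1234;

	/* The macro accepts any argument and just casts it; the inline
	 * warns at compile time when the argument is not a pointer. */
	printf("0x%04x 0x%04x\n", READ16_MACRO(&reg), read16_inline(&reg));
	return 0;
}
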
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 5cf680a98f9b..bfd87685fc1f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -197,16 +197,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
197#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 197#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
198#endif 198#endif
199 199
200#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
201#define page_test_and_clear_dirty(pfn, mapped) (0)
202#endif
203
204#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
205#define pte_maybe_dirty(pte) pte_dirty(pte)
206#else
207#define pte_maybe_dirty(pte) (1)
208#endif
209
210#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 200#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
211#define page_test_and_clear_young(pfn) (0) 201#define page_test_and_clear_young(pfn) (0)
212#endif 202#endif
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 70473da47b3f..6d53675c2b54 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -303,21 +303,13 @@ static inline void __SetPageUptodate(struct page *page)
303 303
304static inline void SetPageUptodate(struct page *page) 304static inline void SetPageUptodate(struct page *page)
305{ 305{
306#ifdef CONFIG_S390
307 if (!test_and_set_bit(PG_uptodate, &page->flags))
308 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, 0);
309#else
310 /* 306 /*
311 * Memory barrier must be issued before setting the PG_uptodate bit, 307 * Memory barrier must be issued before setting the PG_uptodate bit,
312 * so that all previous stores issued in order to bring the page 308 * so that all previous stores issued in order to bring the page
313 * uptodate are actually visible before PageUptodate becomes true. 309 * uptodate are actually visible before PageUptodate becomes true.
314 *
315 * s390 doesn't need an explicit smp_wmb here because the test and
316 * set bit already provides full barriers.
317 */ 310 */
318 smp_wmb(); 311 smp_wmb();
319 set_bit(PG_uptodate, &(page)->flags); 312 set_bit(PG_uptodate, &(page)->flags);
320#endif
321} 313}
322 314
323CLEARPAGEFLAG(Uptodate, uptodate) 315CLEARPAGEFLAG(Uptodate, uptodate)
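
The comment kept in SetPageUptodate() above describes a standard publish pattern: every store that brings the page up to date must be visible before the PG_uptodate bit is. A standalone C11 sketch of the same ordering rule; the names are illustrative, not kernel code:

#include <stdatomic.h>
#include <stdio.h>

static int payload;
static atomic_int ready;

static void producer(void)
{
	payload = 42;	/* bring the "page" up to date */
	/* release store: plays the role of smp_wmb() + set_bit() */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

static void consumer(void)
{
	if (atomic_load_explicit(&ready, memory_order_acquire))
		printf("payload=%d\n", payload);	/* guaranteed to observe 42 */
}

int main(void)
{
	producer();
	consumer();
	return 0;
}
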
diff --git a/mm/rmap.c b/mm/rmap.c
index 2c78f8cadc95..3d38edffda41 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1126,7 +1126,6 @@ void page_add_file_rmap(struct page *page)
1126 */ 1126 */
1127void page_remove_rmap(struct page *page) 1127void page_remove_rmap(struct page *page)
1128{ 1128{
1129 struct address_space *mapping = page_mapping(page);
1130 bool anon = PageAnon(page); 1129 bool anon = PageAnon(page);
1131 bool locked; 1130 bool locked;
1132 unsigned long flags; 1131 unsigned long flags;
@@ -1144,29 +1143,6 @@ void page_remove_rmap(struct page *page)
1144 goto out; 1143 goto out;
1145 1144
1146 /* 1145 /*
1147 * Now that the last pte has gone, s390 must transfer dirty
1148 * flag from storage key to struct page. We can usually skip
1149 * this if the page is anon, so about to be freed; but perhaps
1150 * not if it's in swapcache - there might be another pte slot
1151 * containing the swap entry, but page not yet written to swap.
1152 *
1153 * And we can skip it on file pages, so long as the filesystem
1154 * participates in dirty tracking (note that this is not only an
1155 * optimization but also solves problems caused by dirty flag in
1156 * storage key getting set by a write from inside kernel); but need to
1157 * catch shm and tmpfs and ramfs pages which have been modified since
1158 * creation by read fault.
1159 *
1160 * Note that mapping must be decided above, before decrementing
1161 * mapcount (which luckily provides a barrier): once page is unmapped,
1162 * it could be truncated and page->mapping reset to NULL at any moment.
1163 * Note also that we are relying on page_mapping(page) to set mapping
1164 * to &swapper_space when PageSwapCache(page).
1165 */
1166 if (mapping && !mapping_cap_account_dirty(mapping) &&
1167 page_test_and_clear_dirty(page_to_pfn(page), 1))
1168 set_page_dirty(page);
1169 /*
1170 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED 1146 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
1171 * and not charged by memcg for now. 1147 * and not charged by memcg for now.
1172 */ 1148 */
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index df082508362d..4fe76ff214c2 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -831,8 +831,11 @@ static int iucv_reboot_event(struct notifier_block *this,
831{ 831{
832 int i; 832 int i;
833 833
834 if (cpumask_empty(&iucv_irq_cpumask))
835 return NOTIFY_DONE;
836
834 get_online_cpus(); 837 get_online_cpus();
835 on_each_cpu(iucv_block_cpu, NULL, 1); 838 on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
836 preempt_disable(); 839 preempt_disable();
837 for (i = 0; i < iucv_max_pathid; i++) { 840 for (i = 0; i < iucv_max_pathid; i++) {
838 if (iucv_path_table[i]) 841 if (iucv_path_table[i])