Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/Kbuild         |    1
-rw-r--r--  arch/s390/include/asm/atomic.h       |   28
-rw-r--r--  arch/s390/include/asm/bitops.h       |   40
-rw-r--r--  arch/s390/include/asm/cache.h        |    1
-rw-r--r--  arch/s390/include/asm/cacheflush.h   |   28
-rw-r--r--  arch/s390/include/asm/ccwdev.h       |   18
-rw-r--r--  arch/s390/include/asm/ccwgroup.h     |    4
-rw-r--r--  arch/s390/include/asm/cio.h          |    2
-rw-r--r--  arch/s390/include/asm/cmpxchg.h      |  224
-rw-r--r--  arch/s390/include/asm/compat.h       |    2
-rw-r--r--  arch/s390/include/asm/cpu.h          |    2
-rw-r--r--  arch/s390/include/asm/cputime.h      |   12
-rw-r--r--  arch/s390/include/asm/dasd.h         |   43
-rw-r--r--  arch/s390/include/asm/delay.h        |    8
-rw-r--r--  arch/s390/include/asm/diag.h         |   17
-rw-r--r--  arch/s390/include/asm/elf.h          |   19
-rw-r--r--  arch/s390/include/asm/ftrace.h       |   11
-rw-r--r--  arch/s390/include/asm/futex.h        |   12
-rw-r--r--  arch/s390/include/asm/hardirq.h      |   20
-rw-r--r--  arch/s390/include/asm/hugetlb.h      |   43
-rw-r--r--  arch/s390/include/asm/ioctls.h       |   88
-rw-r--r--  arch/s390/include/asm/irq.h          |   43
-rw-r--r--  arch/s390/include/asm/irqflags.h     |   51
-rw-r--r--  arch/s390/include/asm/jump_label.h   |   37
-rw-r--r--  arch/s390/include/asm/kprobes.h      |   20
-rw-r--r--  arch/s390/include/asm/kvm_virtio.h   |    7
-rw-r--r--  arch/s390/include/asm/lowcore.h      |   15
-rw-r--r--  arch/s390/include/asm/mmu.h          |    9
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |    8
-rw-r--r--  arch/s390/include/asm/mutex.h        |    2
-rw-r--r--  arch/s390/include/asm/page.h         |   71
-rw-r--r--  arch/s390/include/asm/percpu.h       |   68
-rw-r--r--  arch/s390/include/asm/perf_event.h   |    3
-rw-r--r--  arch/s390/include/asm/pgalloc.h      |   65
-rw-r--r--  arch/s390/include/asm/pgtable.h      |  678
-rw-r--r--  arch/s390/include/asm/processor.h    |   12
-rw-r--r--  arch/s390/include/asm/ptrace.h       |   55
-rw-r--r--  arch/s390/include/asm/qdio.h         |  133
-rw-r--r--  arch/s390/include/asm/qeth.h         |   51
-rw-r--r--  arch/s390/include/asm/rwsem.h        |   63
-rw-r--r--  arch/s390/include/asm/s390_ext.h     |   32
-rw-r--r--  arch/s390/include/asm/scatterlist.h  |    2
-rw-r--r--  arch/s390/include/asm/setup.h        |    3
-rw-r--r--  arch/s390/include/asm/smp.h          |    3
-rw-r--r--  arch/s390/include/asm/suspend.h      |   10
-rw-r--r--  arch/s390/include/asm/syscall.h      |    4
-rw-r--r--  arch/s390/include/asm/sysinfo.h      |   40
-rw-r--r--  arch/s390/include/asm/system.h       |  256
-rw-r--r--  arch/s390/include/asm/thread_info.h  |   16
-rw-r--r--  arch/s390/include/asm/timex.h        |   20
-rw-r--r--  arch/s390/include/asm/tlb.h          |  108
-rw-r--r--  arch/s390/include/asm/tlbflush.h     |   13
-rw-r--r--  arch/s390/include/asm/topology.h     |   29
-rw-r--r--  arch/s390/include/asm/types.h        |    8
-rw-r--r--  arch/s390/include/asm/uaccess.h      |   15
-rw-r--r--  arch/s390/include/asm/unistd.h       |    8
56 files changed, 1345 insertions(+), 1236 deletions(-)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 42e512ba8b43..287d7bbb6d36 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -5,6 +5,7 @@ header-y += chsc.h
 header-y += cmb.h
 header-y += dasd.h
 header-y += debug.h
+header-y += kvm_virtio.h
 header-y += monwriter.h
 header-y += qeth.h
 header-y += schid.h
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea117181..d9db13810d15 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -9,7 +9,7 @@
  *
  * Atomic operations that C can't guarantee us.
  * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP enviroment.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
  *
  */
 
@@ -36,14 +36,19 @@
 
 static inline int atomic_read(const atomic_t *v)
 {
-	barrier();
-	return v->counter;
+	int c;
+
+	asm volatile(
+		"	l	%0,%1\n"
+		: "=d" (c) : "Q" (v->counter));
+	return c;
 }
 
 static inline void atomic_set(atomic_t *v, int i)
 {
-	v->counter = i;
-	barrier();
+	asm volatile(
+		"	st	%1,%0\n"
+		: "=Q" (v->counter) : "d" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 static inline long long atomic64_read(const atomic64_t *v)
 {
-	barrier();
-	return v->counter;
+	long long c;
+
+	asm volatile(
+		"	lg	%0,%1\n"
+		: "=d" (c) : "Q" (v->counter));
+	return c;
 }
 
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	v->counter = i;
-	barrier();
+	asm volatile(
+		"	stg	%1,%0\n"
+		: "=Q" (v->counter) : "d" (i));
 }
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
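
Note: atomic_read() and atomic_set() above now use explicit load/store
instructions instead of barrier(), so the compiler can neither tear nor
cache the access. A minimal usage sketch (the foo_* names are hypothetical):

	static atomic_t foo_pending = ATOMIC_INIT(0);

	static int foo_outstanding(void)
	{
		/* compiles to a single "l" instruction after this change */
		return atomic_read(&foo_pending);
	}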
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 2e05972c5085..667c6e9f6a34 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -621,6 +621,7 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr,
 	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
 	return (bits < size) ? bits : size;
 }
+#define find_first_zero_bit find_first_zero_bit
 
 /**
  * find_first_bit - find the first set bit in a memory region
@@ -641,6 +642,7 @@ static inline unsigned long find_first_bit(const unsigned long * addr,
 	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
 	return (bits < size) ? bits : size;
 }
+#define find_first_bit find_first_bit
 
 /**
  * find_next_zero_bit - find the first zero bit in a memory region
@@ -677,6 +679,7 @@ static inline int find_next_zero_bit (const unsigned long * addr,
 	}
 	return offset + find_first_zero_bit(p, size);
 }
+#define find_next_zero_bit find_next_zero_bit
 
 /**
  * find_next_bit - find the first set bit in a memory region
@@ -713,6 +716,7 @@ static inline int find_next_bit (const unsigned long * addr,
 	}
 	return offset + find_first_bit(p, size);
 }
+#define find_next_bit find_next_bit
 
 /*
  * Every architecture must define this function. It's the fastest
@@ -742,18 +746,7 @@ static inline int sched_find_first_bit(unsigned long *b)
  * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
  */
 
-#define ext2_set_bit(nr, addr)	\
-	__test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_set_bit_atomic(lock, nr, addr)	\
-	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_clear_bit(nr, addr)	\
-	__test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr)	\
-	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_test_bit(nr, addr)	\
-	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-
-static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
+static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
 {
 	unsigned long bytes, bits;
 
@@ -763,8 +756,9 @@ static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
 	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
 	return (bits < size) ? bits : size;
 }
+#define find_first_zero_bit_le find_first_zero_bit_le
 
-static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
+static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
 					  unsigned long offset)
 {
 	unsigned long *addr = vaddr, *p;
@@ -790,11 +784,11 @@ static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
 		size -= __BITOPS_WORDSIZE;
 		p++;
 	}
-	return offset + ext2_find_first_zero_bit(p, size);
+	return offset + find_first_zero_bit_le(p, size);
 }
+#define find_next_zero_bit_le find_next_zero_bit_le
 
-static inline unsigned long ext2_find_first_bit(void *vaddr,
-						unsigned long size)
+static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
 {
 	unsigned long bytes, bits;
 
@@ -804,8 +798,9 @@ static inline unsigned long ext2_find_first_bit(void *vaddr,
 	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
 	return (bits < size) ? bits : size;
 }
+#define find_first_bit_le find_first_bit_le
 
-static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
+static inline int find_next_bit_le(void *vaddr, unsigned long size,
 				     unsigned long offset)
 {
 	unsigned long *addr = vaddr, *p;
@@ -831,10 +826,17 @@ static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
 		size -= __BITOPS_WORDSIZE;
 		p++;
 	}
-	return offset + ext2_find_first_bit(p, size);
+	return offset + find_first_bit_le(p, size);
 }
+#define find_next_bit_le find_next_bit_le
+
+#include <asm-generic/bitops/le.h>
+
+#define ext2_set_bit_atomic(lock, nr, addr)	\
+	test_and_set_bit_le(nr, addr)
+#define ext2_clear_bit_atomic(lock, nr, addr)	\
+	test_and_clear_bit_le(nr, addr)
 
-#include <asm-generic/bitops/minix.h>
 
 #endif /* __KERNEL__ */
 
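
Note: the ext2_* little-endian helpers above become the generic *_le family;
the #define markers tell <asm-generic/bitops/le.h> not to supply its own
versions. A hedged sketch of a caller updated for the rename, assuming the
generic __set_bit_le() helper from the same series (bitmap and size are
hypothetical):

	unsigned long bit;

	bit = find_first_zero_bit_le(inode_bitmap, nr_inodes);
	if (bit < nr_inodes)
		__set_bit_le(bit, inode_bitmap);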
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 24aafa68b643..2a30d5ac0667 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,6 +13,7 @@
 
 #define L1_CACHE_BYTES     256
 #define L1_CACHE_SHIFT     8
+#define NET_SKB_PAD	   32
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 405cc97c6249..3e20383d0921 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -1,32 +1,16 @@
 #ifndef _S390_CACHEFLUSH_H
 #define _S390_CACHEFLUSH_H
 
-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the s390. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable);
 #endif
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+
 #endif /* _S390_CACHEFLUSH_H */
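
Note: the no-op flush_* stubs move to <asm-generic/cacheflush.h>, and the
header now exports page attribute helpers. A sketch of their intended use,
assuming addr points at page-aligned kernel memory (the buffer name is
hypothetical):

	unsigned long addr = (unsigned long) foo_buffer;

	set_memory_ro(addr, 1);		/* write-protect one page */
	/* ... */
	set_memory_rw(addr, 1);		/* make it writable again */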
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f3ba0fa98de6..623f2fb71774 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -92,6 +92,16 @@ struct ccw_device {
 };
 
 /*
+ * Possible events used by the path_event notifier.
+ */
+#define PE_NONE				0x0
+#define PE_PATH_GONE			0x1 /* A path is no longer available. */
+#define PE_PATH_AVAILABLE		0x2 /* A path has become available and
+					       was successfully verified. */
+#define PE_PATHGROUP_ESTABLISHED	0x4 /* A pathgroup was reset and had
+					       to be established again. */
+
+/*
  * Possible CIO actions triggered by the unit check handler.
  */
 enum uc_todo {
@@ -102,13 +112,13 @@ enum uc_todo {
 
 /**
  * struct ccw driver - device driver for channel attached devices
- * @owner: owning module
  * @ids: ids supported by this driver
  * @probe: function called on probe
  * @remove: function called on remove
  * @set_online: called when setting device online
  * @set_offline: called when setting device offline
  * @notify: notify driver of device state changes
+ * @path_event: notify driver of channel path events
  * @shutdown: called at device shutdown
  * @prepare: prepare for pm state transition
  * @complete: undo work done in @prepare
@@ -117,16 +127,15 @@ enum uc_todo {
  * @restore: callback for restoring after hibernation
  * @uc_handler: callback for unit check handler
  * @driver: embedded device driver structure
- * @name: device driver name
  */
 struct ccw_driver {
-	struct module *owner;
 	struct ccw_device_id *ids;
 	int (*probe) (struct ccw_device *);
 	void (*remove) (struct ccw_device *);
 	int (*set_online) (struct ccw_device *);
 	int (*set_offline) (struct ccw_device *);
 	int (*notify) (struct ccw_device *, int);
+	void (*path_event) (struct ccw_device *, int *);
 	void (*shutdown) (struct ccw_device *);
 	int (*prepare) (struct ccw_device *);
 	void (*complete) (struct ccw_device *);
@@ -135,7 +144,6 @@ struct ccw_driver {
 	int (*restore)(struct ccw_device *);
 	enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
 	struct device_driver driver;
-	char *name;
 };
 
 extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
@@ -192,6 +200,8 @@ int ccw_device_tm_start_timeout(struct ccw_device *, struct tcw *,
 				unsigned long, u8, int);
 int ccw_device_tm_intrg(struct ccw_device *cdev);
 
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask);
+
 extern int ccw_device_set_online(struct ccw_device *cdev);
 extern int ccw_device_set_offline(struct ccw_device *cdev);
 
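
Note: with @owner and @name removed, a ccw driver now identifies itself
through the embedded struct device_driver, and may supply the new path_event
callback. A hedged sketch (all foo_* names are hypothetical):

	static void foo_path_event(struct ccw_device *cdev, int *path_event)
	{
		/* path_event[] carries PE_* flags per channel path */
	}

	static struct ccw_driver foo_driver = {
		.driver = {
			.name	= "foo",	/* was ccw_driver.name */
			.owner	= THIS_MODULE,	/* was ccw_driver.owner */
		},
		.ids		= foo_ids,
		.probe		= foo_probe,
		.remove		= foo_remove,
		.path_event	= foo_path_event,
	};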
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index c79c1e787b86..f2ea2c56a7e1 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -29,8 +29,6 @@ struct ccwgroup_device {
 
 /**
  * struct ccwgroup_driver - driver for ccw group devices
- * @owner: driver owner
- * @name: driver name
  * @max_slaves: maximum number of slave devices
  * @driver_id: unique id
  * @probe: function called on probe
@@ -46,8 +44,6 @@ struct ccwgroup_device {
  * @driver: embedded driver structure
  */
 struct ccwgroup_driver {
-	struct module *owner;
-	char *name;
 	int max_slaves;
 	unsigned long driver_id;
 
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index e34347d567a6..fc50a3342da3 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -183,7 +183,7 @@ struct esw3 {
  * The irb that is handed to the device driver when an interrupt occurs. For
  * solicited interrupts, the common I/O layer already performs checks whether
  * a field is valid; a field not being valid is always passed as %0.
- * If a unit check occured, @ecw may contain sense data; this is retrieved
+ * If a unit check occurred, @ecw may contain sense data; this is retrieved
  * by the common I/O layer itself if the device doesn't support concurrent
  * sense (so that the device driver never needs to perform basic sene itself).
  * For unsolicited interrupts, the irb is passed as-is (expect for sense data,
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..81d7908416cf
--- /dev/null
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright IBM Corp. 1999, 2011
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ */
+
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/types.h>
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
+{
+	unsigned long addr, old;
+	int shift;
+
+	switch (size) {
+	case 1:
+		addr = (unsigned long) ptr;
+		shift = (3 ^ (addr & 3)) << 3;
+		addr ^= addr & 3;
+		asm volatile(
+			"	l	%0,%4\n"
+			"0:	lr	0,%0\n"
+			"	nr	0,%3\n"
+			"	or	0,%2\n"
+			"	cs	%0,0,%4\n"
+			"	jl	0b\n"
+			: "=&d" (old), "=Q" (*(int *) addr)
+			: "d" (x << shift), "d" (~(255 << shift)),
+			  "Q" (*(int *) addr) : "memory", "cc", "0");
+		return old >> shift;
+	case 2:
+		addr = (unsigned long) ptr;
+		shift = (2 ^ (addr & 2)) << 3;
+		addr ^= addr & 2;
+		asm volatile(
+			"	l	%0,%4\n"
+			"0:	lr	0,%0\n"
+			"	nr	0,%3\n"
+			"	or	0,%2\n"
+			"	cs	%0,0,%4\n"
+			"	jl	0b\n"
+			: "=&d" (old), "=Q" (*(int *) addr)
+			: "d" (x << shift), "d" (~(65535 << shift)),
+			  "Q" (*(int *) addr) : "memory", "cc", "0");
+		return old >> shift;
+	case 4:
+		asm volatile(
+			"	l	%0,%3\n"
+			"0:	cs	%0,%2,%3\n"
+			"	jl	0b\n"
+			: "=&d" (old), "=Q" (*(int *) ptr)
+			: "d" (x), "Q" (*(int *) ptr)
+			: "memory", "cc");
+		return old;
+#ifdef CONFIG_64BIT
+	case 8:
+		asm volatile(
+			"	lg	%0,%3\n"
+			"0:	csg	%0,%2,%3\n"
+			"	jl	0b\n"
+			: "=&d" (old), "=m" (*(long *) ptr)
+			: "d" (x), "Q" (*(long *) ptr)
+			: "memory", "cc");
+		return old;
+#endif /* CONFIG_64BIT */
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+#define xchg(ptr, x)						\
+({								\
+	__typeof__(*(ptr)) __ret;				\
+	__ret = (__typeof__(*(ptr)))				\
+		__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
+	__ret;							\
+})
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long addr, prev, tmp;
+	int shift;
+
+	switch (size) {
+	case 1:
+		addr = (unsigned long) ptr;
+		shift = (3 ^ (addr & 3)) << 3;
+		addr ^= addr & 3;
+		asm volatile(
+			"	l	%0,%2\n"
+			"0:	nr	%0,%5\n"
+			"	lr	%1,%0\n"
+			"	or	%0,%3\n"
+			"	or	%1,%4\n"
+			"	cs	%0,%1,%2\n"
+			"	jnl	1f\n"
+			"	xr	%1,%0\n"
+			"	nr	%1,%5\n"
+			"	jnz	0b\n"
+			"1:"
+			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
+			: "d" (old << shift), "d" (new << shift),
+			  "d" (~(255 << shift)), "Q" (*(int *) ptr)
+			: "memory", "cc");
+		return prev >> shift;
+	case 2:
+		addr = (unsigned long) ptr;
+		shift = (2 ^ (addr & 2)) << 3;
+		addr ^= addr & 2;
+		asm volatile(
+			"	l	%0,%2\n"
+			"0:	nr	%0,%5\n"
+			"	lr	%1,%0\n"
+			"	or	%0,%3\n"
+			"	or	%1,%4\n"
+			"	cs	%0,%1,%2\n"
+			"	jnl	1f\n"
+			"	xr	%1,%0\n"
+			"	nr	%1,%5\n"
+			"	jnz	0b\n"
+			"1:"
+			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
+			: "d" (old << shift), "d" (new << shift),
+			  "d" (~(65535 << shift)), "Q" (*(int *) ptr)
+			: "memory", "cc");
+		return prev >> shift;
+	case 4:
+		asm volatile(
+			"	cs	%0,%3,%1\n"
+			: "=&d" (prev), "=Q" (*(int *) ptr)
+			: "0" (old), "d" (new), "Q" (*(int *) ptr)
+			: "memory", "cc");
+		return prev;
+#ifdef CONFIG_64BIT
+	case 8:
+		asm volatile(
+			"	csg	%0,%3,%1\n"
+			: "=&d" (prev), "=Q" (*(long *) ptr)
+			: "0" (old), "d" (new), "Q" (*(long *) ptr)
+			: "memory", "cc");
+		return prev;
+#endif /* CONFIG_64BIT */
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
+				       (unsigned long)(n), sizeof(*(ptr))))
+
+#ifdef CONFIG_64BIT
+#define cmpxchg64(ptr, o, n)						\
+({									\
+	cmpxchg((ptr), (o), (n));					\
+})
+#else /* CONFIG_64BIT */
+static inline unsigned long long __cmpxchg64(void *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	register_pair rp_old = {.pair = old};
+	register_pair rp_new = {.pair = new};
+
+	asm volatile(
+		"	cds	%0,%2,%1"
+		: "+&d" (rp_old), "=Q" (ptr)
+		: "d" (rp_new), "Q" (ptr)
+		: "cc");
+	return rp_old.pair;
+}
+#define cmpxchg64(ptr, o, n)			\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),	\
+		(unsigned long long)(o),	\
+		(unsigned long long)(n)))
+#endif /* CONFIG_64BIT */
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+	case 2:
+	case 4:
+#ifdef CONFIG_64BIT
+	case 8:
+#endif
+		return __cmpxchg(ptr, old, new, size);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new, size);
+	}
+
+	return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+			(unsigned long)(n), sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n)	cmpxchg64((ptr), (o), (n))
+
+#endif /* __ASM_CMPXCHG_H */
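
Note: the new header carries the xchg()/cmpxchg() family over from
<asm/system.h>; callers keep the usual compare-and-swap retry pattern. A
minimal sketch (the counter and limit are hypothetical):

	static int foo_count;

	static void foo_add_saturating(int delta, int limit)
	{
		int old, new;

		do {
			old = foo_count;
			new = min(old + delta, limit);
		} while (cmpxchg(&foo_count, old, new) != old);
	}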
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index a875c2f542e1..da359ca6fe55 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -169,7 +169,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 
 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_31BIT);
+	return is_32bit_task();
 }
 
 #else
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index 471234b90574..e0b69540216f 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -20,7 +20,7 @@ struct cpuid
 	unsigned int ident : 24;
 	unsigned int machine : 16;
 	unsigned int unused : 16;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_CPU_H */
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 8b1a52a137c5..081434878296 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -73,18 +73,18 @@ cputime64_to_jiffies64(cputime64_t cputime)
 }
 
 /*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
  */
 static inline unsigned int
-cputime_to_msecs(const cputime_t cputime)
+cputime_to_usecs(const cputime_t cputime)
 {
-	return cputime_div(cputime, 4096000);
+	return cputime_div(cputime, 4096);
 }
 
 static inline cputime_t
-msecs_to_cputime(const unsigned int m)
+usecs_to_cputime(const unsigned int m)
 {
-	return (cputime_t) m * 4096000;
+	return (cputime_t) m * 4096;
 }
 
 /*
@@ -202,7 +202,7 @@ static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
 
 static inline int s390_nohz_delay(int cpu)
 {
-	return per_cpu(s390_idle, cpu).nohz_delay != 0;
+	return __get_cpu_var(s390_idle).nohz_delay != 0;
 }
 
 #define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
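
Note: s390 cputime is counted in CPU-timer units of 1/4096 microsecond, so
the switch from milliseconds to microseconds replaces the divisor 4096000
(units per millisecond) with 4096 (units per microsecond). Worked example:

	cputime_t t = 8192000;			/* 8192000 / 4096 = 2000 */
	unsigned int us = cputime_to_usecs(t);	/* -> 2000 microseconds */
	cputime_t back = usecs_to_cputime(us);	/* -> 8192000 again */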
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index 218bce81ec70..0be28efe5b66 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -73,6 +73,7 @@ typedef struct dasd_information2_t {
  * 0x02: use diag discipline (diag)
  * 0x04: set the device initially online (internal use only)
  * 0x08: enable ERP related logging
+ * 0x20: give access to raw eckd data
  */
 #define DASD_FEATURE_DEFAULT	     0x00
 #define DASD_FEATURE_READONLY	     0x01
@@ -80,6 +81,8 @@ typedef struct dasd_information2_t {
 #define DASD_FEATURE_INITIAL_ONLINE  0x04
 #define DASD_FEATURE_ERPLOG	     0x08
 #define DASD_FEATURE_FAILFAST	     0x10
+#define DASD_FEATURE_FAILONSLCK      0x20
+#define DASD_FEATURE_USERAW	     0x40
 
 #define DASD_PARTN_BITS		  2
 
@@ -217,6 +220,25 @@ typedef struct dasd_symmio_parms {
 	int rssd_result_len;
 } __attribute__ ((packed)) dasd_symmio_parms_t;
 
+/*
+ * Data returned by Sense Path Group ID (SNID)
+ */
+struct dasd_snid_data {
+	struct {
+		__u8 group:2;
+		__u8 reserve:2;
+		__u8 mode:1;
+		__u8 res:3;
+	} __attribute__ ((packed)) path_state;
+	__u8 pgid[11];
+} __attribute__ ((packed));
+
+struct dasd_snid_ioctl_data {
+	struct dasd_snid_data data;
+	__u8 path_mask;
+} __attribute__ ((packed));
+
+
 /********************************************************************************
  * SECTION: Definition of IOCTLs
  *
@@ -261,25 +283,10 @@ typedef struct dasd_symmio_parms {
 /* Set Attributes (cache operations) */
 #define BIODASDSATTR	_IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
 
+/* Get Sense Path Group ID (SNID) data */
+#define BIODASDSNID	_IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data)
+
 #define BIODASDSYMMIO	_IOWR(DASD_IOCTL_LETTER, 240, dasd_symmio_parms_t)
 
 #endif				/* DASD_H */
 
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h
index 8a096b83f51f..0e3b35f96be1 100644
--- a/arch/s390/include/asm/delay.h
+++ b/arch/s390/include/asm/delay.h
@@ -14,10 +14,12 @@
 #ifndef _S390_DELAY_H
 #define _S390_DELAY_H
 
-extern void __udelay(unsigned long long usecs);
-extern void udelay_simple(unsigned long long usecs);
-extern void __delay(unsigned long loops);
+void __ndelay(unsigned long long nsecs);
+void __udelay(unsigned long long usecs);
+void udelay_simple(unsigned long long usecs);
+void __delay(unsigned long loops);
 
+#define ndelay(n) __ndelay((unsigned long long) (n))
 #define udelay(n) __udelay((unsigned long long) (n))
 #define mdelay(n) __udelay((unsigned long long) (n) * 1000)
 
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 72b2e2f2d32d..7e91c58072e2 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -9,9 +9,22 @@
 #define _ASM_S390_DIAG_H
 
 /*
- * Diagnose 10: Release pages
+ * Diagnose 10: Release page range
  */
-extern void diag10(unsigned long addr);
+static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
+{
+	unsigned long start_addr, end_addr;
+
+	start_addr = start_pfn << PAGE_SHIFT;
+	end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
+
+	asm volatile(
+		"0:	diag	%0,%1,0x10\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		EX_TABLE(1b, 1b)
+		: : "a" (start_addr), "a" (end_addr));
+}
 
 /*
  * Diagnose 14: Input spool file manipulation
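
Note: diag10_range() inlines the former diag10() call and takes a page frame
range; the EX_TABLE entries let the diagnose fault harmlessly where it is not
supported. A hedged caller sketch (the helper name is hypothetical; diagnose
0x10 only applies under z/VM):

	static void foo_release_page(unsigned long addr)
	{
		if (MACHINE_IS_VM)
			diag10_range(addr >> PAGE_SHIFT, 1);
	}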
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 354d42616c7e..64b61bf72e93 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -161,7 +161,9 @@ extern unsigned int vdso_enabled;
    use of this is to invoke "./ld.so someprog" to test out a new version of
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
+
+extern unsigned long randomize_et_dyn(unsigned long base);
+#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -194,17 +196,7 @@ do { \
 } while (0)
 #endif /* __s390x__ */
 
-/*
- * An executable for which elf_read_implies_exec() returns TRUE will
- * have the READ_IMPLIES_EXEC personality flag set automatically.
- */
-#define elf_read_implies_exec(ex, executable_stack)	\
-({							\
-	if (current->mm->context.noexec &&		\
-	    executable_stack != EXSTACK_DISABLE_X)	\
-		disable_noexec(current->mm, current);	\
-	current->mm->context.noexec == 0;		\
-})
+#define STACK_RND_MASK	0x7ffUL
 
 #define ARCH_DLINFO							\
 do {									\
@@ -218,4 +210,7 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 int arch_setup_additional_pages(struct linux_binprm *, int);
 
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
 #endif
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 96c14a9102b8..b7931faaef6d 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,25 +4,20 @@
 #ifndef __ASSEMBLY__
 
 extern void _mcount(void);
-extern unsigned long ftrace_dyn_func;
 
 struct dyn_arch_ftrace { };
 
 #define MCOUNT_ADDR ((long)_mcount)
 
 #ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 18
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET	  14
+#define MCOUNT_INSN_SIZE  12
 #else
-#define MCOUNT_OFFSET_RET 26
-#define MCOUNT_INSN_SIZE  30
-#define MCOUNT_OFFSET	   8
+#define MCOUNT_INSN_SIZE  20
 #endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
-	return addr - MCOUNT_OFFSET;
+	return addr;
 }
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 5c5d02de49e9..81cf36b691f1 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -7,7 +7,7 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
-						int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
+	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
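
Note: futex_atomic_cmpxchg_inatomic() now returns the value read from user
space through *uval and keeps the return code for fault reporting, matching
the cross-architecture futex rework. A sketch of the new calling convention
(uaddr, expected and desired assumed in scope):

	u32 cur;
	int ret;

	ret = futex_atomic_cmpxchg_inatomic(&cur, uaddr, expected, desired);
	if (ret)
		return ret;		/* -EFAULT */
	if (cur != expected)
		return -EAGAIN;		/* lost the race */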
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 498bc3892385..e4155d3eb2cb 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -12,10 +12,6 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
 #include <asm/lowcore.h>
 
 #define local_softirq_pending() (S390_lowcore.softirq_pending)
@@ -25,20 +21,4 @@
 
 #define HARDIRQ_BITS	8
 
-void clock_comparator_work(void);
-
-static inline unsigned long long local_tick_disable(void)
-{
-	unsigned long long old;
-
-	old = S390_lowcore.clock_comparator;
-	S390_lowcore.clock_comparator = -1ULL;
-	return old;
-}
-
-static inline void local_tick_enable(unsigned long long comp)
-{
-	S390_lowcore.clock_comparator = comp;
-}
-
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index bb8343d157bc..799ed0f1643d 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -37,32 +37,6 @@ static inline int prepare_hugepage_range(struct file *file,
 int arch_prepare_hugepage(struct page *page);
 void arch_release_hugepage(struct page *page);
 
-static inline pte_t pte_mkhuge(pte_t pte)
-{
-	/*
-	 * PROT_NONE needs to be remapped from the pte type to the ste type.
-	 * The HW invalid bit is also different for pte and ste. The pte
-	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
-	 * bit, so we don't have to clear it.
-	 */
-	if (pte_val(pte) & _PAGE_INVALID) {
-		if (pte_val(pte) & _PAGE_SWT)
-			pte_val(pte) |= _HPAGE_TYPE_NONE;
-		pte_val(pte) |= _SEGMENT_ENTRY_INV;
-	}
-	/*
-	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
-	 * table entry.
-	 */
-	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
-	/*
-	 * Also set the change-override bit because we don't need dirty bit
-	 * tracking for hugetlbfs pages.
-	 */
-	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
-	return pte;
-}
-
 static inline pte_t huge_pte_wrprotect(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_RO;
@@ -137,21 +111,10 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 {
 	pmd_t *pmdp = (pmd_t *) ptep;
 
-	if (!MACHINE_HAS_IDTE) {
-		__pmd_csp(pmdp);
-		if (mm->context.noexec) {
-			pmdp = get_shadow_table(pmdp);
-			__pmd_csp(pmdp);
-		}
-		return;
-	}
-
-	__pmd_idte(address, pmdp);
-	if (mm->context.noexec) {
-		pmdp = get_shadow_table(pmdp);
+	if (MACHINE_HAS_IDTE)
 		__pmd_idte(address, pmdp);
-	}
-	return;
+	else
+		__pmd_csp(pmdp);
 }
 
 #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
diff --git a/arch/s390/include/asm/ioctls.h b/arch/s390/include/asm/ioctls.h
index 2f3d8736361f..960a4c1ebdf1 100644
--- a/arch/s390/include/asm/ioctls.h
+++ b/arch/s390/include/asm/ioctls.h
@@ -1,94 +1,8 @@
-/*
- *  include/asm-s390/ioctls.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/ioctls.h"
- */
-
 #ifndef __ARCH_S390_IOCTLS_H__
 #define __ARCH_S390_IOCTLS_H__
 
-#include <asm/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS		0x5401
-#define TCSETS		0x5402
-#define TCSETSW		0x5403
-#define TCSETSF		0x5404
-#define TCGETA		0x5405
-#define TCSETA		0x5406
-#define TCSETAW		0x5407
-#define TCSETAF		0x5408
-#define TCSBRK		0x5409
-#define TCXONC		0x540A
-#define TCFLSH		0x540B
-#define TIOCEXCL	0x540C
-#define TIOCNXCL	0x540D
-#define TIOCSCTTY	0x540E
-#define TIOCGPGRP	0x540F
-#define TIOCSPGRP	0x5410
-#define TIOCOUTQ	0x5411
-#define TIOCSTI		0x5412
-#define TIOCGWINSZ	0x5413
-#define TIOCSWINSZ	0x5414
-#define TIOCMGET	0x5415
-#define TIOCMBIS	0x5416
-#define TIOCMBIC	0x5417
-#define TIOCMSET	0x5418
-#define TIOCGSOFTCAR	0x5419
-#define TIOCSSOFTCAR	0x541A
-#define FIONREAD	0x541B
-#define TIOCINQ		FIONREAD
-#define TIOCLINUX	0x541C
-#define TIOCCONS	0x541D
-#define TIOCGSERIAL	0x541E
-#define TIOCSSERIAL	0x541F
-#define TIOCPKT		0x5420
-#define FIONBIO		0x5421
-#define TIOCNOTTY	0x5422
-#define TIOCSETD	0x5423
-#define TIOCGETD	0x5424
-#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
-#define TIOCSBRK	0x5427	/* BSD compatibility */
-#define TIOCCBRK	0x5428	/* BSD compatibility */
-#define TIOCGSID	0x5429	/* Return the session ID of FD */
-#define TCGETS2		_IOR('T',0x2A, struct termios2)
-#define TCSETS2		_IOW('T',0x2B, struct termios2)
-#define TCSETSW2	_IOW('T',0x2C, struct termios2)
-#define TCSETSF2	_IOW('T',0x2D, struct termios2)
-#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
-#define TIOCSIG		_IOW('T',0x36, int)  /* Generate signal on Pty slave */
-
-#define FIONCLEX	0x5450	/* these numbers need to be adjusted. */
-#define FIOCLEX		0x5451
-#define FIOASYNC	0x5452
-#define TIOCSERCONFIG	0x5453
-#define TIOCSERGWILD	0x5454
-#define TIOCSERSWILD	0x5455
-#define TIOCGLCKTRMIOS	0x5456
-#define TIOCSLCKTRMIOS	0x5457
-#define TIOCSERGSTRUCT	0x5458	/* For debugging only */
-#define TIOCSERGETLSR	0x5459	/* Get line status register */
-#define TIOCSERGETMULTI 0x545A	/* Get multiport config  */
-#define TIOCSERSETMULTI 0x545B	/* Set multiport config */
-
-#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
-#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
 #define FIOQSIZE	0x545E
 
-/* Used for packet mode */
-#define TIOCPKT_DATA		 0
-#define TIOCPKT_FLUSHREAD	 1
-#define TIOCPKT_FLUSHWRITE	 2
-#define TIOCPKT_STOP		 4
-#define TIOCPKT_START		 8
-#define TIOCPKT_NOSTOP		16
-#define TIOCPKT_DOSTOP		32
-#define TIOCPKT_IOCTL		64
-
-#define TIOCSER_TEMT	0x01	/* Transmitter physically empty */
+#include <asm-generic/ioctls.h>
 
 #endif
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 7da991a858f8..ba7b01c726a3 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,23 +1,42 @@
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
-#ifdef __KERNEL__
 #include <linux/hardirq.h>
-
-/*
- * the definition of irqs has changed in 2.5.46:
- * NR_IRQS is no longer the number of i/o
- * interrupts (65536), but rather the number
- * of interrupt classes (2).
- * Only external and i/o interrupts make much sense here (CH).
- */
+#include <linux/types.h>
 
 enum interruption_class {
 	EXTERNAL_INTERRUPT,
 	IO_INTERRUPT,
-
+	EXTINT_CLK,
+	EXTINT_IPI,
+	EXTINT_TMR,
+	EXTINT_TLA,
+	EXTINT_PFL,
+	EXTINT_DSD,
+	EXTINT_VRT,
+	EXTINT_SCP,
+	EXTINT_IUC,
+	EXTINT_CPM,
+	IOINT_QAI,
+	IOINT_QDI,
+	IOINT_DAS,
+	IOINT_C15,
+	IOINT_C70,
+	IOINT_TAP,
+	IOINT_VMR,
+	IOINT_LCS,
+	IOINT_CLW,
+	IOINT_CTC,
+	IOINT_APB,
+	NMI_NMI,
 	NR_IRQS,
 };
 
-#endif /* __KERNEL__ */
-#endif
+typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
+
+int register_external_interrupt(u16 code, ext_int_handler_t handler);
+int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
+void service_subclass_irq_register(void);
+void service_subclass_irq_unregister(void);
+
+#endif /* _ASM_IRQ_H */
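
Note: interrupts are now accounted per source class, and external interrupt
handlers get a public registration API. A hedged sketch (the 0x1234 code and
foo_* names are hypothetical):

	static void foo_ext_handler(unsigned int ext_int_code,
				    unsigned int param32, unsigned long param64)
	{
		/* runs in external interrupt context */
	}

	static int __init foo_init(void)
	{
		return register_external_interrupt(0x1234, foo_ext_handler);
	}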
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 15b3ac253898..865d6d891ace 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,8 @@
 
 #include <linux/types.h>
 
-/* store then or system mask. */
-#define __raw_local_irq_stosm(__or)					\
+/* store then OR system mask. */
+#define __arch_local_irq_stosm(__or)					\
 ({									\
 	unsigned long __mask;						\
 	asm volatile(							\
@@ -18,8 +18,8 @@
 	__mask;								\
 })
 
-/* store then and system mask. */
-#define __raw_local_irq_stnsm(__and)					\
+/* store then AND system mask. */
+#define __arch_local_irq_stnsm(__and)					\
 ({									\
 	unsigned long __mask;						\
 	asm volatile(							\
@@ -29,39 +29,44 @@
 })
 
 /* set system mask. */
-#define __raw_local_irq_ssm(__mask)					\
-({									\
-	asm volatile("ssm   %0" : : "Q" (__mask) : "memory");		\
-})
+static inline void __arch_local_irq_ssm(unsigned long flags)
+{
+	asm volatile("ssm   %0" : : "Q" (flags) : "memory");
+}
 
-/* interrupt control.. */
-static inline unsigned long raw_local_irq_enable(void)
+static inline unsigned long arch_local_save_flags(void)
 {
-	return __raw_local_irq_stosm(0x03);
+	return __arch_local_irq_stosm(0x00);
 }
 
-static inline unsigned long raw_local_irq_disable(void)
+static inline unsigned long arch_local_irq_save(void)
 {
-	return __raw_local_irq_stnsm(0xfc);
+	return __arch_local_irq_stnsm(0xfc);
 }
 
-#define raw_local_save_flags(x)						\
-do {									\
-	typecheck(unsigned long, x);					\
-	(x) = __raw_local_irq_stosm(0x00);				\
-} while (0)
+static inline void arch_local_irq_disable(void)
+{
+	arch_local_irq_save();
+}
 
-static inline void raw_local_irq_restore(unsigned long flags)
+static inline void arch_local_irq_enable(void)
 {
-	__raw_local_irq_ssm(flags);
+	__arch_local_irq_stosm(0x03);
 }
 
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	__arch_local_irq_ssm(flags);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
 	return !(flags & (3UL << (BITS_PER_LONG - 8)));
 }
 
-/* For spinlocks etc */
-#define raw_local_irq_save(x)	((x) = raw_local_irq_disable())
+static inline bool arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
 
 #endif /* __ASM_IRQFLAGS_H */
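
Note: the raw_* macros become the arch_local_* inline functions expected by
the generic irqflags code; the flags value is still the PSW system mask
byte. Typical use, as the generic wrappers expand to:

	unsigned long flags;

	flags = arch_local_irq_save();	/* stnsm: disable, return old mask */
	/* critical section */
	arch_local_irq_restore(flags);	/* ssm: restore the saved mask */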
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
new file mode 100644
index 000000000000..95a6cf2b5b67
--- /dev/null
+++ b/arch/s390/include/asm/jump_label.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_S390_JUMP_LABEL_H
+#define _ASM_S390_JUMP_LABEL_H
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 6
+
+#ifdef CONFIG_64BIT
+#define ASM_PTR ".quad"
+#define ASM_ALIGN ".balign 8"
+#else
+#define ASM_PTR ".long"
+#define ASM_ALIGN ".balign 4"
+#endif
+
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+	asm goto("0:	brcl 0,0\n"
+		".pushsection __jump_table, \"aw\"\n"
+		ASM_ALIGN "\n"
+		ASM_PTR " 0b, %l[label], %0\n"
+		".popsection\n"
+		: : "X" (key) : : label);
+	return false;
+label:
+	return true;
+}
+
+typedef unsigned long jump_label_t;
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif
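
Note: arch_static_branch() emits a six-byte "brcl 0,0" nop and records the
code/target/key triple in the __jump_table section so the nop can later be
patched into a branch. A hedged consumer sketch, assuming the static_branch()
wrapper of this kernel generation (the key name is hypothetical):

	static struct jump_label_key foo_key;

	static void foo_hot_path(void)
	{
		if (static_branch(&foo_key))	/* brcl 0,0 until enabled */
			foo_do_rare_thing();
	}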
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 330f68caffe4..a231a9439c4b 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -31,7 +31,6 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
-#define  __ARCH_WANT_KPROBES_INSN_SLOT
 struct pt_regs;
 struct kprobe;
 
@@ -58,23 +57,12 @@ typedef u16 kprobe_opcode_t;
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
-	kprobe_opcode_t *insn;
-	int fixup;
-	int ilen;
-	int reg;
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
 };
 
-struct ins_replace_args {
-	kprobe_opcode_t *ptr;
-	kprobe_opcode_t old;
-	kprobe_opcode_t new;
-};
 struct prev_kprobe {
 	struct kprobe *kp;
 	unsigned long status;
-	unsigned long saved_psw;
-	unsigned long kprobe_saved_imask;
-	unsigned long kprobe_saved_ctl[3];
 };
 
 /* per-cpu kprobe control block */
@@ -82,17 +70,13 @@ struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	unsigned long kprobe_saved_imask;
 	unsigned long kprobe_saved_ctl[3];
-	struct pt_regs jprobe_saved_regs;
-	unsigned long jprobe_saved_r14;
-	unsigned long jprobe_saved_r15;
 	struct prev_kprobe prev_kprobe;
+	struct pt_regs jprobe_saved_regs;
 	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *p);
 void kretprobe_trampoline(void);
-int  is_prohibited_opcode(kprobe_opcode_t *instruction);
-void get_instruction_type(struct arch_specific_insn *ainsn);
 
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h
index acdfdff26611..72f614181eff 100644
--- a/arch/s390/include/asm/kvm_virtio.h
+++ b/arch/s390/include/asm/kvm_virtio.h
@@ -54,4 +54,11 @@ struct kvm_vqconfig {
  * This is pagesize for historical reasons. */
 #define KVM_S390_VIRTIO_RING_ALIGN	4096
 
+
+/* These values are supposed to be in ext_params on an interrupt */
+#define VIRTIO_PARAM_MASK		0xff
+#define VIRTIO_PARAM_VRING_INTERRUPT	0x0
+#define VIRTIO_PARAM_CONFIG_CHANGED	0x1
+#define VIRTIO_PARAM_DEV_ADD		0x2
+
 #endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 0f97ef2d92ac..228cf0b295db 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -124,7 +124,7 @@ struct _lowcore {
 	/* Address space pointer. */
 	__u32	kernel_asce;			/* 0x02ac */
 	__u32	user_asce;			/* 0x02b0 */
-	__u32	user_exec_asce;			/* 0x02b4 */
+	__u32	current_pid;			/* 0x02b4 */
 
 	/* SMP info area */
 	__u32	cpu_nr;				/* 0x02b8 */
@@ -150,9 +150,10 @@ struct _lowcore {
 	 */
 	__u32	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e04 */
+	__u8	pad_0x0e08[0x0f00-0x0e08];	/* 0x0e08 */
 
-	/* Align to the top 1k of prefix area */
-	__u8	pad_0x0e08[0x1000-0x0e08];	/* 0x0e08 */
+	/* Extended facility list */
+	__u64	stfle_fac_list[32];		/* 0x0f00 */
 } __packed;
 
 #else /* CONFIG_32BIT */
@@ -254,7 +255,7 @@ struct _lowcore {
 	/* Address space pointer. */
 	__u64	kernel_asce;			/* 0x0310 */
 	__u64	user_asce;			/* 0x0318 */
-	__u64	user_exec_asce;			/* 0x0320 */
+	__u64	current_pid;			/* 0x0320 */
 
 	/* SMP info area */
 	__u32	cpu_nr;				/* 0x0328 */
@@ -285,7 +286,11 @@ struct _lowcore {
 	 */
 	__u64	ipib;				/* 0x0e00 */
 	__u32	ipib_checksum;			/* 0x0e08 */
-	__u8	pad_0x0e0c[0x11b8-0x0e0c];	/* 0x0e0c */
+	__u8	pad_0x0e0c[0x0f00-0x0e0c];	/* 0x0e0c */
+
+	/* Extended facility list */
+	__u64	stfle_fac_list[32];		/* 0x0f00 */
+	__u8	pad_0x1000[0x11b8-0x1000];	/* 0x1000 */
 
 	/* 64 bit extparam used for pfault/diag 250: defined by architecture */
 	__u64	ext_params2;			/* 0x11B8 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 78522cdefdd4..82d0847896a0 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,19 +5,18 @@ typedef struct {
 	atomic_t attach_count;
 	unsigned int flush_mm;
 	spinlock_t list_lock;
-	struct list_head crst_list;
 	struct list_head pgtable_list;
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
-	int noexec;
-	int has_pgste;	 /* The mmu context has extended page tables */
-	int alloc_pgste; /* cloned contexts will have extended page tables */
+	/* Cloned contexts will be created with extended page tables. */
+	unsigned int alloc_pgste:1;
+	/* The mmu context has extended page tables. */
+	unsigned int has_pgste:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)						      \
 	.context.list_lock    = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
-	.context.crst_list    = LIST_HEAD_INIT(name.context.crst_list),      \
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
 
 #endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index a6f0e7cc9cde..5682f160ff82 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-	if (current->mm->context.alloc_pgste) {
+	if (current->mm && current->mm->context.alloc_pgste) {
 		/*
 		 * alloc_pgste indicates, that any NEW context will be created
 		 * with extended page tables. The old context is unchanged. The
@@ -35,11 +35,9 @@ static inline int init_new_context(struct task_struct *tsk,
35 * and if has_pgste is set, it will create extended page 35 * and if has_pgste is set, it will create extended page
36 * tables. 36 * tables.
37 */ 37 */
38 mm->context.noexec = 0;
39 mm->context.has_pgste = 1; 38 mm->context.has_pgste = 1;
40 mm->context.alloc_pgste = 1; 39 mm->context.alloc_pgste = 1;
41 } else { 40 } else {
42 mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
43 mm->context.has_pgste = 0; 41 mm->context.has_pgste = 0;
44 mm->context.alloc_pgste = 0; 42 mm->context.alloc_pgste = 0;
45 } 43 }
@@ -63,10 +61,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
63 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 61 S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
64 if (user_mode != HOME_SPACE_MODE) { 62 if (user_mode != HOME_SPACE_MODE) {
65 /* Load primary space page table origin. */ 63 /* Load primary space page table origin. */
66 pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
67 S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
68 asm volatile(LCTL_OPCODE" 1,1,%0\n" 64 asm volatile(LCTL_OPCODE" 1,1,%0\n"
69 : : "m" (S390_lowcore.user_exec_asce) ); 65 : : "m" (S390_lowcore.user_asce) );
70 } else 66 } else
71 /* Load home space page table origin. */ 67 /* Load home space page table origin. */
72 asm volatile(LCTL_OPCODE" 13,13,%0" 68 asm volatile(LCTL_OPCODE" 13,13,%0"
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
index 458c1f7fbc18..688271f5f2e4 100644
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,3 +7,5 @@
7 */ 7 */
8 8
9#include <asm-generic/mutex-dec.h> 9#include <asm-generic/mutex-dec.h>
10
11#define arch_mutex_cpu_relax() barrier()
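The override matters because cpu_relax() on s390 may yield the cpu to the hypervisor, which is far too expensive inside the adaptive mutex spin loop; a plain compiler barrier merely forces the flag to be re-read. A self-contained sketch of the loop shape this tunes (the flag and function are illustrative):

/* Illustrative spin loop: arch_mutex_cpu_relax() is now only a compiler
 * barrier, so each iteration re-reads memory without yielding the cpu. */
#define arch_mutex_cpu_relax()  __asm__ __volatile__("" : : : "memory")

static void spin_until_clear(volatile int *flag)
{
        while (*flag)
                arch_mutex_cpu_relax();
}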
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index af650fb47206..accb372ddc7e 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -90,6 +90,7 @@ static inline void copy_page(void *to, void *from)
90 */ 90 */
91 91
92typedef struct { unsigned long pgprot; } pgprot_t; 92typedef struct { unsigned long pgprot; } pgprot_t;
93typedef struct { unsigned long pgste; } pgste_t;
93typedef struct { unsigned long pte; } pte_t; 94typedef struct { unsigned long pte; } pte_t;
94typedef struct { unsigned long pmd; } pmd_t; 95typedef struct { unsigned long pmd; } pmd_t;
95typedef struct { unsigned long pud; } pud_t; 96typedef struct { unsigned long pud; } pud_t;
@@ -97,35 +98,91 @@ typedef struct { unsigned long pgd; } pgd_t;
97typedef pte_t *pgtable_t; 98typedef pte_t *pgtable_t;
98 99
99#define pgprot_val(x) ((x).pgprot) 100#define pgprot_val(x) ((x).pgprot)
101#define pgste_val(x) ((x).pgste)
100#define pte_val(x) ((x).pte) 102#define pte_val(x) ((x).pte)
101#define pmd_val(x) ((x).pmd) 103#define pmd_val(x) ((x).pmd)
102#define pud_val(x) ((x).pud) 104#define pud_val(x) ((x).pud)
103#define pgd_val(x) ((x).pgd) 105#define pgd_val(x) ((x).pgd)
104 106
107#define __pgste(x) ((pgste_t) { (x) } )
105#define __pte(x) ((pte_t) { (x) } ) 108#define __pte(x) ((pte_t) { (x) } )
106#define __pmd(x) ((pmd_t) { (x) } ) 109#define __pmd(x) ((pmd_t) { (x) } )
110#define __pud(x) ((pud_t) { (x) } )
107#define __pgd(x) ((pgd_t) { (x) } ) 111#define __pgd(x) ((pgd_t) { (x) } )
108#define __pgprot(x) ((pgprot_t) { (x) } ) 112#define __pgprot(x) ((pgprot_t) { (x) } )
109 113
110static inline void 114static inline void page_set_storage_key(unsigned long addr,
111page_set_storage_key(unsigned long addr, unsigned int skey) 115 unsigned char skey, int mapped)
112{ 116{
113 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); 117 if (!mapped)
118 asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
119 : : "d" (skey), "a" (addr));
120 else
121 asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
114} 122}
115 123
116static inline unsigned int 124static inline unsigned char page_get_storage_key(unsigned long addr)
117page_get_storage_key(unsigned long addr)
118{ 125{
119 unsigned int skey; 126 unsigned char skey;
120 127
121 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0)); 128 asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
122 return skey; 129 return skey;
123} 130}
124 131
132static inline int page_reset_referenced(unsigned long addr)
133{
134 unsigned int ipm;
135
136 asm volatile(
137 " rrbe 0,%1\n"
138 " ipm %0\n"
139 : "=d" (ipm) : "a" (addr) : "cc");
140 return !!(ipm & 0x20000000);
141}
142
143/* Bits in the storage key */
144#define _PAGE_CHANGED 0x02 /* HW changed bit */
145#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
146#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
147#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
148
149/*
150 * Test and clear dirty bit in storage key.
151 * We can't clear the changed bit atomically. This is a potential
152 * race against modification of the referenced bit. This function
153 * should therefore only be called if it is not mapped in any
154 * address space.
155 */
156#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
157static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
158{
159 unsigned char skey;
160
161 skey = page_get_storage_key(pfn << PAGE_SHIFT);
162 if (!(skey & _PAGE_CHANGED))
163 return 0;
164 page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
165 return 1;
166}
167
168/*
169 * Test and clear referenced bit in storage key.
170 */
171#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
172static inline int page_test_and_clear_young(unsigned long pfn)
173{
174 return page_reset_referenced(pfn << PAGE_SHIFT);
175}
176
125struct page; 177struct page;
126void arch_free_page(struct page *page, int order); 178void arch_free_page(struct page *page, int order);
127void arch_alloc_page(struct page *page, int order); 179void arch_alloc_page(struct page *page, int order);
128 180
181static inline int devmem_is_allowed(unsigned long pfn)
182{
183 return 0;
184}
185
129#define HAVE_ARCH_FREE_PAGE 186#define HAVE_ARCH_FREE_PAGE
130#define HAVE_ARCH_ALLOC_PAGE 187#define HAVE_ARCH_ALLOC_PAGE
131 188
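As the comment above warns, the changed bit cannot be cleared atomically, so page_test_and_clear_dirty() is only safe on pages not mapped in any address space; the mapped flag selects between the plain and the nonquiescing SSKE form. A userspace model of the test-and-clear logic, with a plain array standing in for the hardware storage keys:

/* Model of the storage-key dirty test: read the key, and only write it
 * back (non-atomically) when the changed bit was set. */
#include <stdio.h>

#define _PAGE_CHANGED   0x02    /* HW changed bit, as defined above */

static unsigned char skey[16];  /* stand-in for per-page storage keys */

static int page_test_and_clear_dirty(unsigned long pfn)
{
        unsigned char key = skey[pfn];

        if (!(key & _PAGE_CHANGED))
                return 0;
        skey[pfn] = key & ~_PAGE_CHANGED;       /* racy, see comment above */
        return 1;
}

int main(void)
{
        skey[3] = _PAGE_CHANGED;
        printf("%d %d\n", page_test_and_clear_dirty(3),
               page_test_and_clear_dirty(3));   /* prints "1 0" */
        return 0;
}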
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index f7ad8719d02d..5325c89a5843 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -1,6 +1,9 @@
1#ifndef __ARCH_S390_PERCPU__ 1#ifndef __ARCH_S390_PERCPU__
2#define __ARCH_S390_PERCPU__ 2#define __ARCH_S390_PERCPU__
3 3
4#include <linux/preempt.h>
5#include <asm/cmpxchg.h>
6
4/* 7/*
5 * s390 uses its own implementation for per cpu data, the offset of 8 * s390 uses its own implementation for per cpu data, the offset of
6 * the cpu local data area is cached in the cpu's lowcore memory. 9 * the cpu local data area is cached in the cpu's lowcore memory.
@@ -16,6 +19,71 @@
16#define ARCH_NEEDS_WEAK_PER_CPU 19#define ARCH_NEEDS_WEAK_PER_CPU
17#endif 20#endif
18 21
22#define arch_irqsafe_cpu_to_op(pcp, val, op) \
23do { \
24 typedef typeof(pcp) pcp_op_T__; \
25 pcp_op_T__ old__, new__, prev__; \
26 pcp_op_T__ *ptr__; \
27 preempt_disable(); \
28 ptr__ = __this_cpu_ptr(&(pcp)); \
29 prev__ = *ptr__; \
30 do { \
31 old__ = prev__; \
32 new__ = old__ op (val); \
33 switch (sizeof(*ptr__)) { \
34 case 8: \
35 prev__ = cmpxchg64(ptr__, old__, new__); \
36 break; \
37 default: \
38 prev__ = cmpxchg(ptr__, old__, new__); \
39 } \
40 } while (prev__ != old__); \
41 preempt_enable(); \
42} while (0)
43
44#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
45#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
46#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
47#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
48
49#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
50#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
51#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
52#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
53
54#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
55#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
56#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
57#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
58
59#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
60#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
61#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
62#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
63
64#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) \
65({ \
66 typedef typeof(pcp) pcp_op_T__; \
67 pcp_op_T__ ret__; \
68 pcp_op_T__ *ptr__; \
69 preempt_disable(); \
70 ptr__ = __this_cpu_ptr(&(pcp)); \
71 switch (sizeof(*ptr__)) { \
72 case 8: \
73 ret__ = cmpxchg64(ptr__, oval, nval); \
74 break; \
75 default: \
76 ret__ = cmpxchg(ptr__, oval, nval); \
77 } \
78 preempt_enable(); \
79 ret__; \
80})
81
82#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
83#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
84#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
85#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
86
19#include <asm-generic/percpu.h> 87#include <asm-generic/percpu.h>
20 88
21#endif /* __ARCH_S390_PERCPU__ */ 89#endif /* __ARCH_S390_PERCPU__ */
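The irqsafe_cpu_* overrides above avoid disabling interrupts altogether: compare-and-swap on s390 is atomic with respect to interrupts, so fencing preemption around a cmpxchg retry loop is sufficient. A portable model of that retry loop, with C11 atomics standing in for cmpxchg/cmpxchg64:

/* Model of arch_irqsafe_cpu_to_op(): retry the read-modify-write until
 * no other updater slipped in between the load and the CAS. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long counter;

static void irqsafe_add(unsigned long val)
{
        unsigned long old = atomic_load(&counter);

        while (!atomic_compare_exchange_weak(&counter, &old, old + val))
                ;       /* `old' was reloaded by the failed CAS; retry */
}

int main(void)
{
        irqsafe_add(3);
        irqsafe_add(4);
        printf("%lu\n", atomic_load(&counter)); /* prints 7 */
        return 0;
}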
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 3840cbe77637..a75f168d2718 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -4,7 +4,6 @@
4 * Copyright 2009 Martin Schwidefsky, IBM Corporation. 4 * Copyright 2009 Martin Schwidefsky, IBM Corporation.
5 */ 5 */
6 6
 7static inline void set_perf_event_pending(void) {} 7/* Empty, just to avoid a compile error */
8static inline void clear_perf_event_pending(void) {}
9 8
10#define PERF_EVENT_INDEX_OFFSET 0 9#define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 68940d0bad91..38e71ebcd3c2 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -17,14 +17,15 @@
17#include <linux/gfp.h> 17#include <linux/gfp.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19 19
20#define check_pgt_cache() do {} while (0) 20unsigned long *crst_table_alloc(struct mm_struct *);
21
22unsigned long *crst_table_alloc(struct mm_struct *, int);
23void crst_table_free(struct mm_struct *, unsigned long *); 21void crst_table_free(struct mm_struct *, unsigned long *);
24 22
25unsigned long *page_table_alloc(struct mm_struct *); 23unsigned long *page_table_alloc(struct mm_struct *);
26void page_table_free(struct mm_struct *, unsigned long *); 24void page_table_free(struct mm_struct *, unsigned long *);
27void disable_noexec(struct mm_struct *, struct task_struct *); 25#ifdef CONFIG_HAVE_RCU_TABLE_FREE
26void page_table_free_rcu(struct mmu_gather *, unsigned long *);
27void __tlb_remove_table(void *_table);
28#endif
28 29
29static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 30static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
30{ 31{
@@ -48,9 +49,6 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
48static inline void crst_table_init(unsigned long *crst, unsigned long entry) 49static inline void crst_table_init(unsigned long *crst, unsigned long entry)
49{ 50{
50 clear_table(crst, entry, sizeof(unsigned long)*2048); 51 clear_table(crst, entry, sizeof(unsigned long)*2048);
51 crst = get_shadow_table(crst);
52 if (crst)
53 clear_table(crst, entry, sizeof(unsigned long)*2048);
54} 52}
55 53
56#ifndef __s390x__ 54#ifndef __s390x__
@@ -67,10 +65,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
67#define pmd_free(mm, x) do { } while (0) 65#define pmd_free(mm, x) do { } while (0)
68 66
69#define pgd_populate(mm, pgd, pud) BUG() 67#define pgd_populate(mm, pgd, pud) BUG()
70#define pgd_populate_kernel(mm, pgd, pud) BUG()
71
72#define pud_populate(mm, pud, pmd) BUG() 68#define pud_populate(mm, pud, pmd) BUG()
73#define pud_populate_kernel(mm, pud, pmd) BUG()
74 69
75#else /* __s390x__ */ 70#else /* __s390x__ */
76 71
@@ -88,7 +83,7 @@ void crst_table_downgrade(struct mm_struct *, unsigned long limit);
88 83
89static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 84static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
90{ 85{
91 unsigned long *table = crst_table_alloc(mm, mm->context.noexec); 86 unsigned long *table = crst_table_alloc(mm);
92 if (table) 87 if (table)
93 crst_table_init(table, _REGION3_ENTRY_EMPTY); 88 crst_table_init(table, _REGION3_ENTRY_EMPTY);
94 return (pud_t *) table; 89 return (pud_t *) table;
@@ -97,43 +92,21 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
97 92
98static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 93static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
99{ 94{
100 unsigned long *table = crst_table_alloc(mm, mm->context.noexec); 95 unsigned long *table = crst_table_alloc(mm);
101 if (table) 96 if (table)
102 crst_table_init(table, _SEGMENT_ENTRY_EMPTY); 97 crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
103 return (pmd_t *) table; 98 return (pmd_t *) table;
104} 99}
105#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd) 100#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
106 101
107static inline void pgd_populate_kernel(struct mm_struct *mm,
108 pgd_t *pgd, pud_t *pud)
109{
110 pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
111}
112
113static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) 102static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
114{ 103{
115 pgd_populate_kernel(mm, pgd, pud); 104 pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
116 if (mm->context.noexec) {
117 pgd = get_shadow_table(pgd);
118 pud = get_shadow_table(pud);
119 pgd_populate_kernel(mm, pgd, pud);
120 }
121}
122
123static inline void pud_populate_kernel(struct mm_struct *mm,
124 pud_t *pud, pmd_t *pmd)
125{
126 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
127} 105}
128 106
129static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 107static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
130{ 108{
131 pud_populate_kernel(mm, pud, pmd); 109 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
132 if (mm->context.noexec) {
133 pud = get_shadow_table(pud);
134 pmd = get_shadow_table(pmd);
135 pud_populate_kernel(mm, pud, pmd);
136 }
137} 110}
138 111
139#endif /* __s390x__ */ 112#endif /* __s390x__ */
@@ -141,29 +114,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
141static inline pgd_t *pgd_alloc(struct mm_struct *mm) 114static inline pgd_t *pgd_alloc(struct mm_struct *mm)
142{ 115{
143 spin_lock_init(&mm->context.list_lock); 116 spin_lock_init(&mm->context.list_lock);
144 INIT_LIST_HEAD(&mm->context.crst_list);
145 INIT_LIST_HEAD(&mm->context.pgtable_list); 117 INIT_LIST_HEAD(&mm->context.pgtable_list);
146 return (pgd_t *) 118 return (pgd_t *) crst_table_alloc(mm);
147 crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
148} 119}
149#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) 120#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
150 121
151static inline void pmd_populate_kernel(struct mm_struct *mm,
152 pmd_t *pmd, pte_t *pte)
153{
154 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
155}
156
157static inline void pmd_populate(struct mm_struct *mm, 122static inline void pmd_populate(struct mm_struct *mm,
158 pmd_t *pmd, pgtable_t pte) 123 pmd_t *pmd, pgtable_t pte)
159{ 124{
160 pmd_populate_kernel(mm, pmd, pte); 125 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
161 if (mm->context.noexec) {
162 pmd = get_shadow_table(pmd);
163 pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
164 }
165} 126}
166 127
128#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
129
167#define pmd_pgtable(pmd) \ 130#define pmd_pgtable(pmd) \
168 (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE) 131 (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
169 132
@@ -176,4 +139,6 @@ static inline void pmd_populate(struct mm_struct *mm,
176#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) 139#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
177#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) 140#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
178 141
142extern void rcu_table_freelist_finish(void);
143
179#endif /* _S390_PGALLOC_H */ 144#endif /* _S390_PGALLOC_H */
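With the noexec shadow tables gone, the populate helpers collapse to a single store and crst_table_init() is one fill of the 2048-entry region/segment table with the empty pattern. A userspace model of the allocate-and-initialize step; malloc() stands in for the kernel's page allocator:

/* Model of crst_table_alloc() + crst_table_init(): an s390 crst table
 * has 2048 eight-byte entries, all preset to an empty/invalid entry. */
#include <stdlib.h>

#define CRST_ENTRIES    2048

static void clear_table(unsigned long *s, unsigned long val, size_t n)
{
        size_t i;

        for (i = 0; i < n / sizeof(unsigned long); i++)
                s[i] = val;
}

static unsigned long *crst_table_alloc_init(unsigned long empty)
{
        unsigned long *table = malloc(CRST_ENTRIES * sizeof(*table));

        if (table)
                clear_table(table, empty, CRST_ENTRIES * sizeof(*table));
        return table;
}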
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 3157441ee1da..801fbe1d837d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -31,13 +31,13 @@
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/mm_types.h> 33#include <linux/mm_types.h>
34#include <asm/bitops.h>
35#include <asm/bug.h> 34#include <asm/bug.h>
36#include <asm/processor.h> 35#include <asm/page.h>
37 36
38extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 37extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
39extern void paging_init(void); 38extern void paging_init(void);
40extern void vmem_map_init(void); 39extern void vmem_map_init(void);
40extern void fault_init(void);
41 41
42/* 42/*
43 * The S390 doesn't have any external MMU info: the kernel page 43 * The S390 doesn't have any external MMU info: the kernel page
@@ -46,11 +46,27 @@ extern void vmem_map_init(void);
46#define update_mmu_cache(vma, address, ptep) do { } while (0) 46#define update_mmu_cache(vma, address, ptep) do { } while (0)
47 47
48/* 48/*
49 * ZERO_PAGE is a global shared page that is always zero: used 49 * ZERO_PAGE is a global shared page that is always zero; used
50 * for zero-mapped memory areas etc.. 50 * for zero-mapped memory areas etc..
51 */ 51 */
52extern char empty_zero_page[PAGE_SIZE]; 52
53#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 53extern unsigned long empty_zero_page;
54extern unsigned long zero_page_mask;
55
56#define ZERO_PAGE(vaddr) \
57 (virt_to_page((void *)(empty_zero_page + \
58	 (((unsigned long)(vaddr)) & zero_page_mask))))
59
60#define is_zero_pfn is_zero_pfn
61static inline int is_zero_pfn(unsigned long pfn)
62{
63 extern unsigned long zero_pfn;
64 unsigned long offset_from_zero_pfn = pfn - zero_pfn;
65 return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
66}
67
68#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
69
54#endif /* !__ASSEMBLY__ */ 70#endif /* !__ASSEMBLY__ */
55 71
56/* 72/*
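The hunk above replaces the single empty_zero_page with a bank of zero pages: zero_page_mask selects the copy whose cache color matches the faulting address, and is_zero_pfn() relies on unsigned wrap-around so that one comparison covers both range bounds. A small model of the arithmetic; all addresses are made up:

/* Model of the colored zero page selection with four copies, i.e.
 * zero_page_mask = 3 * PAGE_SIZE. Out-of-range pfns wrap far above the
 * mask in is_zero_pfn(), so a single compare suffices. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

static unsigned long empty_zero_page = 0x100000;        /* hypothetical */
static unsigned long zero_page_mask = 3 * PAGE_SIZE;    /* 4 copies */
static unsigned long zero_pfn = 0x100000 >> PAGE_SHIFT;

static int is_zero_pfn(unsigned long pfn)
{
        unsigned long offset = pfn - zero_pfn;  /* may wrap */

        return offset <= (zero_page_mask >> PAGE_SHIFT);
}

int main(void)
{
        unsigned long vaddr = 0x20a2000UL;
        unsigned long zpage = empty_zero_page + (vaddr & zero_page_mask);

        printf("copy %#lx, is_zero_pfn=%d\n",
               zpage, is_zero_pfn(zpage >> PAGE_SHIFT));
        return 0;
}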
@@ -226,11 +242,13 @@ extern unsigned long VMALLOC_START;
226/* Software bits in the page table entry */ 242/* Software bits in the page table entry */
227#define _PAGE_SWT 0x001 /* SW pte type bit t */ 243#define _PAGE_SWT 0x001 /* SW pte type bit t */
228#define _PAGE_SWX 0x002 /* SW pte type bit x */ 244#define _PAGE_SWX 0x002 /* SW pte type bit x */
229#define _PAGE_SPECIAL 0x004 /* SW associated with special page */ 245#define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */
246#define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */
247#define _PAGE_SPECIAL 0x010 /* SW associated with special page */
230#define __HAVE_ARCH_PTE_SPECIAL 248#define __HAVE_ARCH_PTE_SPECIAL
231 249
232/* Set of bits not changed in pte_modify */ 250/* Set of bits not changed in pte_modify */
233#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL) 251#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)
234 252
235/* Six different types of pages. */ 253/* Six different types of pages. */
236#define _PAGE_TYPE_EMPTY 0x400 254#define _PAGE_TYPE_EMPTY 0x400
@@ -239,8 +257,6 @@ extern unsigned long VMALLOC_START;
239#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ 257#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
240#define _PAGE_TYPE_RO 0x200 258#define _PAGE_TYPE_RO 0x200
241#define _PAGE_TYPE_RW 0x000 259#define _PAGE_TYPE_RW 0x000
242#define _PAGE_TYPE_EX_RO 0x202
243#define _PAGE_TYPE_EX_RW 0x002
244 260
245/* 261/*
246 * Only four types for huge pages, using the invalid bit and protection bit 262 * Only four types for huge pages, using the invalid bit and protection bit
@@ -270,8 +286,6 @@ extern unsigned long VMALLOC_START;
270 * _PAGE_TYPE_FILE 11?1 -> 11?1 286 * _PAGE_TYPE_FILE 11?1 -> 11?1
271 * _PAGE_TYPE_RO 0100 -> 1100 287 * _PAGE_TYPE_RO 0100 -> 1100
272 * _PAGE_TYPE_RW 0000 -> 1000 288 * _PAGE_TYPE_RW 0000 -> 1000
273 * _PAGE_TYPE_EX_RO 0110 -> 1110
274 * _PAGE_TYPE_EX_RW 0010 -> 1010
275 * 289 *
276 * pte_none is true for bits combinations 1000, 1010, 1100, 1110 290 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
277 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 291 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
@@ -279,16 +293,6 @@ extern unsigned long VMALLOC_START;
279 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. 293 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
280 */ 294 */
281 295
282/* Page status table bits for virtualization */
283#define RCP_PCL_BIT 55
284#define RCP_HR_BIT 54
285#define RCP_HC_BIT 53
286#define RCP_GR_BIT 50
287#define RCP_GC_BIT 49
288
289/* User dirty bit for KVM's migration feature */
290#define KVM_UD_BIT 47
291
292#ifndef __s390x__ 296#ifndef __s390x__
293 297
294/* Bits in the segment table address-space-control-element */ 298/* Bits in the segment table address-space-control-element */
@@ -300,6 +304,7 @@ extern unsigned long VMALLOC_START;
300 304
301/* Bits in the segment table entry */ 305/* Bits in the segment table entry */
302#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ 306#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
307#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
303#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ 308#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
304#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ 309#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
305#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ 310#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
@@ -307,6 +312,19 @@ extern unsigned long VMALLOC_START;
307#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) 312#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
308#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) 313#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
309 314
315/* Page status table bits for virtualization */
316#define RCP_ACC_BITS 0xf0000000UL
317#define RCP_FP_BIT 0x08000000UL
318#define RCP_PCL_BIT 0x00800000UL
319#define RCP_HR_BIT 0x00400000UL
320#define RCP_HC_BIT 0x00200000UL
321#define RCP_GR_BIT 0x00040000UL
322#define RCP_GC_BIT 0x00020000UL
323
324/* User dirty / referenced bit for KVM's migration feature */
325#define KVM_UR_BIT 0x00008000UL
326#define KVM_UC_BIT 0x00004000UL
327
310#else /* __s390x__ */ 328#else /* __s390x__ */
311 329
312/* Bits in the segment/region table address-space-control-element */ 330/* Bits in the segment/region table address-space-control-element */
@@ -349,6 +367,19 @@ extern unsigned long VMALLOC_START;
349#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ 367#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */
350#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ 368#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */
351 369
370/* Page status table bits for virtualization */
371#define RCP_ACC_BITS 0xf000000000000000UL
372#define RCP_FP_BIT 0x0800000000000000UL
373#define RCP_PCL_BIT 0x0080000000000000UL
374#define RCP_HR_BIT 0x0040000000000000UL
375#define RCP_HC_BIT 0x0020000000000000UL
376#define RCP_GR_BIT 0x0004000000000000UL
377#define RCP_GC_BIT 0x0002000000000000UL
378
379/* User dirty / referenced bit for KVM's migration feature */
380#define KVM_UR_BIT 0x0000800000000000UL
381#define KVM_UC_BIT 0x0000400000000000UL
382
352#endif /* __s390x__ */ 383#endif /* __s390x__ */
353 384
354/* 385/*
@@ -359,85 +390,54 @@ extern unsigned long VMALLOC_START;
359#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \ 390#define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
360 _ASCE_ALT_EVENT) 391 _ASCE_ALT_EVENT)
361 392
362/* Bits int the storage key */
363#define _PAGE_CHANGED 0x02 /* HW changed bit */
364#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
365
366/* 393/*
367 * Page protection definitions. 394 * Page protection definitions.
368 */ 395 */
369#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) 396#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
370#define PAGE_RO __pgprot(_PAGE_TYPE_RO) 397#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
371#define PAGE_RW __pgprot(_PAGE_TYPE_RW) 398#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
372#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
373#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)
374 399
375#define PAGE_KERNEL PAGE_RW 400#define PAGE_KERNEL PAGE_RW
376#define PAGE_COPY PAGE_RO 401#define PAGE_COPY PAGE_RO
377 402
378/* 403/*
379 * Dependent on the EXEC_PROTECT option s390 can do execute protection. 404 * On s390 the page table entry has an invalid bit and a read-only bit.
380 * Write permission always implies read permission. In theory with a 405 * Read permission implies execute permission and write permission
381 * primary/secondary page table execute only can be implemented but 406 * implies read permission.
382 * it would cost an additional bit in the pte to distinguish all the
383 * different pte types. To avoid that execute permission currently
384 * implies read permission as well.
385 */ 407 */
386 /*xwr*/ 408 /*xwr*/
387#define __P000 PAGE_NONE 409#define __P000 PAGE_NONE
388#define __P001 PAGE_RO 410#define __P001 PAGE_RO
389#define __P010 PAGE_RO 411#define __P010 PAGE_RO
390#define __P011 PAGE_RO 412#define __P011 PAGE_RO
391#define __P100 PAGE_EX_RO 413#define __P100 PAGE_RO
392#define __P101 PAGE_EX_RO 414#define __P101 PAGE_RO
393#define __P110 PAGE_EX_RO 415#define __P110 PAGE_RO
394#define __P111 PAGE_EX_RO 416#define __P111 PAGE_RO
395 417
396#define __S000 PAGE_NONE 418#define __S000 PAGE_NONE
397#define __S001 PAGE_RO 419#define __S001 PAGE_RO
398#define __S010 PAGE_RW 420#define __S010 PAGE_RW
399#define __S011 PAGE_RW 421#define __S011 PAGE_RW
400#define __S100 PAGE_EX_RO 422#define __S100 PAGE_RO
401#define __S101 PAGE_EX_RO 423#define __S101 PAGE_RO
402#define __S110 PAGE_EX_RW 424#define __S110 PAGE_RW
403#define __S111 PAGE_EX_RW 425#define __S111 PAGE_RW
404
405#ifndef __s390x__
406# define PxD_SHADOW_SHIFT 1
407#else /* __s390x__ */
408# define PxD_SHADOW_SHIFT 2
409#endif /* __s390x__ */
410 426
411static inline void *get_shadow_table(void *table) 427static inline int mm_exclusive(struct mm_struct *mm)
412{ 428{
413 unsigned long addr, offset; 429 return likely(mm == current->active_mm &&
414 struct page *page; 430 atomic_read(&mm->context.attach_count) <= 1);
415
416 addr = (unsigned long) table;
417 offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
418 page = virt_to_page((void *)(addr ^ offset));
419 return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
420} 431}
421 432
422/* 433static inline int mm_has_pgste(struct mm_struct *mm)
423 * Certain architectures need to do special things when PTEs
424 * within a page table are directly modified. Thus, the following
425 * hook is made available.
426 */
427static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
428 pte_t *ptep, pte_t entry)
429{ 434{
430 *ptep = entry; 435#ifdef CONFIG_PGSTE
431 if (mm->context.noexec) { 436 if (unlikely(mm->context.has_pgste))
432 if (!(pte_val(entry) & _PAGE_INVALID) && 437 return 1;
433 (pte_val(entry) & _PAGE_SWX)) 438#endif
434 pte_val(entry) |= _PAGE_RO; 439 return 0;
435 else
436 pte_val(entry) = _PAGE_TYPE_EMPTY;
437 ptep[PTRS_PER_PTE] = entry;
438 }
439} 440}
440
441/* 441/*
442 * pgd/pmd/pte query functions 442 * pgd/pmd/pte query functions
443 */ 443 */
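Every pte helper that follows shares one shape: mm_has_pgste() gates all KVM bookkeeping and compiles to a constant 0 without CONFIG_PGSTE, so the non-KVM fast path takes no lock at all. A compact sketch of the guard pattern, with simplified stand-in types:

/* Sketch of the pgste guard pattern (types are stand-ins): the lock is
 * only taken when the mm really has extended page tables. */
struct mm_like { int has_pgste; };
typedef unsigned long pte_t;

static int mm_has_pgste(struct mm_like *mm) { return mm->has_pgste; }
static void pgste_lock(pte_t *ptep)     { (void)ptep; /* cs loop */ }
static void pgste_unlock(pte_t *ptep)   { (void)ptep; /* clearing store */ }

static void set_pte_guarded(struct mm_like *mm, pte_t *ptep, pte_t entry)
{
        if (mm_has_pgste(mm)) {
                pgste_lock(ptep);       /* serialize against KVM updaters */
                *ptep = entry;
                pgste_unlock(ptep);
        } else
                *ptep = entry;          /* common case: plain store */
}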
@@ -550,55 +550,130 @@ static inline int pte_special(pte_t pte)
550} 550}
551 551
552#define __HAVE_ARCH_PTE_SAME 552#define __HAVE_ARCH_PTE_SAME
553#define pte_same(a,b) (pte_val(a) == pte_val(b)) 553static inline int pte_same(pte_t a, pte_t b)
554{
555 return pte_val(a) == pte_val(b);
556}
554 557
555static inline void rcp_lock(pte_t *ptep) 558static inline pgste_t pgste_get_lock(pte_t *ptep)
556{ 559{
560 unsigned long new = 0;
557#ifdef CONFIG_PGSTE 561#ifdef CONFIG_PGSTE
558 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 562 unsigned long old;
563
559 preempt_disable(); 564 preempt_disable();
560 while (test_and_set_bit(RCP_PCL_BIT, pgste)) 565 asm(
561 ; 566 " lg %0,%2\n"
567 "0: lgr %1,%0\n"
568 " nihh %0,0xff7f\n" /* clear RCP_PCL_BIT in old */
569 " oihh %1,0x0080\n" /* set RCP_PCL_BIT in new */
570 " csg %0,%1,%2\n"
571 " jl 0b\n"
572 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
573 : "Q" (ptep[PTRS_PER_PTE]) : "cc");
562#endif 574#endif
575 return __pgste(new);
563} 576}
564 577
565static inline void rcp_unlock(pte_t *ptep) 578static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
566{ 579{
567#ifdef CONFIG_PGSTE 580#ifdef CONFIG_PGSTE
568 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 581 asm(
569 clear_bit(RCP_PCL_BIT, pgste); 582 " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */
583 " stg %1,%0\n"
584 : "=Q" (ptep[PTRS_PER_PTE])
585 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
570 preempt_enable(); 586 preempt_enable();
571#endif 587#endif
572} 588}
573 589
574/* forward declaration for SetPageUptodate in page-flags.h*/ 590static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
575static inline void page_clear_dirty(struct page *page);
576#include <linux/page-flags.h>
577
578static inline void ptep_rcp_copy(pte_t *ptep)
579{ 591{
580#ifdef CONFIG_PGSTE 592#ifdef CONFIG_PGSTE
581 struct page *page = virt_to_page(pte_val(*ptep)); 593 unsigned long address, bits;
582 unsigned int skey; 594 unsigned char skey;
583 unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 595
584 596 address = pte_val(*ptep) & PAGE_MASK;
585 skey = page_get_storage_key(page_to_phys(page)); 597 skey = page_get_storage_key(address);
586 if (skey & _PAGE_CHANGED) { 598 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
587 set_bit_simple(RCP_GC_BIT, pgste); 599 /* Clear page changed & referenced bit in the storage key */
588 set_bit_simple(KVM_UD_BIT, pgste); 600 if (bits) {
589 } 601 skey ^= bits;
590 if (skey & _PAGE_REFERENCED) 602 page_set_storage_key(address, skey, 1);
591 set_bit_simple(RCP_GR_BIT, pgste);
592 if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
593 SetPageDirty(page);
594 set_bit_simple(KVM_UD_BIT, pgste);
595 } 603 }
596 if (test_and_clear_bit_simple(RCP_HR_BIT, pgste)) 604 /* Transfer page changed & referenced bit to guest bits in pgste */
597 SetPageReferenced(page); 605 pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
606 /* Get host changed & referenced bits from pgste */
607 bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
608 /* Clear host bits in pgste. */
609 pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
610 pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
611 /* Copy page access key and fetch protection bit to pgste */
612 pgste_val(pgste) |=
613 (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
614 /* Transfer changed and referenced to kvm user bits */
615 pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */
616 /* Transfer changed & referenced to pte software bits */
617 pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */
618#endif
619 return pgste;
620
621}
622
623static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
624{
625#ifdef CONFIG_PGSTE
626 int young;
627
628 young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
629 /* Transfer page referenced bit to pte software bit (host view) */
630 if (young || (pgste_val(pgste) & RCP_HR_BIT))
631 pte_val(*ptep) |= _PAGE_SWR;
632 /* Clear host referenced bit in pgste. */
633 pgste_val(pgste) &= ~RCP_HR_BIT;
634 /* Transfer page referenced bit to guest bit in pgste */
635 pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
636#endif
637 return pgste;
638
639}
640
641static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
642{
643#ifdef CONFIG_PGSTE
644 unsigned long address;
645 unsigned long okey, nkey;
646
647 address = pte_val(*ptep) & PAGE_MASK;
648 okey = nkey = page_get_storage_key(address);
649 nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
650 /* Set page access key and fetch protection bit from pgste */
651 nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
652 if (okey != nkey)
653 page_set_storage_key(address, nkey, 1);
598#endif 654#endif
599} 655}
600 656
601/* 657/*
658 * Certain architectures need to do special things when PTEs
659 * within a page table are directly modified. Thus, the following
660 * hook is made available.
661 */
662static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
663 pte_t *ptep, pte_t entry)
664{
665 pgste_t pgste;
666
667 if (mm_has_pgste(mm)) {
668 pgste = pgste_get_lock(ptep);
669 pgste_set_pte(ptep, pgste);
670 *ptep = entry;
671 pgste_set_unlock(ptep, pgste);
672 } else
673 *ptep = entry;
674}
675
676/*
602 * query functions pte_write/pte_dirty/pte_young only work if 677 * query functions pte_write/pte_dirty/pte_young only work if
603 * pte_present() is true. Undefined behaviour if not.. 678 * pte_present() is true. Undefined behaviour if not..
604 */ 679 */
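pgste_get_lock() above turns the PCL bit of the pgste into a spinlock: the csg loop can only succeed when it observed the bit clear, and pgste_set_unlock() releases by storing the value with the bit masked off. A portable model of the same protocol, with C11 atomics in place of csg:

/* Model of the pgste PCL-bit lock: spin with compare-and-swap until the
 * lock bit is seen clear, then publish it set atomically. */
#include <stdatomic.h>

#define RCP_PCL_BIT     0x0080000000000000UL

static unsigned long pgste_get_lock(_Atomic unsigned long *pgste)
{
        unsigned long old, new;

        old = atomic_load(pgste);
        do {
                old &= ~RCP_PCL_BIT;    /* only attempt when it looks free */
                new = old | RCP_PCL_BIT;
        } while (!atomic_compare_exchange_weak(pgste, &old, new));
        return new;
}

static void pgste_set_unlock(_Atomic unsigned long *pgste, unsigned long val)
{
        atomic_store(pgste, val & ~RCP_PCL_BIT);
}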
@@ -609,19 +684,19 @@ static inline int pte_write(pte_t pte)
609 684
610static inline int pte_dirty(pte_t pte) 685static inline int pte_dirty(pte_t pte)
611{ 686{
612 /* A pte is neither clean nor dirty on s/390. The dirty bit 687#ifdef CONFIG_PGSTE
613 * is in the storage key. See page_test_and_clear_dirty for 688 if (pte_val(pte) & _PAGE_SWC)
614 * details. 689 return 1;
615 */ 690#endif
616 return 0; 691 return 0;
617} 692}
618 693
619static inline int pte_young(pte_t pte) 694static inline int pte_young(pte_t pte)
620{ 695{
621 /* A pte is neither young nor old on s/390. The young bit 696#ifdef CONFIG_PGSTE
622 * is in the storage key. See page_test_and_clear_young for 697 if (pte_val(pte) & _PAGE_SWR)
623 * details. 698 return 1;
624 */ 699#endif
625 return 0; 700 return 0;
626} 701}
627 702
@@ -629,64 +704,30 @@ static inline int pte_young(pte_t pte)
629 * pgd/pmd/pte modification functions 704 * pgd/pmd/pte modification functions
630 */ 705 */
631 706
632#ifndef __s390x__ 707static inline void pgd_clear(pgd_t *pgd)
633
634#define pgd_clear(pgd) do { } while (0)
635#define pud_clear(pud) do { } while (0)
636
637#else /* __s390x__ */
638
639static inline void pgd_clear_kernel(pgd_t * pgd)
640{ 708{
709#ifdef __s390x__
641 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 710 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
642 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 711 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
712#endif
643} 713}
644 714
645static inline void pgd_clear(pgd_t * pgd) 715static inline void pud_clear(pud_t *pud)
646{
647 pgd_t *shadow = get_shadow_table(pgd);
648
649 pgd_clear_kernel(pgd);
650 if (shadow)
651 pgd_clear_kernel(shadow);
652}
653
654static inline void pud_clear_kernel(pud_t *pud)
655{ 716{
717#ifdef __s390x__
656 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 718 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
657 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 719 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
720#endif
658} 721}
659 722
660static inline void pud_clear(pud_t *pud) 723static inline void pmd_clear(pmd_t *pmdp)
661{
662 pud_t *shadow = get_shadow_table(pud);
663
664 pud_clear_kernel(pud);
665 if (shadow)
666 pud_clear_kernel(shadow);
667}
668
669#endif /* __s390x__ */
670
671static inline void pmd_clear_kernel(pmd_t * pmdp)
672{ 724{
673 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 725 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
674} 726}
675 727
676static inline void pmd_clear(pmd_t *pmd)
677{
678 pmd_t *shadow = get_shadow_table(pmd);
679
680 pmd_clear_kernel(pmd);
681 if (shadow)
682 pmd_clear_kernel(shadow);
683}
684
685static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 728static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
686{ 729{
687 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 730 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
688 if (mm->context.noexec)
689 pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
690} 731}
691 732
692/* 733/*
@@ -716,35 +757,27 @@ static inline pte_t pte_mkwrite(pte_t pte)
716 757
717static inline pte_t pte_mkclean(pte_t pte) 758static inline pte_t pte_mkclean(pte_t pte)
718{ 759{
719 /* The only user of pte_mkclean is the fork() code. 760#ifdef CONFIG_PGSTE
720 We must *not* clear the *physical* page dirty bit 761 pte_val(pte) &= ~_PAGE_SWC;
721 just because fork() wants to clear the dirty bit in 762#endif
722 *one* of the page's mappings. So we just do nothing. */
723 return pte; 763 return pte;
724} 764}
725 765
726static inline pte_t pte_mkdirty(pte_t pte) 766static inline pte_t pte_mkdirty(pte_t pte)
727{ 767{
728 /* We do not explicitly set the dirty bit because the
729 * sske instruction is slow. It is faster to let the
730 * next instruction set the dirty bit.
731 */
732 return pte; 768 return pte;
733} 769}
734 770
735static inline pte_t pte_mkold(pte_t pte) 771static inline pte_t pte_mkold(pte_t pte)
736{ 772{
737 /* S/390 doesn't keep its dirty/referenced bit in the pte. 773#ifdef CONFIG_PGSTE
738 * There is no point in clearing the real referenced bit. 774 pte_val(pte) &= ~_PAGE_SWR;
739 */ 775#endif
740 return pte; 776 return pte;
741} 777}
742 778
743static inline pte_t pte_mkyoung(pte_t pte) 779static inline pte_t pte_mkyoung(pte_t pte)
744{ 780{
745 /* S/390 doesn't keep its dirty/referenced bit in the pte.
746 * There is no point in setting the real referenced bit.
747 */
748 return pte; 781 return pte;
749} 782}
750 783
@@ -754,62 +787,88 @@ static inline pte_t pte_mkspecial(pte_t pte)
754 return pte; 787 return pte;
755} 788}
756 789
757#ifdef CONFIG_PGSTE 790#ifdef CONFIG_HUGETLB_PAGE
791static inline pte_t pte_mkhuge(pte_t pte)
792{
793 /*
794 * PROT_NONE needs to be remapped from the pte type to the ste type.
795 * The HW invalid bit is also different for pte and ste. The pte
796 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
797 * bit, so we don't have to clear it.
798 */
799 if (pte_val(pte) & _PAGE_INVALID) {
800 if (pte_val(pte) & _PAGE_SWT)
801 pte_val(pte) |= _HPAGE_TYPE_NONE;
802 pte_val(pte) |= _SEGMENT_ENTRY_INV;
803 }
804 /*
805 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
806 * table entry.
807 */
808 pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
809 /*
810 * Also set the change-override bit because we don't need dirty bit
811 * tracking for hugetlbfs pages.
812 */
813 pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
814 return pte;
815}
816#endif
817
758/* 818/*
759 * Get (and clear) the user dirty bit for a PTE. 819 * Get (and clear) the user dirty bit for a pte.
760 */ 820 */
761static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm, 821static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
762 pte_t *ptep) 822 pte_t *ptep)
763{ 823{
764 int dirty; 824 pgste_t pgste;
765 unsigned long *pgste; 825 int dirty = 0;
766 struct page *page; 826
767 unsigned int skey; 827 if (mm_has_pgste(mm)) {
768 828 pgste = pgste_get_lock(ptep);
769 if (!mm->context.has_pgste) 829 pgste = pgste_update_all(ptep, pgste);
770 return -EINVAL; 830 dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
771 rcp_lock(ptep); 831 pgste_val(pgste) &= ~KVM_UC_BIT;
772 pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 832 pgste_set_unlock(ptep, pgste);
773 page = virt_to_page(pte_val(*ptep)); 833 return dirty;
774 skey = page_get_storage_key(page_to_phys(page));
775 if (skey & _PAGE_CHANGED) {
776 set_bit_simple(RCP_GC_BIT, pgste);
777 set_bit_simple(KVM_UD_BIT, pgste);
778 } 834 }
779 if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
780 SetPageDirty(page);
781 set_bit_simple(KVM_UD_BIT, pgste);
782 }
783 dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
784 if (skey & _PAGE_CHANGED)
785 page_clear_dirty(page);
786 rcp_unlock(ptep);
787 return dirty; 835 return dirty;
788} 836}
789#endif 837
838/*
839 * Get (and clear) the user referenced bit for a pte.
840 */
841static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
842 pte_t *ptep)
843{
844 pgste_t pgste;
845 int young = 0;
846
847 if (mm_has_pgste(mm)) {
848 pgste = pgste_get_lock(ptep);
849 pgste = pgste_update_young(ptep, pgste);
850 young = !!(pgste_val(pgste) & KVM_UR_BIT);
851 pgste_val(pgste) &= ~KVM_UR_BIT;
852 pgste_set_unlock(ptep, pgste);
853 }
854 return young;
855}
790 856
791#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 857#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
792static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 858static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
793 unsigned long addr, pte_t *ptep) 859 unsigned long addr, pte_t *ptep)
794{ 860{
795#ifdef CONFIG_PGSTE 861 pgste_t pgste;
796 unsigned long physpage; 862 pte_t pte;
797 int young;
798 unsigned long *pgste;
799 863
800 if (!vma->vm_mm->context.has_pgste) 864 if (mm_has_pgste(vma->vm_mm)) {
801 return 0; 865 pgste = pgste_get_lock(ptep);
802 physpage = pte_val(*ptep) & PAGE_MASK; 866 pgste = pgste_update_young(ptep, pgste);
803 pgste = (unsigned long *) (ptep + PTRS_PER_PTE); 867 pte = *ptep;
804 868 *ptep = pte_mkold(pte);
805 young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0); 869 pgste_set_unlock(ptep, pgste);
806 rcp_lock(ptep); 870 return pte_young(pte);
807 if (young) 871 }
808 set_bit_simple(RCP_GR_BIT, pgste);
809 young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
810 rcp_unlock(ptep);
811 return young;
812#endif
813 return 0; 872 return 0;
814} 873}
815 874
@@ -821,10 +880,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
821 * On s390 reference bits are in storage key and never in TLB 880 * On s390 reference bits are in storage key and never in TLB
822 * With virtualization we handle the reference bit, without we 881 * With virtualization we handle the reference bit, without we
823 * we can simply return */ 882 * we can simply return */
824#ifdef CONFIG_PGSTE
825 return ptep_test_and_clear_young(vma, address, ptep); 883 return ptep_test_and_clear_young(vma, address, ptep);
826#endif
827 return 0;
828} 884}
829 885
830static inline void __ptep_ipte(unsigned long address, pte_t *ptep) 886static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
@@ -844,25 +900,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
844 } 900 }
845} 901}
846 902
847static inline void ptep_invalidate(struct mm_struct *mm,
848 unsigned long address, pte_t *ptep)
849{
850 if (mm->context.has_pgste) {
851 rcp_lock(ptep);
852 __ptep_ipte(address, ptep);
853 ptep_rcp_copy(ptep);
854 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
855 rcp_unlock(ptep);
856 return;
857 }
858 __ptep_ipte(address, ptep);
859 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
860 if (mm->context.noexec) {
861 __ptep_ipte(address, ptep + PTRS_PER_PTE);
862 pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
863 }
864}
865
866/* 903/*
867 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush 904 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
868 * both clear the TLB for the unmapped pte. The reason is that 905 * both clear the TLB for the unmapped pte. The reason is that
@@ -877,24 +914,72 @@ static inline void ptep_invalidate(struct mm_struct *mm,
877 * is a nop. 914 * is a nop.
878 */ 915 */
879#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 916#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
880#define ptep_get_and_clear(__mm, __address, __ptep) \ 917static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
881({ \ 918 unsigned long address, pte_t *ptep)
882 pte_t __pte = *(__ptep); \ 919{
883 (__mm)->context.flush_mm = 1; \ 920 pgste_t pgste;
884 if (atomic_read(&(__mm)->context.attach_count) > 1 || \ 921 pte_t pte;
885 (__mm) != current->active_mm) \ 922
886 ptep_invalidate(__mm, __address, __ptep); \ 923 mm->context.flush_mm = 1;
887 else \ 924 if (mm_has_pgste(mm))
888 pte_clear((__mm), (__address), (__ptep)); \ 925 pgste = pgste_get_lock(ptep);
889 __pte; \ 926
890}) 927 pte = *ptep;
928 if (!mm_exclusive(mm))
929 __ptep_ipte(address, ptep);
930 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
931
932 if (mm_has_pgste(mm)) {
933 pgste = pgste_update_all(&pte, pgste);
934 pgste_set_unlock(ptep, pgste);
935 }
936 return pte;
937}
938
939#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
940static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
941 unsigned long address,
942 pte_t *ptep)
943{
944 pte_t pte;
945
946 mm->context.flush_mm = 1;
947 if (mm_has_pgste(mm))
948 pgste_get_lock(ptep);
949
950 pte = *ptep;
951 if (!mm_exclusive(mm))
952 __ptep_ipte(address, ptep);
953 return pte;
954}
955
956static inline void ptep_modify_prot_commit(struct mm_struct *mm,
957 unsigned long address,
958 pte_t *ptep, pte_t pte)
959{
960 *ptep = pte;
961 if (mm_has_pgste(mm))
962 pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
963}
891 964
892#define __HAVE_ARCH_PTEP_CLEAR_FLUSH 965#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
893static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, 966static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
894 unsigned long address, pte_t *ptep) 967 unsigned long address, pte_t *ptep)
895{ 968{
896 pte_t pte = *ptep; 969 pgste_t pgste;
897 ptep_invalidate(vma->vm_mm, address, ptep); 970 pte_t pte;
971
972 if (mm_has_pgste(vma->vm_mm))
973 pgste = pgste_get_lock(ptep);
974
975 pte = *ptep;
976 __ptep_ipte(address, ptep);
977 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
978
979 if (mm_has_pgste(vma->vm_mm)) {
980 pgste = pgste_update_all(&pte, pgste);
981 pgste_set_unlock(ptep, pgste);
982 }
898 return pte; 983 return pte;
899} 984}
900 985
@@ -907,76 +992,67 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
907 */ 992 */
908#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 993#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
909static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, 994static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
910 unsigned long addr, 995 unsigned long address,
911 pte_t *ptep, int full) 996 pte_t *ptep, int full)
912{ 997{
913 pte_t pte = *ptep; 998 pgste_t pgste;
999 pte_t pte;
1000
1001 if (mm_has_pgste(mm))
1002 pgste = pgste_get_lock(ptep);
914 1003
915 if (full) 1004 pte = *ptep;
916 pte_clear(mm, addr, ptep); 1005 if (!full)
917 else 1006 __ptep_ipte(address, ptep);
918 ptep_invalidate(mm, addr, ptep); 1007 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
1008
1009 if (mm_has_pgste(mm)) {
1010 pgste = pgste_update_all(&pte, pgste);
1011 pgste_set_unlock(ptep, pgste);
1012 }
919 return pte; 1013 return pte;
920} 1014}
921 1015
922#define __HAVE_ARCH_PTEP_SET_WRPROTECT 1016#define __HAVE_ARCH_PTEP_SET_WRPROTECT
923#define ptep_set_wrprotect(__mm, __addr, __ptep) \ 1017static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
924({ \ 1018 unsigned long address, pte_t *ptep)
925 pte_t __pte = *(__ptep); \ 1019{
926 if (pte_write(__pte)) { \ 1020 pgste_t pgste;
927 (__mm)->context.flush_mm = 1; \ 1021 pte_t pte = *ptep;
928 if (atomic_read(&(__mm)->context.attach_count) > 1 || \
929 (__mm) != current->active_mm) \
930 ptep_invalidate(__mm, __addr, __ptep); \
931 set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
932 } \
933})
934 1022
935#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 1023 if (pte_write(pte)) {
936#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ 1024 mm->context.flush_mm = 1;
937({ \ 1025 if (mm_has_pgste(mm))
938 int __changed = !pte_same(*(__ptep), __entry); \ 1026 pgste = pgste_get_lock(ptep);
939 if (__changed) { \
940 ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
941 set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
942 } \
943 __changed; \
944})
945 1027
946/* 1028 if (!mm_exclusive(mm))
947 * Test and clear dirty bit in storage key. 1029 __ptep_ipte(address, ptep);
948 * We can't clear the changed bit atomically. This is a potential 1030 *ptep = pte_wrprotect(pte);
949 * race against modification of the referenced bit. This function
950 * should therefore only be called if it is not mapped in any
951 * address space.
952 */
953#define __HAVE_ARCH_PAGE_TEST_DIRTY
954static inline int page_test_dirty(struct page *page)
955{
956 return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
957}
958 1031
959#define __HAVE_ARCH_PAGE_CLEAR_DIRTY 1032 if (mm_has_pgste(mm))
960static inline void page_clear_dirty(struct page *page) 1033 pgste_set_unlock(ptep, pgste);
961{ 1034 }
962 page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); 1035 return pte;
963} 1036}
964 1037
965/* 1038#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
966 * Test and clear referenced bit in storage key. 1039static inline int ptep_set_access_flags(struct vm_area_struct *vma,
967 */ 1040 unsigned long address, pte_t *ptep,
968#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 1041 pte_t entry, int dirty)
969static inline int page_test_and_clear_young(struct page *page)
970{ 1042{
971 unsigned long physpage = page_to_phys(page); 1043 pgste_t pgste;
972 int ccode; 1044
973 1045 if (pte_same(*ptep, entry))
974 asm volatile( 1046 return 0;
975 " rrbe 0,%1\n" 1047 if (mm_has_pgste(vma->vm_mm))
976 " ipm %0\n" 1048 pgste = pgste_get_lock(ptep);
977 " srl %0,28\n" 1049
978 : "=d" (ccode) : "a" (physpage) : "cc" ); 1050 __ptep_ipte(address, ptep);
979 return ccode & 2; 1051 *ptep = entry;
1052
1053 if (mm_has_pgste(vma->vm_mm))
1054 pgste_set_unlock(ptep, pgste);
1055 return 1;
980} 1056}
981 1057
982/* 1058/*
@@ -1048,9 +1124,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1048#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) 1124#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
1049#define pte_offset_kernel(pmd, address) pte_offset(pmd,address) 1125#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
1050#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) 1126#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
1051#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
1052#define pte_unmap(pte) do { } while (0) 1127#define pte_unmap(pte) do { } while (0)
1053#define pte_unmap_nested(pte) do { } while (0)
1054 1128
1055/* 1129/*
1056 * 31 bit swap entry format: 1130 * 31 bit swap entry format:
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 73e259834e10..1300c3025334 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -32,7 +32,6 @@ static inline void get_cpu_id(struct cpuid *ptr)
32} 32}
33 33
34extern void s390_adjust_jiffies(void); 34extern void s390_adjust_jiffies(void);
35extern void print_cpu_info(void);
36extern int get_cpu_capability(unsigned int *); 35extern int get_cpu_capability(unsigned int *);
37 36
38/* 37/*
@@ -81,11 +80,11 @@ struct thread_struct {
81 mm_segment_t mm_segment; 80 mm_segment_t mm_segment;
82 unsigned long prot_addr; /* address of protection-excep. */ 81 unsigned long prot_addr; /* address of protection-excep. */
83 unsigned int trap_no; 82 unsigned int trap_no;
84 per_struct per_info; 83 struct per_regs per_user; /* User specified PER registers */
85 /* Used to give failing instruction back to user for ieee exceptions */ 84 struct per_event per_event; /* Cause of the last PER trap */
86 unsigned long ieee_instruction_pointer;
87 /* pfault_wait is used to block the process on a pfault event */ 85 /* pfault_wait is used to block the process on a pfault event */
88 unsigned long pfault_wait; 86 unsigned long pfault_wait;
87 struct list_head list;
89}; 88};
90 89
91typedef struct thread_struct thread_struct; 90typedef struct thread_struct thread_struct;
@@ -150,11 +149,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
150 */ 149 */
151extern unsigned long thread_saved_pc(struct task_struct *t); 150extern unsigned long thread_saved_pc(struct task_struct *t);
152 151
153/*
154 * Print register of task into buffer. Used in fs/proc/array.c.
155 */
156extern void task_show_regs(struct seq_file *m, struct task_struct *task);
157
158extern void show_code(struct pt_regs *regs); 152extern void show_code(struct pt_regs *regs);
159 153
160unsigned long get_wchan(struct task_struct *p); 154unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index e2c218dc68a6..9ad628a8574a 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -331,10 +331,60 @@ struct pt_regs
331 unsigned short ilc; 331 unsigned short ilc;
332 unsigned short svcnr; 332 unsigned short svcnr;
333}; 333};
334
335/*
336 * Program event recording (PER) register set.
337 */
338struct per_regs {
339 unsigned long control; /* PER control bits */
340 unsigned long start; /* PER starting address */
341 unsigned long end; /* PER ending address */
342};
343
344/*
345 * PER event contains information about the cause of the last PER exception.
346 */
347struct per_event {
348 unsigned short cause; /* PER code, ATMID and AI */
349 unsigned long address; /* PER address */
350 unsigned char paid; /* PER access identification */
351};
352
353/*
354 * Simplified per_info structure used to decode the ptrace user space ABI.
355 */
356struct per_struct_kernel {
357 unsigned long cr9; /* PER control bits */
358 unsigned long cr10; /* PER starting address */
359 unsigned long cr11; /* PER ending address */
360 unsigned long bits; /* Obsolete software bits */
361 unsigned long starting_addr; /* User specified start address */
362 unsigned long ending_addr; /* User specified end address */
363 unsigned short perc_atmid; /* PER trap ATMID */
364 unsigned long address; /* PER trap instruction address */
365 unsigned char access_id; /* PER trap access identification */
366};
367
368#define PER_EVENT_MASK 0xE9000000UL
369
370#define PER_EVENT_BRANCH 0x80000000UL
371#define PER_EVENT_IFETCH 0x40000000UL
372#define PER_EVENT_STORE 0x20000000UL
373#define PER_EVENT_STORE_REAL 0x08000000UL
374#define PER_EVENT_NULLIFICATION 0x01000000UL
375
376#define PER_CONTROL_MASK 0x00a00000UL
377
378#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL
379#define PER_CONTROL_ALTERATION 0x00200000UL
380
334#endif 381#endif
335 382
336/* 383/*
337 * Now for the program event recording (trace) definitions. 384 * Now for the user space program event recording (trace) definitions.
385 * The following structures are used only for the ptrace interface, don't
386 * touch or even look at it if you don't want to modify the user-space
387 * ptrace interface. In particular stay away from it for in-kernel PER.
338 */ 388 */
339typedef struct 389typedef struct
340{ 390{
@@ -481,8 +531,7 @@ struct user_regs_struct
481 * watchpoints. This is the way intel does it. 531 * watchpoints. This is the way intel does it.
482 */ 532 */
483 per_struct per_info; 533 per_struct per_info;
484 unsigned long ieee_instruction_pointer; 534 unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
485 /* Used to give failing instruction back to user for ieee exceptions */
486}; 535};
487 536
488#ifdef __KERNEL__ 537#ifdef __KERNEL__
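The new per_regs/per_event pair separates the PER control registers from the decoded cause of the last PER exception; the PER_EVENT_* and PER_CONTROL_* masks mirror the bit layout of control register 9. A small demonstration of decoding a control word with these masks (the sample value is made up):

/* Decode which PER events a control word enables, using the masks
 * defined above. Sample value is illustrative only. */
#include <stdio.h>

#define PER_EVENT_BRANCH        0x80000000UL
#define PER_EVENT_IFETCH        0x40000000UL
#define PER_EVENT_STORE         0x20000000UL

int main(void)
{
        unsigned long control = PER_EVENT_BRANCH | PER_EVENT_STORE;

        printf("branch:%d ifetch:%d store:%d\n",
               !!(control & PER_EVENT_BRANCH),
               !!(control & PER_EVENT_IFETCH),
               !!(control & PER_EVENT_STORE));
        return 0;
}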
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 2ba630276295..15c97625df8d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -139,110 +139,47 @@ struct slib {
139 struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q]; 139 struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
140} __attribute__ ((packed, aligned(2048))); 140} __attribute__ ((packed, aligned(2048)));
141 141
142/** 142#define SBAL_EFLAGS_LAST_ENTRY 0x40
143 * struct sbal_flags - storage block address list flags 143#define SBAL_EFLAGS_CONTIGUOUS 0x20
144 * @last: last entry 144#define SBAL_EFLAGS_FIRST_FRAG 0x04
145 * @cont: contiguous storage 145#define SBAL_EFLAGS_MIDDLE_FRAG 0x08
146 * @frag: fragmentation 146#define SBAL_EFLAGS_LAST_FRAG 0x0c
147 */ 147#define SBAL_EFLAGS_MASK 0x6f
148struct sbal_flags {
149 u8 : 1;
150 u8 last : 1;
151 u8 cont : 1;
152 u8 : 1;
153 u8 frag : 2;
154 u8 : 2;
155} __attribute__ ((packed));
156
157#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
158#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
159#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
160#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
161#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
162 148
163#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL 149#define SBAL_SFLAGS0_PCI_REQ 0x40
150#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20
164 151
165/* Awesome OpenFCP extensions */ 152/* Awesome OpenFCP extensions */
166#define SBAL_FLAGS0_TYPE_STATUS 0x00UL 153#define SBAL_SFLAGS0_TYPE_STATUS 0x00
167#define SBAL_FLAGS0_TYPE_WRITE 0x08UL 154#define SBAL_SFLAGS0_TYPE_WRITE 0x08
168#define SBAL_FLAGS0_TYPE_READ 0x10UL 155#define SBAL_SFLAGS0_TYPE_READ 0x10
169#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL 156#define SBAL_SFLAGS0_TYPE_WRITE_READ 0x18
170#define SBAL_FLAGS0_MORE_SBALS 0x04UL 157#define SBAL_SFLAGS0_MORE_SBALS 0x04
171#define SBAL_FLAGS0_COMMAND 0x02UL 158#define SBAL_SFLAGS0_COMMAND 0x02
172#define SBAL_FLAGS0_LAST_SBAL 0x00UL 159#define SBAL_SFLAGS0_LAST_SBAL 0x00
173#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND 160#define SBAL_SFLAGS0_ONLY_SBAL SBAL_SFLAGS0_COMMAND
174#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS 161#define SBAL_SFLAGS0_MIDDLE_SBAL SBAL_SFLAGS0_MORE_SBALS
175#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND 162#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
176#define SBAL_FLAGS0_PCI 0x40
177
178/**
179 * struct sbal_sbalf_0 - sbal flags for sbale 0
180 * @pci: PCI indicator
181 * @cont: data continuation
182 * @sbtype: storage-block type (FCP)
183 */
184struct sbal_sbalf_0 {
185 u8 : 1;
186 u8 pci : 1;
187 u8 cont : 1;
188 u8 sbtype : 2;
189 u8 : 3;
190} __attribute__ ((packed));
191
192/**
193 * struct sbal_sbalf_1 - sbal flags for sbale 1
194 * @key: storage key
195 */
196struct sbal_sbalf_1 {
197 u8 : 4;
198 u8 key : 4;
199} __attribute__ ((packed));
200
201/**
202 * struct sbal_sbalf_14 - sbal flags for sbale 14
203 * @erridx: error index
204 */
205struct sbal_sbalf_14 {
206 u8 : 4;
207 u8 erridx : 4;
208} __attribute__ ((packed));
209
210/**
211 * struct sbal_sbalf_15 - sbal flags for sbale 15
212 * @reason: reason for error state
213 */
214struct sbal_sbalf_15 {
215 u8 reason;
216} __attribute__ ((packed));
217
218/**
219 * union sbal_sbalf - storage block address list flags
220 * @i0: sbalf0
221 * @i1: sbalf1
222 * @i14: sbalf14
223 * @i15: sbalf15
224 * @value: raw value
225 */
226union sbal_sbalf {
227 struct sbal_sbalf_0 i0;
228 struct sbal_sbalf_1 i1;
229 struct sbal_sbalf_14 i14;
230 struct sbal_sbalf_15 i15;
231 u8 value;
232};
233 163
234/** 164/**
235 * struct qdio_buffer_element - SBAL entry 165 * struct qdio_buffer_element - SBAL entry
236 * @flags: flags 166 * @eflags: SBAL entry flags
167 * @scount: SBAL count
168 * @sflags: whole SBAL flags
237 * @length: length 169 * @length: length
238 * @addr: address 170 * @addr: address
239*/ 171*/
240struct qdio_buffer_element { 172struct qdio_buffer_element {
241 u32 flags; 173 u8 eflags;
174 /* private: */
175 u8 res1;
176 /* public: */
177 u8 scount;
178 u8 sflags;
242 u32 length; 179 u32 length;
243#ifdef CONFIG_32BIT 180#ifdef CONFIG_32BIT
244 /* private: */ 181 /* private: */
245 void *reserved; 182 void *res2;
246 /* public: */ 183 /* public: */
247#endif 184#endif
248 void *addr; 185 void *addr;
@@ -360,6 +297,8 @@ struct qdio_initialize {
360 unsigned int no_output_qs; 297 unsigned int no_output_qs;
361 qdio_handler_t *input_handler; 298 qdio_handler_t *input_handler;
362 qdio_handler_t *output_handler; 299 qdio_handler_t *output_handler;
300 void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
301 int scan_threshold;
363 unsigned long int_parm; 302 unsigned long int_parm;
364 void **input_sbal_addr_array; 303 void **input_sbal_addr_array;
365 void **output_sbal_addr_array; 304 void **output_sbal_addr_array;
@@ -377,11 +316,13 @@ struct qdio_initialize {
377extern int qdio_allocate(struct qdio_initialize *); 316extern int qdio_allocate(struct qdio_initialize *);
378extern int qdio_establish(struct qdio_initialize *); 317extern int qdio_establish(struct qdio_initialize *);
379extern int qdio_activate(struct ccw_device *); 318extern int qdio_activate(struct ccw_device *);
380 319extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
381extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 320 unsigned int);
382 int q_nr, unsigned int bufnr, unsigned int count); 321extern int qdio_start_irq(struct ccw_device *, int);
383extern int qdio_shutdown(struct ccw_device*, int); 322extern int qdio_stop_irq(struct ccw_device *, int);
323extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
324extern int qdio_shutdown(struct ccw_device *, int);
384extern int qdio_free(struct ccw_device *); 325extern int qdio_free(struct ccw_device *);
385extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*); 326extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
386 327
387#endif /* __QDIO_H__ */ 328#endif /* __QDIO_H__ */
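The old bitfield structs are gone; the 32-bit flags word of a SBAL entry is now split into the 8-bit eflags and sflags fields. A minimal sketch of filling the last entry of an FCP write request with the new macros, assuming only the definitions above (fill_last_write_sbale is a hypothetical name):

#include <asm/qdio.h>

/* Hypothetical sketch: set up one SBAL entry with the new split
 * eflags/sflags fields instead of the former 32-bit flags word. */
static void fill_last_write_sbale(struct qdio_buffer_element *sbale,
				  void *data, u32 len)
{
	sbale->addr = data;
	sbale->length = len;
	sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;	 /* last entry in the list    */
	sbale->sflags = SBAL_SFLAGS0_TYPE_WRITE; /* whole-SBAL flags, entry 0 */
}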
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 06cbd1e8c943..90efda0b137d 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -28,39 +28,70 @@ struct qeth_arp_cache_entry {
28 __u8 reserved2[32]; 28 __u8 reserved2[32];
29} __attribute__ ((packed)); 29} __attribute__ ((packed));
30 30
31enum qeth_arp_ipaddrtype {
32 QETHARP_IP_ADDR_V4 = 1,
33 QETHARP_IP_ADDR_V6 = 2,
34};
35struct qeth_arp_entrytype {
36 __u8 mac;
37 __u8 ip;
38} __attribute__((packed));
39
40#define QETH_QARP_MEDIASPECIFIC_BYTES 32
41#define QETH_QARP_MACADDRTYPE_BYTES 1
31struct qeth_arp_qi_entry7 { 42struct qeth_arp_qi_entry7 {
32 __u8 media_specific[32]; 43 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
33 __u8 macaddr_type; 44 struct qeth_arp_entrytype type;
34 __u8 ipaddr_type;
35 __u8 macaddr[6]; 45 __u8 macaddr[6];
36 __u8 ipaddr[4]; 46 __u8 ipaddr[4];
37} __attribute__((packed)); 47} __attribute__((packed));
38 48
49struct qeth_arp_qi_entry7_ipv6 {
50 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
51 struct qeth_arp_entrytype type;
52 __u8 macaddr[6];
53 __u8 ipaddr[16];
54} __attribute__((packed));
55
39struct qeth_arp_qi_entry7_short { 56struct qeth_arp_qi_entry7_short {
40 __u8 macaddr_type; 57 struct qeth_arp_entrytype type;
41 __u8 ipaddr_type;
42 __u8 macaddr[6]; 58 __u8 macaddr[6];
43 __u8 ipaddr[4]; 59 __u8 ipaddr[4];
44} __attribute__((packed)); 60} __attribute__((packed));
45 61
62struct qeth_arp_qi_entry7_short_ipv6 {
63 struct qeth_arp_entrytype type;
64 __u8 macaddr[6];
65 __u8 ipaddr[16];
66} __attribute__((packed));
67
46struct qeth_arp_qi_entry5 { 68struct qeth_arp_qi_entry5 {
47 __u8 media_specific[32]; 69 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
48 __u8 macaddr_type; 70 struct qeth_arp_entrytype type;
49 __u8 ipaddr_type;
50 __u8 ipaddr[4]; 71 __u8 ipaddr[4];
51} __attribute__((packed)); 72} __attribute__((packed));
52 73
74struct qeth_arp_qi_entry5_ipv6 {
75 __u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
76 struct qeth_arp_entrytype type;
77 __u8 ipaddr[16];
78} __attribute__((packed));
79
53struct qeth_arp_qi_entry5_short { 80struct qeth_arp_qi_entry5_short {
54 __u8 macaddr_type; 81 struct qeth_arp_entrytype type;
55 __u8 ipaddr_type;
56 __u8 ipaddr[4]; 82 __u8 ipaddr[4];
57} __attribute__((packed)); 83} __attribute__((packed));
58 84
85struct qeth_arp_qi_entry5_short_ipv6 {
86 struct qeth_arp_entrytype type;
87 __u8 ipaddr[16];
88} __attribute__((packed));
59/* 89/*
60 * can be set by user if no "media specific information" is wanted 90 * can be set by user if no "media specific information" is wanted
61 * -> saves a lot of space in user space buffer 91 * -> saves a lot of space in user space buffer
62 */ 92 */
63#define QETH_QARP_STRIP_ENTRIES 0x8000 93#define QETH_QARP_STRIP_ENTRIES 0x8000
94#define QETH_QARP_WITH_IPV6 0x4000
64#define QETH_QARP_REQUEST_MASK 0x00ff 95#define QETH_QARP_REQUEST_MASK 0x00ff
65 96
66/* data sent to user space as result of query arp ioctl */ 97/* data sent to user space as result of query arp ioctl */
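With the new struct qeth_arp_entrytype, IPv4 and IPv6 records in a query-ARP reply can be told apart by the ip member. A minimal sketch under that assumption (qeth_arp_entry_ip_len is hypothetical):

#include <asm/qeth.h>

/* Hypothetical sketch: derive the address length of one ARP record
 * from the new entrytype field when walking a reply buffer. */
static int qeth_arp_entry_ip_len(struct qeth_arp_entrytype *type)
{
	switch (type->ip) {
	case QETHARP_IP_ADDR_V4:
		return 4;	/* qeth_arp_qi_entry* records      */
	case QETHARP_IP_ADDR_V6:
		return 16;	/* qeth_arp_qi_entry*_ipv6 records */
	default:
		return -1;	/* unknown record, stop parsing    */
	}
}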
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 423fdda2322d..d0eb4653cebd 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -43,29 +43,6 @@
43 43
44#ifdef __KERNEL__ 44#ifdef __KERNEL__
45 45
46#include <linux/list.h>
47#include <linux/spinlock.h>
48
49struct rwsem_waiter;
50
51extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
52extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
53extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
54extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
55extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
56
57/*
58 * the semaphore definition
59 */
60struct rw_semaphore {
61 signed long count;
62 spinlock_t wait_lock;
63 struct list_head wait_list;
64#ifdef CONFIG_DEBUG_LOCK_ALLOC
65 struct lockdep_map dep_map;
66#endif
67};
68
69#ifndef __s390x__ 46#ifndef __s390x__
70#define RWSEM_UNLOCKED_VALUE 0x00000000 47#define RWSEM_UNLOCKED_VALUE 0x00000000
71#define RWSEM_ACTIVE_BIAS 0x00000001 48#define RWSEM_ACTIVE_BIAS 0x00000001
@@ -81,41 +58,6 @@ struct rw_semaphore {
81#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 58#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
82 59
83/* 60/*
84 * initialisation
85 */
86
87#ifdef CONFIG_DEBUG_LOCK_ALLOC
88# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
89#else
90# define __RWSEM_DEP_MAP_INIT(lockname)
91#endif
92
93#define __RWSEM_INITIALIZER(name) \
94 { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
95 LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
96
97#define DECLARE_RWSEM(name) \
98 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
99
100static inline void init_rwsem(struct rw_semaphore *sem)
101{
102 sem->count = RWSEM_UNLOCKED_VALUE;
103 spin_lock_init(&sem->wait_lock);
104 INIT_LIST_HEAD(&sem->wait_list);
105}
106
107extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
108 struct lock_class_key *key);
109
110#define init_rwsem(sem) \
111do { \
112 static struct lock_class_key __key; \
113 \
114 __init_rwsem((sem), #sem, &__key); \
115} while (0)
116
117
118/*
119 * lock for reading 61 * lock for reading
120 */ 62 */
121static inline void __down_read(struct rw_semaphore *sem) 63static inline void __down_read(struct rw_semaphore *sem)
@@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
377 return new; 319 return new;
378} 320}
379 321
380static inline int rwsem_is_locked(struct rw_semaphore *sem)
381{
382 return (sem->count != 0);
383}
384
385#endif /* __KERNEL__ */ 322#endif /* __KERNEL__ */
386#endif /* _S390_RWSEM_H */ 323#endif /* _S390_RWSEM_H */
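The generic rw_semaphore definition now lives in linux/rwsem.h; only the count arithmetic stays here. Purely illustrative, and glossing over the different 31-bit and 64-bit bias values, the count word encodes roughly this (rwsem_state is a hypothetical helper, not kernel API):

#include <linux/rwsem.h>

/* Illustrative only: how the bias constants encode semaphore state. */
static const char *rwsem_state(signed long count)
{
	if (count == RWSEM_UNLOCKED_VALUE)
		return "unlocked";
	if (count > 0)
		return "readers active, no waiters";
	if (count == RWSEM_ACTIVE_WRITE_BIAS)
		return "one writer, no waiters";
	return "contended, waiting bias applied";
}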
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
deleted file mode 100644
index 2afc060266a2..000000000000
--- a/arch/s390/include/asm/s390_ext.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef _S390_EXTINT_H
2#define _S390_EXTINT_H
3
4/*
5 * include/asm-s390/s390_ext.h
6 *
7 * S390 version
8 * Copyright IBM Corp. 1999,2007
9 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
10 * Martin Schwidefsky (schwidefsky@de.ibm.com)
11 */
12
13#include <linux/types.h>
14
15typedef void (*ext_int_handler_t)(__u16 code);
16
17typedef struct ext_int_info_t {
18 struct ext_int_info_t *next;
19 ext_int_handler_t handler;
20 __u16 code;
21} ext_int_info_t;
22
23extern ext_int_info_t *ext_int_hash[];
24
25int register_external_interrupt(__u16 code, ext_int_handler_t handler);
26int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
27 ext_int_info_t *info);
28int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
29int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
30 ext_int_info_t *info);
31
32#endif
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 35d786fe93ae..6d45ef6c12a7 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1 +1,3 @@
1#include <asm-generic/scatterlist.h> 1#include <asm-generic/scatterlist.h>
2
3#define ARCH_HAS_SG_CHAIN
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 25e831d58e1e..d5e2ef10537d 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -73,6 +73,7 @@ extern unsigned int user_mode;
73#define MACHINE_FLAG_PFMF (1UL << 11) 73#define MACHINE_FLAG_PFMF (1UL << 11)
74#define MACHINE_FLAG_LPAR (1UL << 12) 74#define MACHINE_FLAG_LPAR (1UL << 12)
75#define MACHINE_FLAG_SPP (1UL << 13) 75#define MACHINE_FLAG_SPP (1UL << 13)
76#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
76 77
77#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 78#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
78#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 79#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -90,6 +91,7 @@ extern unsigned int user_mode;
90#define MACHINE_HAS_HPAGE (0) 91#define MACHINE_HAS_HPAGE (0)
91#define MACHINE_HAS_PFMF (0) 92#define MACHINE_HAS_PFMF (0)
92#define MACHINE_HAS_SPP (0) 93#define MACHINE_HAS_SPP (0)
94#define MACHINE_HAS_TOPOLOGY (0)
93#else /* __s390x__ */ 95#else /* __s390x__ */
94#define MACHINE_HAS_IEEE (1) 96#define MACHINE_HAS_IEEE (1)
95#define MACHINE_HAS_CSP (1) 97#define MACHINE_HAS_CSP (1)
@@ -100,6 +102,7 @@ extern unsigned int user_mode;
100#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE) 102#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE)
101#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) 103#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
102#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) 104#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
105#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
103#endif /* __s390x__ */ 106#endif /* __s390x__ */
104 107
105#define ZFCPDUMP_HSA_SIZE (32UL<<20) 108#define ZFCPDUMP_HSA_SIZE (32UL<<20)
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index edc03cb9cd79..045e009fc164 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -20,7 +20,6 @@ extern void machine_power_off_smp(void);
20 20
21extern int __cpu_disable (void); 21extern int __cpu_disable (void);
22extern void __cpu_die (unsigned int cpu); 22extern void __cpu_die (unsigned int cpu);
23extern void cpu_die (void) __attribute__ ((noreturn));
24extern int __cpu_up (unsigned int cpu); 23extern int __cpu_up (unsigned int cpu);
25 24
26extern struct mutex smp_cpu_state_mutex; 25extern struct mutex smp_cpu_state_mutex;
@@ -71,8 +70,10 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
71 70
72#ifdef CONFIG_HOTPLUG_CPU 71#ifdef CONFIG_HOTPLUG_CPU
73extern int smp_rescan_cpus(void); 72extern int smp_rescan_cpus(void);
73extern void __noreturn cpu_die(void);
74#else 74#else
75static inline int smp_rescan_cpus(void) { return 0; } 75static inline int smp_rescan_cpus(void) { return 0; }
76static inline void cpu_die(void) { }
76#endif 77#endif
77 78
78#endif /* __ASM_SMP_H */ 79#endif /* __ASM_SMP_H */
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
deleted file mode 100644
index dc75c616eafe..000000000000
--- a/arch/s390/include/asm/suspend.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __ASM_S390_SUSPEND_H
2#define __ASM_S390_SUSPEND_H
3
4static inline int arch_prepare_suspend(void)
5{
6 return 0;
7}
8
9#endif
10
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 8429686951f9..5c0246b955d8 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -65,8 +65,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
65 if (test_tsk_thread_flag(task, TIF_31BIT)) 65 if (test_tsk_thread_flag(task, TIF_31BIT))
66 mask = 0xffffffff; 66 mask = 0xffffffff;
67#endif 67#endif
68 if (i + n == 6)
69 args[--n] = regs->args[0] & mask;
70 while (n-- > 0) 68 while (n-- > 0)
71 if (i + n > 0) 69 if (i + n > 0)
72 args[n] = regs->gprs[2 + i + n] & mask; 70 args[n] = regs->gprs[2 + i + n] & mask;
@@ -80,8 +78,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
80 const unsigned long *args) 78 const unsigned long *args)
81{ 79{
82 BUG_ON(i + n > 6); 80 BUG_ON(i + n > 6);
83 if (i + n == 6)
84 regs->args[0] = args[--n];
85 while (n-- > 0) 81 while (n-- > 0)
86 if (i + n > 0) 82 if (i + n > 0)
87 regs->gprs[2 + i + n] = args[n]; 83 regs->gprs[2 + i + n] = args[n];
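With the regs->args[0] special case gone, all remaining arguments come straight from the gpr array. A minimal sketch of a tracer-side caller, assuming the usual linux/sched.h types (dump_syscall_args is hypothetical):

#include <linux/sched.h>
#include <asm/syscall.h>

/* Hypothetical sketch: fetch all six syscall arguments of a traced
 * task through the simplified helper above. */
static void dump_syscall_args(struct task_struct *task,
			      struct pt_regs *regs, unsigned long *args)
{
	syscall_get_arguments(task, regs, 0, 6, args);
}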
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 22bdb2a0ee5f..79d3d6e2e9c5 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -14,8 +14,13 @@
14#ifndef __ASM_S390_SYSINFO_H 14#ifndef __ASM_S390_SYSINFO_H
15#define __ASM_S390_SYSINFO_H 15#define __ASM_S390_SYSINFO_H
16 16
17#include <asm/bitsperlong.h>
18
17struct sysinfo_1_1_1 { 19struct sysinfo_1_1_1 {
18 char reserved_0[32]; 20 unsigned short :16;
21 unsigned char ccr;
22 unsigned char cai;
23 char reserved_0[28];
19 char manufacturer[16]; 24 char manufacturer[16];
20 char type[4]; 25 char type[4];
21 char reserved_1[12]; 26 char reserved_1[12];
@@ -104,6 +109,39 @@ struct sysinfo_3_2_2 {
104 char reserved_544[3552]; 109 char reserved_544[3552];
105}; 110};
106 111
112#define TOPOLOGY_CPU_BITS 64
113#define TOPOLOGY_NR_MAG 6
114
115struct topology_cpu {
116 unsigned char reserved0[4];
117 unsigned char :6;
118 unsigned char pp:2;
119 unsigned char reserved1;
120 unsigned short origin;
121 unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG];
122};
123
124struct topology_container {
125 unsigned char reserved[7];
126 unsigned char id;
127};
128
129union topology_entry {
130 unsigned char nl;
131 struct topology_cpu cpu;
132 struct topology_container container;
133};
134
135struct sysinfo_15_1_x {
136 unsigned char reserved0[2];
137 unsigned short length;
138 unsigned char mag[TOPOLOGY_NR_MAG];
139 unsigned char reserved1;
140 unsigned char mnest;
141 unsigned char reserved2[4];
142 union topology_entry tle[0];
143};
144
107static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) 145static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
108{ 146{
109 register int r0 asm("0") = (fc << 28) | sel1; 147 register int r0 asm("0") = (fc << 28) | sel1;
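The new sysinfo_15_1_x block is retrieved with the same stsi primitive as the other SYSIBs. A minimal sketch, assuming a full page is large enough for the response and that stsi returns -ENOSYS when the selector is unsupported, as the existing callers expect (fetch_topology is a hypothetical name; the patch's real consumer is store_topology):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/sysinfo.h>

/* Hypothetical sketch: fetch the CPU topology SYSIB 15.1.2. */
static struct sysinfo_15_1_x *fetch_topology(void)
{
	struct sysinfo_15_1_x *info;

	info = (struct sysinfo_15_1_x *) get_zeroed_page(GFP_KERNEL);
	if (!info)
		return NULL;
	if (stsi(info, 15, 1, 2) == -ENOSYS) {	/* fc=15, sel1=1, sel2=2 */
		free_page((unsigned long) info);
		return NULL;
	}
	return info;	/* info->tle[] holds the nested topology entries */
}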
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index cef66210c846..d382629a0172 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -14,12 +14,14 @@
14#include <asm/setup.h> 14#include <asm/setup.h>
15#include <asm/processor.h> 15#include <asm/processor.h>
16#include <asm/lowcore.h> 16#include <asm/lowcore.h>
17#include <asm/cmpxchg.h>
17 18
18#ifdef __KERNEL__ 19#ifdef __KERNEL__
19 20
20struct task_struct; 21struct task_struct;
21 22
22extern struct task_struct *__switch_to(void *, void *); 23extern struct task_struct *__switch_to(void *, void *);
24extern void update_per_regs(struct task_struct *task);
23 25
24static inline void save_fp_regs(s390_fp_regs *fpregs) 26static inline void save_fp_regs(s390_fp_regs *fpregs)
25{ 27{
@@ -85,26 +87,26 @@ static inline void restore_access_regs(unsigned int *acrs)
85 asm volatile("lam 0,15,%0" : : "Q" (*acrs)); 87 asm volatile("lam 0,15,%0" : : "Q" (*acrs));
86} 88}
87 89
88#define switch_to(prev,next,last) do { \ 90#define switch_to(prev,next,last) do { \
89 if (prev == next) \ 91 if (prev->mm) { \
90 break; \ 92 save_fp_regs(&prev->thread.fp_regs); \
91 save_fp_regs(&prev->thread.fp_regs); \ 93 save_access_regs(&prev->thread.acrs[0]); \
92 restore_fp_regs(&next->thread.fp_regs); \ 94 } \
93 save_access_regs(&prev->thread.acrs[0]); \ 95 if (next->mm) { \
94 restore_access_regs(&next->thread.acrs[0]); \ 96 restore_fp_regs(&next->thread.fp_regs); \
95 prev = __switch_to(prev,next); \ 97 restore_access_regs(&next->thread.acrs[0]); \
98 update_per_regs(next); \
99 } \
100 prev = __switch_to(prev,next); \
96} while (0) 101} while (0)
97 102
98extern void account_vtime(struct task_struct *, struct task_struct *); 103extern void account_vtime(struct task_struct *, struct task_struct *);
99extern void account_tick_vtime(struct task_struct *); 104extern void account_tick_vtime(struct task_struct *);
100extern void account_system_vtime(struct task_struct *);
101 105
102#ifdef CONFIG_PFAULT 106#ifdef CONFIG_PFAULT
103extern void pfault_irq_init(void);
104extern int pfault_init(void); 107extern int pfault_init(void);
105extern void pfault_fini(void); 108extern void pfault_fini(void);
106#else /* CONFIG_PFAULT */ 109#else /* CONFIG_PFAULT */
107#define pfault_irq_init() do { } while (0)
108#define pfault_init() ({-1;}) 110#define pfault_init() ({-1;})
109#define pfault_fini() do { } while (0) 111#define pfault_fini() do { } while (0)
110#endif /* CONFIG_PFAULT */ 112#endif /* CONFIG_PFAULT */
@@ -119,161 +121,6 @@ extern int memcpy_real(void *, void *, size_t);
119 121
120#define nop() asm volatile("nop") 122#define nop() asm volatile("nop")
121 123
122#define xchg(ptr,x) \
123({ \
124 __typeof__(*(ptr)) __ret; \
125 __ret = (__typeof__(*(ptr))) \
126 __xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
127 __ret; \
128})
129
130extern void __xchg_called_with_bad_pointer(void);
131
132static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
133{
134 unsigned long addr, old;
135 int shift;
136
137 switch (size) {
138 case 1:
139 addr = (unsigned long) ptr;
140 shift = (3 ^ (addr & 3)) << 3;
141 addr ^= addr & 3;
142 asm volatile(
143 " l %0,%4\n"
144 "0: lr 0,%0\n"
145 " nr 0,%3\n"
146 " or 0,%2\n"
147 " cs %0,0,%4\n"
148 " jl 0b\n"
149 : "=&d" (old), "=Q" (*(int *) addr)
150 : "d" (x << shift), "d" (~(255 << shift)),
151 "Q" (*(int *) addr) : "memory", "cc", "0");
152 return old >> shift;
153 case 2:
154 addr = (unsigned long) ptr;
155 shift = (2 ^ (addr & 2)) << 3;
156 addr ^= addr & 2;
157 asm volatile(
158 " l %0,%4\n"
159 "0: lr 0,%0\n"
160 " nr 0,%3\n"
161 " or 0,%2\n"
162 " cs %0,0,%4\n"
163 " jl 0b\n"
164 : "=&d" (old), "=Q" (*(int *) addr)
165 : "d" (x << shift), "d" (~(65535 << shift)),
166 "Q" (*(int *) addr) : "memory", "cc", "0");
167 return old >> shift;
168 case 4:
169 asm volatile(
170 " l %0,%3\n"
171 "0: cs %0,%2,%3\n"
172 " jl 0b\n"
173 : "=&d" (old), "=Q" (*(int *) ptr)
174 : "d" (x), "Q" (*(int *) ptr)
175 : "memory", "cc");
176 return old;
177#ifdef __s390x__
178 case 8:
179 asm volatile(
180 " lg %0,%3\n"
181 "0: csg %0,%2,%3\n"
182 " jl 0b\n"
183 : "=&d" (old), "=m" (*(long *) ptr)
184 : "d" (x), "Q" (*(long *) ptr)
185 : "memory", "cc");
186 return old;
187#endif /* __s390x__ */
188 }
189 __xchg_called_with_bad_pointer();
190 return x;
191}
192
193/*
194 * Atomic compare and exchange. Compare OLD with MEM, if identical,
195 * store NEW in MEM. Return the initial value in MEM. Success is
196 * indicated by comparing RETURN with OLD.
197 */
198
199#define __HAVE_ARCH_CMPXCHG 1
200
201#define cmpxchg(ptr, o, n) \
202 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
203 (unsigned long)(n), sizeof(*(ptr))))
204
205extern void __cmpxchg_called_with_bad_pointer(void);
206
207static inline unsigned long
208__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
209{
210 unsigned long addr, prev, tmp;
211 int shift;
212
213 switch (size) {
214 case 1:
215 addr = (unsigned long) ptr;
216 shift = (3 ^ (addr & 3)) << 3;
217 addr ^= addr & 3;
218 asm volatile(
219 " l %0,%2\n"
220 "0: nr %0,%5\n"
221 " lr %1,%0\n"
222 " or %0,%3\n"
223 " or %1,%4\n"
224 " cs %0,%1,%2\n"
225 " jnl 1f\n"
226 " xr %1,%0\n"
227 " nr %1,%5\n"
228 " jnz 0b\n"
229 "1:"
230 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
231 : "d" (old << shift), "d" (new << shift),
232 "d" (~(255 << shift)), "Q" (*(int *) ptr)
233 : "memory", "cc");
234 return prev >> shift;
235 case 2:
236 addr = (unsigned long) ptr;
237 shift = (2 ^ (addr & 2)) << 3;
238 addr ^= addr & 2;
239 asm volatile(
240 " l %0,%2\n"
241 "0: nr %0,%5\n"
242 " lr %1,%0\n"
243 " or %0,%3\n"
244 " or %1,%4\n"
245 " cs %0,%1,%2\n"
246 " jnl 1f\n"
247 " xr %1,%0\n"
248 " nr %1,%5\n"
249 " jnz 0b\n"
250 "1:"
251 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
252 : "d" (old << shift), "d" (new << shift),
253 "d" (~(65535 << shift)), "Q" (*(int *) ptr)
254 : "memory", "cc");
255 return prev >> shift;
256 case 4:
257 asm volatile(
258 " cs %0,%3,%1\n"
259 : "=&d" (prev), "=Q" (*(int *) ptr)
260 : "0" (old), "d" (new), "Q" (*(int *) ptr)
261 : "memory", "cc");
262 return prev;
263#ifdef __s390x__
264 case 8:
265 asm volatile(
266 " csg %0,%3,%1\n"
267 : "=&d" (prev), "=Q" (*(long *) ptr)
268 : "0" (old), "d" (new), "Q" (*(long *) ptr)
269 : "memory", "cc");
270 return prev;
271#endif /* __s390x__ */
272 }
273 __cmpxchg_called_with_bad_pointer();
274 return old;
275}
276
277/* 124/*
278 * Force strict CPU ordering. 125 * Force strict CPU ordering.
279 * And yes, this is required on UP too when we're talking 126 * And yes, this is required on UP too when we're talking
@@ -352,46 +199,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
352 __ctl_load(__dummy, cr, cr); \ 199 __ctl_load(__dummy, cr, cr); \
353}) 200})
354 201
355#include <linux/irqflags.h>
356
357#include <asm-generic/cmpxchg-local.h>
358
359static inline unsigned long __cmpxchg_local(volatile void *ptr,
360 unsigned long old,
361 unsigned long new, int size)
362{
363 switch (size) {
364 case 1:
365 case 2:
366 case 4:
367#ifdef __s390x__
368 case 8:
369#endif
370 return __cmpxchg(ptr, old, new, size);
371 default:
372 return __cmpxchg_local_generic(ptr, old, new, size);
373 }
374
375 return old;
376}
377
378/*
379 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
380 * them available.
381 */
382#define cmpxchg_local(ptr, o, n) \
383 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
384 (unsigned long)(n), sizeof(*(ptr))))
385#ifdef __s390x__
386#define cmpxchg64_local(ptr, o, n) \
387 ({ \
388 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
389 cmpxchg_local((ptr), (o), (n)); \
390 })
391#else
392#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
393#endif
394
395/* 202/*
396 * Use to set psw mask except for the first byte which 203 * Use to set psw mask except for the first byte which
397 * won't be changed by this function. 204 * won't be changed by this function.
@@ -399,7 +206,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
399static inline void 206static inline void
400__set_psw_mask(unsigned long mask) 207__set_psw_mask(unsigned long mask)
401{ 208{
402 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); 209 __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
403} 210}
404 211
405#define local_mcck_enable() __set_psw_mask(psw_kernel_bits) 212#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
@@ -419,30 +226,21 @@ extern void smp_ctl_clear_bit(int cr, int bit);
419 226
420#endif /* CONFIG_SMP */ 227#endif /* CONFIG_SMP */
421 228
422static inline unsigned int stfl(void) 229#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
423{
424 asm volatile(
425 " .insn s,0xb2b10000,0(0)\n" /* stfl */
426 "0:\n"
427 EX_TABLE(0b,0b));
428 return S390_lowcore.stfl_fac_list;
429}
430 230
431static inline int __stfle(unsigned long long *list, int doublewords) 231/*
232 * The test_facility function uses the bit ordering where the MSB is bit 0.
233 * That makes it easier to query facility bits with the bit number as
234 * documented in the Principles of Operation.
235 */
236static inline int test_facility(unsigned long nr)
432{ 237{
433 typedef struct { unsigned long long _[doublewords]; } addrtype; 238 unsigned char *ptr;
434 register unsigned long __nr asm("0") = doublewords - 1;
435 239
436 asm volatile(".insn s,0xb2b00000,%0" /* stfle */ 240 if (nr >= MAX_FACILITY_BIT)
437 : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); 241 return 0;
438 return __nr + 1; 242 ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
439} 243 return (*ptr & (0x80 >> (nr & 7))) != 0;
440
441static inline int stfle(unsigned long long *list, int doublewords)
442{
443 if (!(stfl() & (1UL << 24)))
444 return -EOPNOTSUPP;
445 return __stfle(list, doublewords);
446} 244}
447 245
448static inline unsigned short stap(void) 246static inline unsigned short stap(void)
@@ -457,7 +255,7 @@ extern void (*_machine_restart)(char *command);
457extern void (*_machine_halt)(void); 255extern void (*_machine_halt)(void);
458extern void (*_machine_power_off)(void); 256extern void (*_machine_power_off)(void);
459 257
460#define arch_align_stack(x) (x) 258extern unsigned long arch_align_stack(unsigned long sp);
461 259
462static inline int tprot(unsigned long addr) 260static inline int tprot(unsigned long addr)
463{ 261{
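test_facility replaces the open-coded stfl/stfle sequences with a simple lookup in the facility list stored once in the lowcore. A minimal sketch under the assumption that facility bit 8 denotes enhanced DAT, per the Principles of Operation (machine_has_edat1 is a hypothetical wrapper):

#include <asm/system.h>

/* Hypothetical sketch: facility bits are queried with the
 * Principles-of-Operation numbering, MSB first. */
static int machine_has_edat1(void)
{
	return test_facility(8);	/* assumed: bit 8 = enhanced DAT */
}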
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 5baf0230b29b..ad1382f7932e 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -74,7 +74,7 @@ struct thread_info {
74/* how to get the thread information struct from C */ 74/* how to get the thread information struct from C */
75static inline struct thread_info *current_thread_info(void) 75static inline struct thread_info *current_thread_info(void)
76{ 76{
77 return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE); 77 return (struct thread_info *) S390_lowcore.thread_info;
78} 78}
79 79
80#define THREAD_SIZE_ORDER THREAD_ORDER 80#define THREAD_SIZE_ORDER THREAD_ORDER
@@ -88,7 +88,7 @@ static inline struct thread_info *current_thread_info(void)
88#define TIF_SIGPENDING 2 /* signal pending */ 88#define TIF_SIGPENDING 2 /* signal pending */
89#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 89#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
90#define TIF_RESTART_SVC 4 /* restart svc with new svc number */ 90#define TIF_RESTART_SVC 4 /* restart svc with new svc number */
91#define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ 91#define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */
92#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ 92#define TIF_MCCK_PENDING 7 /* machine check handling is pending */
93#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ 93#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
94#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 94#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
@@ -99,14 +99,15 @@ static inline struct thread_info *current_thread_info(void)
99#define TIF_31BIT 17 /* 32bit process */ 99#define TIF_31BIT 17 /* 32bit process */
100#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 100#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
101#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ 101#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
102#define TIF_FREEZE 20 /* thread is freezing for suspend */ 102#define TIF_SINGLE_STEP 20 /* This task is single stepped */
103#define TIF_FREEZE 21 /* thread is freezing for suspend */
103 104
104#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 105#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
105#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 106#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
106#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 107#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
107#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 108#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
108#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) 109#define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC)
109#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 110#define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
110#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) 111#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 112#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
112#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 113#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
@@ -114,8 +115,15 @@ static inline struct thread_info *current_thread_info(void)
114#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 115#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
115#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
116#define _TIF_31BIT (1<<TIF_31BIT) 117#define _TIF_31BIT (1<<TIF_31BIT)
118#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
117#define _TIF_FREEZE (1<<TIF_FREEZE) 119#define _TIF_FREEZE (1<<TIF_FREEZE)
118 120
121#ifdef CONFIG_64BIT
122#define is_32bit_task() (test_thread_flag(TIF_31BIT))
123#else
124#define is_32bit_task() (1)
125#endif
126
119#endif /* __KERNEL__ */ 127#endif /* __KERNEL__ */
120 128
121#define PREEMPT_ACTIVE 0x4000000 129#define PREEMPT_ACTIVE 0x4000000
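The new is_32bit_task() helper folds the TIF_31BIT test (and the 31-bit kernel case) into one expression. A minimal sketch of a call site, with task_word_size as a hypothetical name:

#include <asm/thread_info.h>

/* Hypothetical sketch: pick the user-visible word size of the
 * current task via the new helper instead of testing TIF_31BIT. */
static unsigned int task_word_size(void)
{
	return is_32bit_task() ? 4 : 8;
}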
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 09d345a701dc..88829a40af6f 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -11,6 +11,8 @@
11#ifndef _ASM_S390_TIMEX_H 11#ifndef _ASM_S390_TIMEX_H
12#define _ASM_S390_TIMEX_H 12#define _ASM_S390_TIMEX_H
13 13
14#include <asm/lowcore.h>
15
14/* The value of the TOD clock for 1.1.1970. */ 16/* The value of the TOD clock for 1.1.1970. */
15#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL 17#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
16 18
@@ -49,6 +51,24 @@ static inline void store_clock_comparator(__u64 *time)
49 asm volatile("stckc %0" : "=Q" (*time)); 51 asm volatile("stckc %0" : "=Q" (*time));
50} 52}
51 53
54void clock_comparator_work(void);
55
56static inline unsigned long long local_tick_disable(void)
57{
58 unsigned long long old;
59
60 old = S390_lowcore.clock_comparator;
61 S390_lowcore.clock_comparator = -1ULL;
62 set_clock_comparator(S390_lowcore.clock_comparator);
63 return old;
64}
65
66static inline void local_tick_enable(unsigned long long comp)
67{
68 S390_lowcore.clock_comparator = comp;
69 set_clock_comparator(S390_lowcore.clock_comparator);
70}
71
52#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 72#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
53 73
54typedef unsigned long long cycles_t; 74typedef unsigned long long cycles_t;
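local_tick_disable() parks the clock comparator at -1ULL so no comparator interrupt can fire until the saved value is restored. A minimal usage sketch, assuming interrupts are otherwise enabled (do_quiet_work is hypothetical):

#include <asm/timex.h>

/* Hypothetical sketch: run a callback with clock-comparator
 * interrupts suspended on this cpu. */
static void do_quiet_work(void (*fn)(void))
{
	unsigned long long old;

	old = local_tick_disable();	/* comparator := -1ULL */
	fn();				/* no tick fires here  */
	local_tick_enable(old);		/* restore comparator  */
}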
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index fd1c00d08bf5..c687a2c83462 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,70 +22,77 @@
22 */ 22 */
23 23
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/pagemap.h>
25#include <linux/swap.h> 26#include <linux/swap.h>
26#include <asm/processor.h> 27#include <asm/processor.h>
27#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
28#include <asm/smp.h>
29#include <asm/tlbflush.h> 29#include <asm/tlbflush.h>
30 30
31#ifndef CONFIG_SMP
32#define TLB_NR_PTRS 1
33#else
34#define TLB_NR_PTRS 508
35#endif
36
37struct mmu_gather { 31struct mmu_gather {
38 struct mm_struct *mm; 32 struct mm_struct *mm;
33#ifdef CONFIG_HAVE_RCU_TABLE_FREE
34 struct mmu_table_batch *batch;
35#endif
39 unsigned int fullmm; 36 unsigned int fullmm;
40 unsigned int nr_ptes; 37 unsigned int need_flush;
41 unsigned int nr_pxds;
42 void *array[TLB_NR_PTRS];
43}; 38};
44 39
45DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 40#ifdef CONFIG_HAVE_RCU_TABLE_FREE
41struct mmu_table_batch {
42 struct rcu_head rcu;
43 unsigned int nr;
44 void *tables[0];
45};
46 46
47static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, 47#define MAX_TABLE_BATCH \
48 unsigned int full_mm_flush) 48 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
49{
50 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
51 49
50extern void tlb_table_flush(struct mmu_gather *tlb);
51extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
52#endif
53
54static inline void tlb_gather_mmu(struct mmu_gather *tlb,
55 struct mm_struct *mm,
56 unsigned int full_mm_flush)
57{
52 tlb->mm = mm; 58 tlb->mm = mm;
53 tlb->fullmm = full_mm_flush; 59 tlb->fullmm = full_mm_flush;
54 tlb->nr_ptes = 0; 60 tlb->need_flush = 0;
55 tlb->nr_pxds = TLB_NR_PTRS; 61#ifdef CONFIG_HAVE_RCU_TABLE_FREE
62 tlb->batch = NULL;
63#endif
56 if (tlb->fullmm) 64 if (tlb->fullmm)
57 __tlb_flush_mm(mm); 65 __tlb_flush_mm(mm);
58 return tlb;
59} 66}
60 67
61static inline void tlb_flush_mmu(struct mmu_gather *tlb, 68static inline void tlb_flush_mmu(struct mmu_gather *tlb)
62 unsigned long start, unsigned long end)
63{ 69{
64 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS)) 70 if (!tlb->need_flush)
65 __tlb_flush_mm(tlb->mm); 71 return;
66 while (tlb->nr_ptes > 0) 72 tlb->need_flush = 0;
67 pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]); 73 __tlb_flush_mm(tlb->mm);
68 while (tlb->nr_pxds < TLB_NR_PTRS) 74#ifdef CONFIG_HAVE_RCU_TABLE_FREE
69 /* pgd_free frees the pointer as region or segment table */ 75 tlb_table_flush(tlb);
70 pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]); 76#endif
71} 77}
72 78
73static inline void tlb_finish_mmu(struct mmu_gather *tlb, 79static inline void tlb_finish_mmu(struct mmu_gather *tlb,
74 unsigned long start, unsigned long end) 80 unsigned long start, unsigned long end)
75{ 81{
76 tlb_flush_mmu(tlb, start, end); 82 tlb_flush_mmu(tlb);
77
78 /* keep the page table cache within bounds */
79 check_pgt_cache();
80
81 put_cpu_var(mmu_gathers);
82} 83}
83 84
84/* 85/*
85 * Release the page cache reference for a pte removed by 86 * Release the page cache reference for a pte removed by
86 * tlb_ptep_clear_flush. In both flush modes the tlb fo a page cache page 87 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
87 * has already been freed, so just do free_page_and_swap_cache. 88 * has already been freed, so just do free_page_and_swap_cache.
88 */ 89 */
90static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
91{
92 free_page_and_swap_cache(page);
93 return 1; /* avoid calling tlb_flush_mmu */
94}
95
89static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) 96static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
90{ 97{
91 free_page_and_swap_cache(page); 98 free_page_and_swap_cache(page);
@@ -98,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
98static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 105static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
99 unsigned long address) 106 unsigned long address)
100{ 107{
101 if (!tlb->fullmm) { 108#ifdef CONFIG_HAVE_RCU_TABLE_FREE
102 tlb->array[tlb->nr_ptes++] = pte; 109 if (!tlb->fullmm)
103 if (tlb->nr_ptes >= tlb->nr_pxds) 110 return page_table_free_rcu(tlb, (unsigned long *) pte);
104 tlb_flush_mmu(tlb, 0, 0); 111#endif
105 } else 112 page_table_free(tlb->mm, (unsigned long *) pte);
106 pte_free(tlb->mm, pte);
107} 113}
108 114
109/* 115/*
@@ -119,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
119#ifdef __s390x__ 125#ifdef __s390x__
120 if (tlb->mm->context.asce_limit <= (1UL << 31)) 126 if (tlb->mm->context.asce_limit <= (1UL << 31))
121 return; 127 return;
122 if (!tlb->fullmm) { 128#ifdef CONFIG_HAVE_RCU_TABLE_FREE
123 tlb->array[--tlb->nr_pxds] = pmd; 129 if (!tlb->fullmm)
124 if (tlb->nr_ptes >= tlb->nr_pxds) 130 return tlb_remove_table(tlb, pmd);
125 tlb_flush_mmu(tlb, 0, 0); 131#endif
126 } else 132 crst_table_free(tlb->mm, (unsigned long *) pmd);
127 pmd_free(tlb->mm, pmd);
128#endif 133#endif
129} 134}
130 135
@@ -141,12 +146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
141#ifdef __s390x__ 146#ifdef __s390x__
142 if (tlb->mm->context.asce_limit <= (1UL << 42)) 147 if (tlb->mm->context.asce_limit <= (1UL << 42))
143 return; 148 return;
144 if (!tlb->fullmm) { 149#ifdef CONFIG_HAVE_RCU_TABLE_FREE
145 tlb->array[--tlb->nr_pxds] = pud; 150 if (!tlb->fullmm)
146 if (tlb->nr_ptes >= tlb->nr_pxds) 151 return tlb_remove_table(tlb, pud);
147 tlb_flush_mmu(tlb, 0, 0); 152#endif
148 } else 153 crst_table_free(tlb->mm, (unsigned long *) pud);
149 pud_free(tlb->mm, pud);
150#endif 154#endif
151} 155}
152 156
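The mmu_gather now lives on the caller's stack instead of the old per-cpu array of TLB_NR_PTRS pointers. A rough sketch of the resulting lifecycle, with the caveats that need_flush is normally raised by the generic tlb_remove_tlb_entry machinery and unmap_example is purely hypothetical:

#include <linux/mm.h>
#include <asm/tlb.h>

/* Hypothetical sketch: gather, flush and finish on the stack. */
static void unmap_example(struct mm_struct *mm, struct page *page,
			  pgtable_t pte, unsigned long addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);	/* 0: not a full-mm teardown  */
	tlb.need_flush = 1;		/* assumed: a pte was cleared */
	tlb_remove_page(&tlb, page);	/* drop page cache reference  */
	pte_free_tlb(&tlb, pte, addr);	/* RCU-frees the page table   */
	tlb_finish_mmu(&tlb, addr, addr + PAGE_SIZE);
}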
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 29d5d6d4becc..b7a4f2eb0057 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -50,7 +50,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
50 /* 50 /*
51 * If the process only ran on the local cpu, do a local flush. 51 * If the process only ran on the local cpu, do a local flush.
52 */ 52 */
53 local_cpumask = cpumask_of_cpu(smp_processor_id()); 53 cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
54 if (cpumask_equal(mm_cpumask(mm), &local_cpumask)) 54 if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
55 __tlb_flush_local(); 55 __tlb_flush_local();
56 else 56 else
@@ -80,16 +80,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
80 * on all cpus instead of doing a local flush if the mm 80 * on all cpus instead of doing a local flush if the mm
81 * only ran on the local cpu. 81 * only ran on the local cpu.
82 */ 82 */
83 if (MACHINE_HAS_IDTE) { 83 if (MACHINE_HAS_IDTE)
84 if (mm->context.noexec)
85 __tlb_flush_idte((unsigned long)
86 get_shadow_table(mm->pgd) |
87 mm->context.asce_bits);
88 __tlb_flush_idte((unsigned long) mm->pgd | 84 __tlb_flush_idte((unsigned long) mm->pgd |
89 mm->context.asce_bits); 85 mm->context.asce_bits);
90 return; 86 else
91 } 87 __tlb_flush_full(mm);
92 __tlb_flush_full(mm);
93} 88}
94 89
95static inline void __tlb_flush_mm_cond(struct mm_struct * mm) 90static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 831bd033ea77..005d77d8ae2a 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -2,19 +2,38 @@
2#define _ASM_S390_TOPOLOGY_H 2#define _ASM_S390_TOPOLOGY_H
3 3
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5 5#include <asm/sysinfo.h>
6#define mc_capable() (1)
7
8const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
9 6
10extern unsigned char cpu_core_id[NR_CPUS]; 7extern unsigned char cpu_core_id[NR_CPUS];
11extern cpumask_t cpu_core_map[NR_CPUS]; 8extern cpumask_t cpu_core_map[NR_CPUS];
12 9
10static inline const struct cpumask *cpu_coregroup_mask(int cpu)
11{
12 return &cpu_core_map[cpu];
13}
14
13#define topology_core_id(cpu) (cpu_core_id[cpu]) 15#define topology_core_id(cpu) (cpu_core_id[cpu])
14#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) 16#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
17#define mc_capable() (1)
18
19#ifdef CONFIG_SCHED_BOOK
20
21extern unsigned char cpu_book_id[NR_CPUS];
22extern cpumask_t cpu_book_map[NR_CPUS];
23
24static inline const struct cpumask *cpu_book_mask(int cpu)
25{
26 return &cpu_book_map[cpu];
27}
28
29#define topology_book_id(cpu) (cpu_book_id[cpu])
30#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
31
32#endif /* CONFIG_SCHED_BOOK */
15 33
16int topology_set_cpu_management(int fc); 34int topology_set_cpu_management(int fc);
17void topology_schedule_update(void); 35void topology_schedule_update(void);
36void store_topology(struct sysinfo_15_1_x *info);
18 37
19#define POLARIZATION_UNKNWN (-1) 38#define POLARIZATION_UNKNWN (-1)
20#define POLARIZATION_HRZ (0) 39#define POLARIZATION_HRZ (0)
@@ -30,6 +49,8 @@ static inline void s390_init_cpu_topology(void)
30}; 49};
31#endif 50#endif
32 51
52#define SD_BOOK_INIT SD_CPU_INIT
53
33#include <asm-generic/topology.h> 54#include <asm-generic/topology.h>
34 55
35#endif /* _ASM_S390_TOPOLOGY_H */ 56#endif /* _ASM_S390_TOPOLOGY_H */
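Books add a second sharing level above cores. A minimal sketch comparing the two sibling sets with the accessors defined above (shares_book_not_core is hypothetical and simply returns 0 without CONFIG_SCHED_BOOK):

#include <linux/cpumask.h>
#include <asm/topology.h>

/* Hypothetical sketch: true if two cpus sit in the same book but
 * not in the same core group. */
static int shares_book_not_core(int cpu, int other)
{
#ifdef CONFIG_SCHED_BOOK
	return cpumask_test_cpu(other, cpu_book_mask(cpu)) &&
	       !cpumask_test_cpu(other, cpu_coregroup_mask(cpu));
#else
	return 0;
#endif
}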
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 04d6b95a89c6..eeb52ccf499f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -30,14 +30,6 @@ typedef __signed__ long saddr_t;
30 30
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32 32
33typedef u64 dma64_addr_t;
34#ifdef __s390x__
35/* DMA addresses come in 32-bit and 64-bit flavours. */
36typedef u64 dma_addr_t;
37#else
38typedef u32 dma_addr_t;
39#endif
40
41#ifndef __s390x__ 33#ifndef __s390x__
42typedef union { 34typedef union {
43 unsigned long long pair; 35 unsigned long long pair;
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index d6b1ed0ec52b..2b23885e81e9 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -49,12 +49,13 @@
49 49
50#define segment_eq(a,b) ((a).ar4 == (b).ar4) 50#define segment_eq(a,b) ((a).ar4 == (b).ar4)
51 51
52#define __access_ok(addr, size) \
53({ \
54 __chk_user_ptr(addr); \
55 1; \
56})
52 57
53static inline int __access_ok(const void __user *addr, unsigned long size) 58#define access_ok(type, addr, size) __access_ok(addr, size)
54{
55 return 1;
56}
57#define access_ok(type,addr,size) __access_ok(addr,size)
58 59
59/* 60/*
60 * The exception table consists of pairs of addresses: the first is the 61 * The exception table consists of pairs of addresses: the first is the
@@ -83,8 +84,8 @@ struct uaccess_ops {
83 size_t (*clear_user)(size_t, void __user *); 84 size_t (*clear_user)(size_t, void __user *);
84 size_t (*strnlen_user)(size_t, const char __user *); 85 size_t (*strnlen_user)(size_t, const char __user *);
85 size_t (*strncpy_from_user)(size_t, const char __user *, char *); 86 size_t (*strncpy_from_user)(size_t, const char __user *, char *);
86 int (*futex_atomic_op)(int op, int __user *, int oparg, int *old); 87 int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
87 int (*futex_atomic_cmpxchg)(int __user *, int old, int new); 88 int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
88}; 89};
89 90
90extern struct uaccess_ops uaccess; 91extern struct uaccess_ops uaccess;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 1049ef27c15e..404bdb9671b4 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -272,7 +272,12 @@
272#define __NR_fanotify_init 332 272#define __NR_fanotify_init 332
273#define __NR_fanotify_mark 333 273#define __NR_fanotify_mark 333
274#define __NR_prlimit64 334 274#define __NR_prlimit64 334
275#define NR_syscalls 335 275#define __NR_name_to_handle_at 335
276#define __NR_open_by_handle_at 336
277#define __NR_clock_adjtime 337
278#define __NR_syncfs 338
279#define __NR_setns 339
280#define NR_syscalls 340
276 281
277/* 282/*
278 * There are some system calls that are not present on 64 bit, some 283 * There are some system calls that are not present on 64 bit, some
@@ -381,6 +386,7 @@
381 386
382/* Ignore system calls that are also reachable via sys_socket */ 387/* Ignore system calls that are also reachable via sys_socket */
383#define __IGNORE_recvmmsg 388#define __IGNORE_recvmmsg
389#define __IGNORE_sendmmsg
384 390
385#define __ARCH_WANT_IPC_PARSE_VERSION 391#define __ARCH_WANT_IPC_PARSE_VERSION
386#define __ARCH_WANT_OLD_READDIR 392#define __ARCH_WANT_OLD_READDIR