Diffstat (limited to 'arch/s390')
 arch/s390/Kconfig                  |   21
 arch/s390/Kconfig.debug            |    3
 arch/s390/Makefile                 |    3
 arch/s390/boot/compressed/Makefile |    6
 arch/s390/boot/compressed/misc.c   |    5
 arch/s390/include/asm/cacheflush.h |    4
 arch/s390/include/asm/futex.h      |   12
 arch/s390/include/asm/rwsem.h      |   63
 arch/s390/include/asm/uaccess.h    |    4
 arch/s390/kernel/machine_kexec.c   |    2
 arch/s390/kernel/vmlinux.lds.S     |    2
 arch/s390/lib/uaccess.h            |    8
 arch/s390/lib/uaccess_pt.c         |   17
 arch/s390/lib/uaccess_std.c        |    8
 arch/s390/mm/Makefile              |    1
 arch/s390/mm/pageattr.c            |   55
 arch/s390/oprofile/Makefile        |    2
 arch/s390/oprofile/hwsampler.c     | 1256
 arch/s390/oprofile/hwsampler.h     |  113
 arch/s390/oprofile/init.c          |  165
 20 files changed, 1642 insertions(+), 108 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 636bcb81d068..2508a6f31588 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -85,6 +85,7 @@ config S390
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
+	select HAVE_KERNEL_XZ
 	select HAVE_GET_USER_PAGES_FAST
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select ARCH_INLINE_SPIN_TRYLOCK
@@ -341,26 +342,16 @@ config STACK_GUARD
 	  The minimum size for the stack guard should be 256 for 31 bit and
 	  512 for 64 bit.
 
-config WARN_STACK
+config WARN_DYNAMIC_STACK
 	def_bool n
-	prompt "Emit compiler warnings for function with broken stack usage"
+	prompt "Emit compiler warnings for function with dynamic stack usage"
 	help
-	  This option enables the compiler options -mwarn-framesize and
-	  -mwarn-dynamicstack. If the compiler supports these options it
-	  will generate warnings for function which either use alloca or
-	  create a stack frame bigger than CONFIG_WARN_STACK_SIZE.
+	  This option enables the compiler option -mwarn-dynamicstack. If the
+	  compiler supports this option it generates warnings for functions
+	  that dynamically allocate stack space using alloca.
 
 	  Say N if you are unsure.
 
-config WARN_STACK_SIZE
-	int "Maximum frame size considered safe (128-2048)"
-	range 128 2048
-	depends on WARN_STACK
-	default "2048"
-	help
-	  This allows you to specify the maximum frame size a function may
-	  have without the compiler complaining about it.
-
 config ARCH_POPULATES_NODE_MAP
 	def_bool y
 
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2b380df95606..d76cef3fef37 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -31,4 +31,7 @@ config DEBUG_STRICT_USER_COPY_CHECKS
 
 	  If unsure, or if you run an older (pre 4.4) gcc, say N.
 
+config DEBUG_SET_MODULE_RONX
+	def_bool y
+	depends on MODULES
 endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index d5b8a6ade525..27a0b5df5ead 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -80,8 +80,7 @@ endif
 endif
 
 ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
-cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack
-cflags-$(CONFIG_WARN_STACK) += -mwarn-framesize=$(CONFIG_WARN_STACK_SIZE)
+cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
 endif
 
 KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 1c999f726a58..10e22c4ec4a7 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -7,7 +7,8 @@
 BITS := $(if $(CONFIG_64BIT),64,31)
 
 targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
-	   vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o sizes.h head$(BITS).o
+	   vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \
+	   sizes.h head$(BITS).o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += $(cflags-y)
@@ -48,6 +49,7 @@ suffix-$(CONFIG_KERNEL_GZIP) := gz
 suffix-$(CONFIG_KERNEL_BZIP2) := bz2
 suffix-$(CONFIG_KERNEL_LZMA) := lzma
 suffix-$(CONFIG_KERNEL_LZO) := lzo
+suffix-$(CONFIG_KERNEL_XZ) := xz
 
 $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
 	$(call if_changed,gzip)
@@ -57,6 +59,8 @@ $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
 	$(call if_changed,lzma)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
 	$(call if_changed,lzo)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+	$(call if_changed,xzkern)
 
 LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
 $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 2751b3a8a66f..028f23ea81d1 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -19,6 +19,7 @@
 #undef memset
 #undef memcpy
 #undef memmove
+#define memmove memmove
 #define memzero(s, n) memset((s), 0, (n))
 
 /* Symbols defined by linker scripts */
@@ -54,6 +55,10 @@ static unsigned long free_mem_end_ptr;
 #include "../../../../lib/decompress_unlzo.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 extern _sclp_print_early(const char *);
 
 int puts(const char *s)
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 7e1f77620624..43a5c78046db 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -8,4 +8,8 @@
 void kernel_map_pages(struct page *page, int numpages, int enable);
 #endif
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
 #endif /* _S390_CACHEFLUSH_H */
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 5c5d02de49e9..81cf36b691f1 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -7,7 +7,7 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	pagefault_disable();
@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
-						int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+						u32 oldval, u32 newval)
 {
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
+	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
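The hunk above switches the futex helpers to a calling convention in which the old futex value is reported through a separate *uval output pointer, so the return code only signals success or -EFAULT. A hypothetical caller of the reworked interface could look like the sketch below (example_futex_cmpxchg and its local names are illustrative only, not part of the patch):

	/* Illustrative only: how a caller might use the new interface. */
	static int example_futex_cmpxchg(u32 __user *uaddr, u32 expected, u32 desired)
	{
		u32 curval;
		int ret;

		ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, desired);
		if (ret)			/* -EFAULT: user page not accessible */
			return ret;
		if (curval != expected)		/* value changed under us */
			return -EAGAIN;
		return 0;			/* exchange performed */
	}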
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 423fdda2322d..d0eb4653cebd 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -43,29 +43,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	dep_map;
-#endif
-};
-
 #ifndef __s390x__
 #define RWSEM_UNLOCKED_VALUE	0x00000000
 #define RWSEM_ACTIVE_BIAS	0x00000001
@@ -81,41 +58,6 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 /*
- * initialisation
- */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
-   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-			 struct lock_class_key *key);
-
-#define init_rwsem(sem)				\
-do {						\
-	static struct lock_class_key __key;	\
-						\
-	__init_rwsem((sem), #sem, &__key);	\
-} while (0)
-
-
-/*
  * lock for reading
  */
 static inline void __down_read(struct rw_semaphore *sem)
@@ -377,10 +319,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 	return new;
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index d6b1ed0ec52b..2d9ea11f919a 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -83,8 +83,8 @@ struct uaccess_ops {
 	size_t (*clear_user)(size_t, void __user *);
 	size_t (*strnlen_user)(size_t, const char __user *);
 	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-	int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
-	int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
+	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
 };
 
 extern struct uaccess_ops uaccess;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index a922d51df6bf..b09b9c62573e 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -12,6 +12,7 @@
 #include <linux/kexec.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
+#include <linux/ftrace.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
 #include <asm/pgtable.h>
@@ -71,6 +72,7 @@ static void __machine_kexec(void *data)
 
 void machine_kexec(struct kimage *image)
 {
+	tracer_disable();
 	smp_send_stop();
 	smp_switch_to_ipl_cpu(__machine_kexec, image);
 }
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a68ac10213b2..1bc18cdb525b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -77,7 +77,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(0x100, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;		/* freed after init ends here */
 
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
index 126011df14f1..1d2536cb630b 100644
--- a/arch/s390/lib/uaccess.h
+++ b/arch/s390/lib/uaccess.h
@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *);
 extern size_t copy_to_user_std(size_t, void __user *, const void *);
 extern size_t strnlen_user_std(size_t, const char __user *);
 extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_cmpxchg_std(int __user *, int, int);
-extern int futex_atomic_op_std(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32);
+extern int futex_atomic_op_std(int, u32 __user *, int, int *);
 
 extern size_t copy_from_user_pt(size_t, const void __user *, void *);
 extern size_t copy_to_user_pt(size_t, void __user *, const void *);
-extern int futex_atomic_op_pt(int, int __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
+extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
+extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
 
 #endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 404f2de296dc..74833831417f 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -302,7 +302,7 @@ fault:
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),	\
 		  "m" (*uaddr) : "cc" );
 
-static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int oldval = 0, newval, ret;
 
@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int ret;
 
@@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+				     u32 oldval, u32 newval)
 {
 	int ret;
 
 	asm volatile("0: cs %1,%4,0(%5)\n"
-		     "1: lr %0,%1\n"
+		     "1: la %0,0\n"
 		     "2:\n"
 		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
 		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
 		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
 		     : "cc", "memory" );
+	*uval = oldval;
 	return ret;
 }
 
-int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+			    u32 oldval, u32 newval)
 {
 	int ret;
 
 	if (segment_eq(get_fs(), KERNEL_DS))
-		return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
 	spin_lock(&current->mm->page_table_lock);
 	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
 	if (!uaddr) {
@@ -382,7 +385,7 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
 	}
 	get_page(virt_to_page(uaddr));
 	spin_unlock(&current->mm->page_table_lock);
-	ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
+	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
 	put_page(virt_to_page(uaddr));
 	return ret;
 }
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index a6c4f7ed24a4..bb1a7eed42ce 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),	\
 		  "m" (*uaddr) : "cc");
 
-int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
 {
 	int oldval = 0, newval, ret;
 
@@ -287,19 +287,21 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
 	return ret;
 }
 
-int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
+int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
+			     u32 oldval, u32 newval)
 {
 	int ret;
 
 	asm volatile(
 		"   sacf 256\n"
 		"0: cs %1,%4,0(%5)\n"
-		"1: lr %0,%1\n"
+		"1: la %0,0\n"
 		"2: sacf 0\n"
 		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
 		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
 		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
 		: "cc", "memory" );
+	*uval = oldval;
 	return ret;
 }
 
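As in uaccess_pt.c above, the inline assembly now loads 0 into the return register on the success path ("la %0,0" instead of "lr %0,%1") and hands the observed old value back through *uval, so a fault indication can no longer collide with a legitimate futex value. A stand-alone C model of the resulting contract, for illustration only (model_futex_cmpxchg is not kernel code and the NULL check merely stands in for the exception-table fixup):

	#include <errno.h>
	#include <stdint.h>

	/* Returns 0 or -EFAULT; the value found at uaddr is reported via *uval. */
	static int model_futex_cmpxchg(uint32_t *uval, uint32_t *uaddr,
				       uint32_t oldval, uint32_t newval)
	{
		if (uaddr == NULL)		/* models the EX_TABLE fault path */
			return -EFAULT;
		/* on failure the builtin updates oldval with the current value */
		__atomic_compare_exchange_n(uaddr, &oldval, newval, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		*uval = oldval;			/* mirrors the new "*uval = oldval;" */
		return 0;			/* mirrors "la %0,0" on the non-fault path */
	}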
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 6fbc6f3fbdf2..d98fe9004a52 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -6,3 +6,4 @@ obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
 	    page-states.o gup.o
 obj-$(CONFIG_CMM)		+= cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
+obj-$(CONFIG_DEBUG_SET_MODULE_RONX)	+= pageattr.o
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
new file mode 100644
index 000000000000..122ffbd08ce0
--- /dev/null
+++ b/arch/s390/mm/pageattr.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright IBM Corp. 2011
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <asm/pgtable.h>
+
+static void change_page_attr(unsigned long addr, int numpages,
+			     pte_t (*set) (pte_t))
+{
+	pte_t *ptep, pte;
+	pmd_t *pmdp;
+	pud_t *pudp;
+	pgd_t *pgdp;
+	int i;
+
+	for (i = 0; i < numpages; i++) {
+		pgdp = pgd_offset(&init_mm, addr);
+		pudp = pud_offset(pgdp, addr);
+		pmdp = pmd_offset(pudp, addr);
+		if (pmd_huge(*pmdp)) {
+			WARN_ON_ONCE(1);
+			continue;
+		}
+		ptep = pte_offset_kernel(pmdp, addr + i * PAGE_SIZE);
+
+		pte = *ptep;
+		pte = set(pte);
+		ptep_invalidate(&init_mm, addr + i * PAGE_SIZE, ptep);
+		*ptep = pte;
+	}
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+	change_page_attr(addr, numpages, pte_wrprotect);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+	change_page_attr(addr, numpages, pte_mkwrite);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+/* not possible */
+int set_memory_nx(unsigned long addr, int numpages)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
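The new pageattr.c gives s390 the set_memory_ro()/set_memory_rw() primitives that CONFIG_DEBUG_SET_MODULE_RONX-style callers use to write-protect page-aligned kernel ranges; each call walks the kernel page table, applies pte_wrprotect() or pte_mkwrite() to every PTE and re-establishes it after invalidation. A hypothetical caller might use it as follows (example_protect_region is illustrative only and assumes start is page aligned):

	/* Illustrative only: write-protect a page-aligned kernel range and undo it. */
	static void example_protect_region(unsigned long start, unsigned long size)
	{
		int numpages = PFN_UP(size);	/* whole pages covering the range */

		set_memory_ro(start, numpages);	/* writes to the range now fault */
		/* ... */
		set_memory_rw(start, numpages);	/* restore write access */
	}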
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index 537b2d840e69..d698cddcfbdd 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -6,4 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o hwsampler.o
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
new file mode 100644
index 000000000000..3d48f4db246d
--- /dev/null
+++ b/arch/s390/oprofile/hwsampler.c
@@ -0,0 +1,1256 @@
+/**
+ * arch/s390/oprofile/hwsampler.c
+ *
+ * Copyright IBM Corp. 2010
+ * Author: Heinz Graalfs <graalfs@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/semaphore.h>
+#include <linux/oom.h>
+#include <linux/oprofile.h>
+
+#include <asm/lowcore.h>
+#include <asm/s390_ext.h>
+
+#include "hwsampler.h"
+
+#define MAX_NUM_SDB 511
+#define MIN_NUM_SDB 1
+
+#define ALERT_REQ_MASK   0x4000000000000000ul
+#define BUFFER_FULL_MASK 0x8000000000000000ul
+
+#define EI_IEA  (1 << 31)	/* invalid entry address */
+#define EI_ISE  (1 << 30)	/* incorrect SDBT entry */
+#define EI_PRA  (1 << 29)	/* program request alert */
+#define EI_SACA (1 << 23)	/* sampler authorization change alert */
+#define EI_LSDA (1 << 22)	/* loss of sample data alert */
+
+DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
+
+struct hws_execute_parms {
+	void *buffer;
+	signed int rc;
+};
+
+DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
+EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);
+
+static DEFINE_MUTEX(hws_sem);
+static DEFINE_MUTEX(hws_sem_oom);
+
+static unsigned char hws_flush_all;
+static unsigned int hws_oom;
+static struct workqueue_struct *hws_wq;
+
+static unsigned int hws_state;
+enum {
+	HWS_INIT = 1,
+	HWS_DEALLOCATED,
+	HWS_STOPPED,
+	HWS_STARTED,
+	HWS_STOPPING };
+
+/* set to 1 if called by kernel during memory allocation */
+static unsigned char oom_killer_was_active;
+/* size of SDBT and SDB as of allocate API */
+static unsigned long num_sdbt = 100;
+static unsigned long num_sdb = 511;
+/* sampling interval (machine cycles) */
+static unsigned long interval;
+
+static unsigned long min_sampler_rate;
+static unsigned long max_sampler_rate;
+
+static int ssctl(void *buffer)
+{
+	int cc;
+
+	/* set in order to detect a program check */
+	cc = 1;
+
+	asm volatile(
+		"0: .insn s,0xB2870000,0(%1)\n"
+		"1: ipm %0\n"
+		"   srl %0,28\n"
+		"2:\n"
+		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+		: "+d" (cc), "+a" (buffer)
+		: "m" (*((struct hws_ssctl_request_block *)buffer))
+		: "cc", "memory");
+
+	return cc ? -EINVAL : 0 ;
+}
+
+static int qsi(void *buffer)
+{
+	int cc;
+	cc = 1;
+
+	asm volatile(
+		"0: .insn s,0xB2860000,0(%1)\n"
+		"1: lhi %0,0\n"
+		"2:\n"
+		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+		: "=d" (cc), "+a" (buffer)
+		: "m" (*((struct hws_qsi_info_block *)buffer))
+		: "cc", "memory");
+
+	return cc ? -EINVAL : 0;
+}
+
+static void execute_qsi(void *parms)
+{
+	struct hws_execute_parms *ep = parms;
+
+	ep->rc = qsi(ep->buffer);
+}
+
+static void execute_ssctl(void *parms)
+{
+	struct hws_execute_parms *ep = parms;
+
+	ep->rc = ssctl(ep->buffer);
+}
+
+static int smp_ctl_ssctl_stop(int cpu)
+{
+	int rc;
+	struct hws_execute_parms ep;
+	struct hws_cpu_buffer *cb;
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+	cb->ssctl.es = 0;
+	cb->ssctl.cs = 0;
+
+	ep.buffer = &cb->ssctl;
+	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+	rc = ep.rc;
+	if (rc) {
+		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+		dump_stack();
+	}
+
+	ep.buffer = &cb->qsi;
+	smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+	if (cb->qsi.es || cb->qsi.cs) {
+		printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");
+		dump_stack();
+	}
+
+	return rc;
+}
+
+static int smp_ctl_ssctl_deactivate(int cpu)
+{
+	int rc;
+	struct hws_execute_parms ep;
+	struct hws_cpu_buffer *cb;
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+	cb->ssctl.es = 1;
+	cb->ssctl.cs = 0;
+
+	ep.buffer = &cb->ssctl;
+	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+	rc = ep.rc;
+	if (rc)
+		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+
+	ep.buffer = &cb->qsi;
+	smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+	if (cb->qsi.cs)
+		printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");
+
+	return rc;
+}
+
+static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
+{
+	int rc;
+	struct hws_execute_parms ep;
+	struct hws_cpu_buffer *cb;
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+	cb->ssctl.h = 1;
+	cb->ssctl.tear = cb->first_sdbt;
+	cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
+	cb->ssctl.interval = interval;
+	cb->ssctl.es = 1;
+	cb->ssctl.cs = 1;
+
+	ep.buffer = &cb->ssctl;
+	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+	rc = ep.rc;
+	if (rc)
+		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+
+	ep.buffer = &cb->qsi;
+	smp_call_function_single(cpu, execute_qsi, &ep, 1);
+	if (ep.rc)
+		printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);
+
+	return rc;
+}
+
+static int smp_ctl_qsi(int cpu)
+{
+	struct hws_execute_parms ep;
+	struct hws_cpu_buffer *cb;
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+	ep.buffer = &cb->qsi;
+	smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+	return ep.rc;
+}
+
+static inline unsigned long *trailer_entry_ptr(unsigned long v)
+{
+	void *ret;
+
+	ret = (void *)v;
+	ret += PAGE_SIZE;
+	ret -= sizeof(struct hws_trailer_entry);
+
+	return (unsigned long *) ret;
+}
+
+/* prototypes for external interrupt handler and worker */
+static void hws_ext_handler(unsigned int ext_int_code,
+				unsigned int param32, unsigned long param64);
+
+static void worker(struct work_struct *work);
+
+static void add_samples_to_oprofile(unsigned cpu, unsigned long *,
+				unsigned long *dear);
+
+static void init_all_cpu_buffers(void)
+{
+	int cpu;
+	struct hws_cpu_buffer *cb;
+
+	for_each_online_cpu(cpu) {
+		cb = &per_cpu(sampler_cpu_buffer, cpu);
+		memset(cb, 0, sizeof(struct hws_cpu_buffer));
+	}
+}
+
+static int is_link_entry(unsigned long *s)
+{
+	return *s & 0x1ul ? 1 : 0;
+}
+
+static unsigned long *get_next_sdbt(unsigned long *s)
+{
+	return (unsigned long *) (*s & ~0x1ul);
+}
+
+static int prepare_cpu_buffers(void)
+{
+	int cpu;
+	int rc;
+	struct hws_cpu_buffer *cb;
+
+	rc = 0;
+	for_each_online_cpu(cpu) {
+		cb = &per_cpu(sampler_cpu_buffer, cpu);
+		atomic_set(&cb->ext_params, 0);
+		cb->worker_entry = 0;
+		cb->sample_overflow = 0;
+		cb->req_alert = 0;
+		cb->incorrect_sdbt_entry = 0;
+		cb->invalid_entry_address = 0;
+		cb->loss_of_sample_data = 0;
+		cb->sample_auth_change_alert = 0;
+		cb->finish = 0;
+		cb->oom = 0;
+		cb->stop_mode = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * allocate_sdbt() - allocate sampler memory
+ * @cpu: the cpu for which sampler memory is allocated
+ *
+ * A 4K page is allocated for each requested SDBT.
+ * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
+ * Set ALERT_REQ mask in each SDBs trailer.
+ * Returns zero if successful, <0 otherwise.
+ */
+static int allocate_sdbt(int cpu)
+{
+	int j, k, rc;
+	unsigned long *sdbt;
+	unsigned long sdb;
+	unsigned long *tail;
+	unsigned long *trailer;
+	struct hws_cpu_buffer *cb;
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+	if (cb->first_sdbt)
+		return -EINVAL;
+
+	sdbt = NULL;
+	tail = sdbt;
+
+	for (j = 0; j < num_sdbt; j++) {
+		sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+
+		mutex_lock(&hws_sem_oom);
+		/* OOM killer might have been activated */
+		barrier();
+		if (oom_killer_was_active || !sdbt) {
+			if (sdbt)
+				free_page((unsigned long)sdbt);
+
+			goto allocate_sdbt_error;
+		}
+		if (cb->first_sdbt == 0)
+			cb->first_sdbt = (unsigned long)sdbt;
+
+		/* link current page to tail of chain */
+		if (tail)
+			*tail = (unsigned long)(void *)sdbt + 1;
+
+		mutex_unlock(&hws_sem_oom);
+
+		for (k = 0; k < num_sdb; k++) {
+			/* get and set SDB page */
+			sdb = get_zeroed_page(GFP_KERNEL);
+
+			mutex_lock(&hws_sem_oom);
+			/* OOM killer might have been activated */
+			barrier();
+			if (oom_killer_was_active || !sdb) {
+				if (sdb)
+					free_page(sdb);
+
+				goto allocate_sdbt_error;
+			}
+			*sdbt = sdb;
+			trailer = trailer_entry_ptr(*sdbt);
+			*trailer = ALERT_REQ_MASK;
+			sdbt++;
+			mutex_unlock(&hws_sem_oom);
+		}
+		tail = sdbt;
+	}
+	mutex_lock(&hws_sem_oom);
+	if (oom_killer_was_active)
+		goto allocate_sdbt_error;
+
+	rc = 0;
+	if (tail)
+		*tail = (unsigned long)
+			((void *)cb->first_sdbt) + 1;
+
+allocate_sdbt_exit:
+	mutex_unlock(&hws_sem_oom);
+	return rc;
+
+allocate_sdbt_error:
+	rc = -ENOMEM;
+	goto allocate_sdbt_exit;
+}
+
+/*
+ * deallocate_sdbt() - deallocate all sampler memory
+ *
+ * For each online CPU all SDBT trees are deallocated.
+ * Returns the number of freed pages.
+ */
+static int deallocate_sdbt(void)
+{
+	int cpu;
+	int counter;
+
+	counter = 0;
+
+	for_each_online_cpu(cpu) {
+		unsigned long start;
+		unsigned long sdbt;
+		unsigned long *curr;
+		struct hws_cpu_buffer *cb;
+
+		cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+		if (!cb->first_sdbt)
+			continue;
+
+		sdbt = cb->first_sdbt;
+		curr = (unsigned long *) sdbt;
+		start = sdbt;
+
+		/* we'll free the SDBT after all SDBs are processed... */
+		while (1) {
+			if (!*curr || !sdbt)
+				break;
+
+			/* watch for link entry reset if found */
+			if (is_link_entry(curr)) {
+				curr = get_next_sdbt(curr);
+				if (sdbt)
+					free_page(sdbt);
+
+				/* we are done if we reach the start */
+				if ((unsigned long) curr == start)
+					break;
+				else
+					sdbt = (unsigned long) curr;
+			} else {
+				/* process SDB pointer */
+				if (*curr) {
+					free_page(*curr);
+					curr++;
+				}
+			}
+			counter++;
+		}
+		cb->first_sdbt = 0;
+	}
+	return counter;
+}
+
+static int start_sampling(int cpu)
+{
+	int rc;
+	struct hws_cpu_buffer *cb;
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+	rc = smp_ctl_ssctl_enable_activate(cpu, interval);
+	if (rc) {
+		printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
+		goto start_exit;
+	}
+
+	rc = -EINVAL;
+	if (!cb->qsi.es) {
+		printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
+		goto start_exit;
+	}
+
+	if (!cb->qsi.cs) {
+		printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
+		goto start_exit;
+	}
+
+	printk(KERN_INFO
+		"hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
+		cpu, interval);
+
+	rc = 0;
+
+start_exit:
+	return rc;
+}
+
+static int stop_sampling(int cpu)
+{
+	unsigned long v;
+	int rc;
+	struct hws_cpu_buffer *cb;
+
+	rc = smp_ctl_qsi(cpu);
+	WARN_ON(rc);
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+	if (!rc && !cb->qsi.es)
+		printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);
+
+	rc = smp_ctl_ssctl_stop(cpu);
+	if (rc) {
+		printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
+				cpu, rc);
+		goto stop_exit;
+	}
+
+	printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);
+
+stop_exit:
+	v = cb->req_alert;
+	if (v)
+		printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
+				" count=%lu.\n", cpu, v);
+
+	v = cb->loss_of_sample_data;
+	if (v)
+		printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
+				" count=%lu.\n", cpu, v);
+
+	v = cb->invalid_entry_address;
+	if (v)
+		printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
+				" count=%lu.\n", cpu, v);
+
+	v = cb->incorrect_sdbt_entry;
+	if (v)
+		printk(KERN_ERR
+				"hwsampler: CPU %d CPUMF Incorrect SDBT address,"
+				" count=%lu.\n", cpu, v);
+
+	v = cb->sample_auth_change_alert;
+	if (v)
+		printk(KERN_ERR
+				"hwsampler: CPU %d CPUMF Sample authorization change,"
+				" count=%lu.\n", cpu, v);
+
+	return rc;
+}
+
+static int check_hardware_prerequisites(void)
+{
+	unsigned long long facility_bits[2];
+
+	memcpy(facility_bits, S390_lowcore.stfle_fac_list, 32);
+	if (!(facility_bits[1] & (1ULL << 59)))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+/*
+ * hws_oom_callback() - the OOM callback function
+ *
+ * In case the callback is invoked during memory allocation for the
+ *  hw sampler, all obtained memory is deallocated and a flag is set
+ *  so main sampler memory allocation can exit with a failure code.
+ * In case the callback is invoked during sampling the hw sampler
+ *  is deactivated for all CPUs.
+ */
+static int hws_oom_callback(struct notifier_block *nfb,
+	unsigned long dummy, void *parm)
+{
+	unsigned long *freed;
+	int cpu;
+	struct hws_cpu_buffer *cb;
+
+	freed = parm;
+
+	mutex_lock(&hws_sem_oom);
+
+	if (hws_state == HWS_DEALLOCATED) {
+		/* during memory allocation */
+		if (oom_killer_was_active == 0) {
+			oom_killer_was_active = 1;
+			*freed += deallocate_sdbt();
+		}
+	} else {
+		int i;
+		cpu = get_cpu();
+		cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+		if (!cb->oom) {
+			for_each_online_cpu(i) {
+				smp_ctl_ssctl_deactivate(i);
+				cb->oom = 1;
+			}
+			cb->finish = 1;
+
+			printk(KERN_INFO
+				"hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
+				cpu);
+		}
+	}
+
+	mutex_unlock(&hws_sem_oom);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block hws_oom_notifier = {
+	.notifier_call = hws_oom_callback
+};
+
+static int hws_cpu_callback(struct notifier_block *nfb,
+	unsigned long action, void *hcpu)
+{
+	/* We do not have sampler space available for all possible CPUs.
+	   All CPUs should be online when hw sampling is activated. */
+	return NOTIFY_BAD;
+}
+
+static struct notifier_block hws_cpu_notifier = {
+	.notifier_call = hws_cpu_callback
+};
+
+/**
+ * hwsampler_deactivate() - set hardware sampling temporarily inactive
+ * @cpu:  specifies the CPU to be set inactive.
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_deactivate(unsigned int cpu)
+{
+	/*
+	 * Deactivate hw sampling temporarily and flush the buffer
+	 * by pushing all the pending samples to oprofile buffer.
+	 *
+	 * This function can be called under one of the following conditions:
+	 *     Memory unmap, task is exiting.
+	 */
+	int rc;
+	struct hws_cpu_buffer *cb;
+
+	rc = 0;
+	mutex_lock(&hws_sem);
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+	if (hws_state == HWS_STARTED) {
+		rc = smp_ctl_qsi(cpu);
+		WARN_ON(rc);
+		if (cb->qsi.cs) {
+			rc = smp_ctl_ssctl_deactivate(cpu);
+			if (rc) {
+				printk(KERN_INFO
+					"hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
+				cb->finish = 1;
+				hws_state = HWS_STOPPING;
+			} else {
+				hws_flush_all = 1;
+				/* Add work to queue to read pending samples.*/
+				queue_work_on(cpu, hws_wq, &cb->worker);
+			}
+		}
+	}
+	mutex_unlock(&hws_sem);
+
+	if (hws_wq)
+		flush_workqueue(hws_wq);
+
+	return rc;
+}
+
+/**
+ * hwsampler_activate() - activate/resume hardware sampling which was deactivated
+ * @cpu:  specifies the CPU to be set active.
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_activate(unsigned int cpu)
+{
+	/*
+	 * Re-activate hw sampling. This should be called in pair with
+	 * hwsampler_deactivate().
+	 */
+	int rc;
+	struct hws_cpu_buffer *cb;
+
+	rc = 0;
+	mutex_lock(&hws_sem);
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+	if (hws_state == HWS_STARTED) {
+		rc = smp_ctl_qsi(cpu);
+		WARN_ON(rc);
+		if (!cb->qsi.cs) {
+			hws_flush_all = 0;
+			rc = smp_ctl_ssctl_enable_activate(cpu, interval);
+			if (rc) {
+				printk(KERN_ERR
+					"CPU %d, CPUMF activate sampling failed.\n",
+					cpu);
+			}
+		}
+	}
+
+	mutex_unlock(&hws_sem);
+
+	return rc;
+}
+
+static void hws_ext_handler(unsigned int ext_int_code,
+			    unsigned int param32, unsigned long param64)
+{
+	int cpu;
+	struct hws_cpu_buffer *cb;
+
+	cpu = smp_processor_id();
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+	atomic_xchg(
+			&cb->ext_params,
+			atomic_read(&cb->ext_params)
+				| S390_lowcore.ext_params);
+
+	if (hws_wq)
+		queue_work(hws_wq, &cb->worker);
+}
+
+static int check_qsi_on_setup(void)
+{
+	int rc;
+	unsigned int cpu;
+	struct hws_cpu_buffer *cb;
+
+	for_each_online_cpu(cpu) {
+		cb = &per_cpu(sampler_cpu_buffer, cpu);
+		rc = smp_ctl_qsi(cpu);
+		WARN_ON(rc);
+		if (rc)
+			return -EOPNOTSUPP;
+
+		if (!cb->qsi.as) {
+			printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
+			return -EINVAL;
+		}
+
+		if (cb->qsi.es) {
+			printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
+			rc = smp_ctl_ssctl_stop(cpu);
+			if (rc)
+				return -EINVAL;
+
+			printk(KERN_INFO
+				"CPU %d, CPUMF Sampling stopped now.\n", cpu);
+		}
+	}
+	return 0;
+}
+
+static int check_qsi_on_start(void)
+{
+	unsigned int cpu;
+	int rc;
+	struct hws_cpu_buffer *cb;
+
+	for_each_online_cpu(cpu) {
+		cb = &per_cpu(sampler_cpu_buffer, cpu);
+		rc = smp_ctl_qsi(cpu);
+		WARN_ON(rc);
+
+		if (!cb->qsi.as)
+			return -EINVAL;
+
+		if (cb->qsi.es)
+			return -EINVAL;
+
+		if (cb->qsi.cs)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static void worker_on_start(unsigned int cpu)
+{
+	struct hws_cpu_buffer *cb;
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+	cb->worker_entry = cb->first_sdbt;
+}
+
+static int worker_check_error(unsigned int cpu, int ext_params)
+{
+	int rc;
+	unsigned long *sdbt;
+	struct hws_cpu_buffer *cb;
+
+	rc = 0;
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+	sdbt = (unsigned long *) cb->worker_entry;
+
+	if (!sdbt || !*sdbt)
+		return -EINVAL;
+
+	if (ext_params & EI_PRA)
+		cb->req_alert++;
+
+	if (ext_params & EI_LSDA)
+		cb->loss_of_sample_data++;
+
+	if (ext_params & EI_IEA) {
+		cb->invalid_entry_address++;
+		rc = -EINVAL;
+	}
+
+	if (ext_params & EI_ISE) {
+		cb->incorrect_sdbt_entry++;
+		rc = -EINVAL;
+	}
+
+	if (ext_params & EI_SACA) {
+		cb->sample_auth_change_alert++;
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void worker_on_finish(unsigned int cpu)
+{
+	int rc, i;
+	struct hws_cpu_buffer *cb;
+
+	cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+	if (cb->finish) {
+		rc = smp_ctl_qsi(cpu);
+		WARN_ON(rc);
+		if (cb->qsi.es) {
+			printk(KERN_INFO
+				"hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
+				cpu);
+			rc = smp_ctl_ssctl_stop(cpu);
+			if (rc)
+				printk(KERN_INFO
+					"hwsampler: CPU %d, CPUMF Deactivation failed.\n",
+					cpu);
+
+			for_each_online_cpu(i) {
+				if (i == cpu)
+					continue;
+				if (!cb->finish) {
+					cb->finish = 1;
+					queue_work_on(i, hws_wq,
+						&cb->worker);
+				}
+			}
+		}
+	}
+}
827 | |||
828 | static void worker_on_interrupt(unsigned int cpu) | ||
829 | { | ||
830 | unsigned long *sdbt; | ||
831 | unsigned char done; | ||
832 | struct hws_cpu_buffer *cb; | ||
833 | |||
834 | cb = &per_cpu(sampler_cpu_buffer, cpu); | ||
835 | |||
836 | sdbt = (unsigned long *) cb->worker_entry; | ||
837 | |||
838 | done = 0; | ||
839 | /* do not proceed if stop was entered, | ||
840 | * forget the buffers not yet processed */ | ||
841 | while (!done && !cb->stop_mode) { | ||
842 | unsigned long *trailer; | ||
843 | struct hws_trailer_entry *te; | ||
844 | unsigned long *dear = 0; | ||
845 | |||
846 | trailer = trailer_entry_ptr(*sdbt); | ||
847 | /* leave loop if no more work to do */ | ||
848 | if (!(*trailer & BUFFER_FULL_MASK)) { | ||
849 | done = 1; | ||
850 | if (!hws_flush_all) | ||
851 | continue; | ||
852 | } | ||
853 | |||
854 | te = (struct hws_trailer_entry *)trailer; | ||
855 | cb->sample_overflow += te->overflow; | ||
856 | |||
857 | add_samples_to_oprofile(cpu, sdbt, dear); | ||
858 | |||
859 | /* reset trailer */ | ||
860 | xchg((unsigned char *) te, 0x40); | ||
861 | |||
862 | /* advance to next sdb slot in current sdbt */ | ||
863 | sdbt++; | ||
864 | /* in case link bit is set use address w/o link bit */ | ||
865 | if (is_link_entry(sdbt)) | ||
866 | sdbt = get_next_sdbt(sdbt); | ||
867 | |||
868 | cb->worker_entry = (unsigned long)sdbt; | ||
869 | } | ||
870 | } | ||
871 | |||
872 | static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt, | ||
873 | unsigned long *dear) | ||
874 | { | ||
875 | struct hws_data_entry *sample_data_ptr; | ||
876 | unsigned long *trailer; | ||
877 | |||
878 | trailer = trailer_entry_ptr(*sdbt); | ||
879 | if (dear) { | ||
880 | if (dear > trailer) | ||
881 | return; | ||
882 | trailer = dear; | ||
883 | } | ||
884 | |||
885 | sample_data_ptr = (struct hws_data_entry *)(*sdbt); | ||
886 | |||
887 | while ((unsigned long *)sample_data_ptr < trailer) { | ||
888 | struct pt_regs *regs = NULL; | ||
889 | struct task_struct *tsk = NULL; | ||
890 | |||
891 | /* | ||
892 | * Check sampling mode, 1 indicates basic (=customer) sampling | ||
893 | * mode. | ||
894 | */ | ||
895 | if (sample_data_ptr->def != 1) { | ||
896 | /* sample slot is not yet written */ | ||
897 | break; | ||
898 | } else { | ||
899 | /* make sure we don't use it twice, | ||
900 | * the next time the sampler will set it again */ | ||
901 | sample_data_ptr->def = 0; | ||
902 | } | ||
903 | |||
904 | /* Get pt_regs. */ | ||
905 | if (sample_data_ptr->P == 1) { | ||
906 | /* userspace sample */ | ||
907 | unsigned int pid = sample_data_ptr->prim_asn; | ||
908 | rcu_read_lock(); | ||
909 | tsk = pid_task(find_vpid(pid), PIDTYPE_PID); | ||
910 | if (tsk) | ||
911 | regs = task_pt_regs(tsk); | ||
912 | rcu_read_unlock(); | ||
913 | } else { | ||
914 | /* kernelspace sample */ | ||
915 | regs = task_pt_regs(current); | ||
916 | } | ||
917 | |||
918 | mutex_lock(&hws_sem); | ||
919 | oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0, | ||
920 | !sample_data_ptr->P, tsk); | ||
921 | mutex_unlock(&hws_sem); | ||
922 | |||
923 | sample_data_ptr++; | ||
924 | } | ||
925 | } | ||
926 | |||
927 | static void worker(struct work_struct *work) | ||
928 | { | ||
929 | unsigned int cpu; | ||
930 | int ext_params; | ||
931 | struct hws_cpu_buffer *cb; | ||
932 | |||
933 | cb = container_of(work, struct hws_cpu_buffer, worker); | ||
934 | cpu = smp_processor_id(); | ||
935 | ext_params = atomic_xchg(&cb->ext_params, 0); | ||
936 | |||
937 | if (!cb->worker_entry) | ||
938 | worker_on_start(cpu); | ||
939 | |||
940 | if (worker_check_error(cpu, ext_params)) | ||
941 | return; | ||
942 | |||
943 | if (!cb->finish) | ||
944 | worker_on_interrupt(cpu); | ||
945 | |||
946 | if (cb->finish) | ||
947 | worker_on_finish(cpu); | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * hwsampler_allocate() - allocate memory for the hardware sampler | ||
952 | * @sdbt: number of SDBTs per online CPU (must be > 0) | ||
953 | * @sdb: number of SDBs per SDBT (minimum 1, maximum 511) | ||
954 | * | ||
955 | * Returns 0 on success, !0 on failure. | ||
956 | */ | ||
957 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb) | ||
958 | { | ||
959 | int cpu, rc; | ||
960 | mutex_lock(&hws_sem); | ||
961 | |||
962 | rc = -EINVAL; | ||
963 | if (hws_state != HWS_DEALLOCATED) | ||
964 | goto allocate_exit; | ||
965 | |||
966 | if (sdbt < 1) | ||
967 | goto allocate_exit; | ||
968 | |||
969 | if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB) | ||
970 | goto allocate_exit; | ||
971 | |||
972 | num_sdbt = sdbt; | ||
973 | num_sdb = sdb; | ||
974 | |||
975 | oom_killer_was_active = 0; | ||
976 | register_oom_notifier(&hws_oom_notifier); | ||
977 | |||
978 | for_each_online_cpu(cpu) { | ||
979 | if (allocate_sdbt(cpu)) { | ||
980 | unregister_oom_notifier(&hws_oom_notifier); | ||
981 | goto allocate_error; | ||
982 | } | ||
983 | } | ||
984 | unregister_oom_notifier(&hws_oom_notifier); | ||
985 | if (oom_killer_was_active) | ||
986 | goto allocate_error; | ||
987 | |||
988 | hws_state = HWS_STOPPED; | ||
989 | rc = 0; | ||
990 | |||
991 | allocate_exit: | ||
992 | mutex_unlock(&hws_sem); | ||
993 | return rc; | ||
994 | |||
995 | allocate_error: | ||
996 | rc = -ENOMEM; | ||
997 | printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n"); | ||
998 | goto allocate_exit; | ||
999 | } | ||
1000 | |||
1001 | /** | ||
1002 | * hwsampler_deallocate() - deallocate hardware sampler memory | ||
1003 | * | ||
1004 | * Returns 0 on success, !0 on failure. | ||
1005 | */ | ||
1006 | int hwsampler_deallocate(void) | ||
1007 | { | ||
1008 | int rc; | ||
1009 | |||
1010 | mutex_lock(&hws_sem); | ||
1011 | |||
1012 | rc = -EINVAL; | ||
1013 | if (hws_state != HWS_STOPPED) | ||
1014 | goto deallocate_exit; | ||
1015 | |||
1016 | smp_ctl_clear_bit(0, 5); /* switch off CR0 bit 58 */ | ||
1017 | deallocate_sdbt(); | ||
1018 | |||
1019 | hws_state = HWS_DEALLOCATED; | ||
1020 | rc = 0; | ||
1021 | |||
1022 | deallocate_exit: | ||
1023 | mutex_unlock(&hws_sem); | ||
1024 | |||
1025 | return rc; | ||
1026 | } | ||
1027 | |||
1028 | long hwsampler_query_min_interval(void) | ||
1029 | { | ||
1030 | if (min_sampler_rate) | ||
1031 | return min_sampler_rate; | ||
1032 | else | ||
1033 | return -EINVAL; | ||
1034 | } | ||
1035 | |||
1036 | long hwsampler_query_max_interval(void) | ||
1037 | { | ||
1038 | if (max_sampler_rate) | ||
1039 | return max_sampler_rate; | ||
1040 | else | ||
1041 | return -EINVAL; | ||
1042 | } | ||
1043 | |||
1044 | unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu) | ||
1045 | { | ||
1046 | struct hws_cpu_buffer *cb; | ||
1047 | |||
1048 | cb = &per_cpu(sampler_cpu_buffer, cpu); | ||
1049 | |||
1050 | return cb->sample_overflow; | ||
1051 | } | ||
1052 | |||
1053 | int hwsampler_setup(void) | ||
1054 | { | ||
1055 | int rc; | ||
1056 | int cpu; | ||
1057 | struct hws_cpu_buffer *cb; | ||
1058 | |||
1059 | mutex_lock(&hws_sem); | ||
1060 | |||
1061 | rc = -EINVAL; | ||
1062 | if (hws_state) | ||
1063 | goto setup_exit; | ||
1064 | |||
1065 | hws_state = HWS_INIT; | ||
1066 | |||
1067 | init_all_cpu_buffers(); | ||
1068 | |||
1069 | rc = check_hardware_prerequisites(); | ||
1070 | if (rc) | ||
1071 | goto setup_exit; | ||
1072 | |||
1073 | rc = check_qsi_on_setup(); | ||
1074 | if (rc) | ||
1075 | goto setup_exit; | ||
1076 | |||
1077 | rc = -EINVAL; | ||
1078 | hws_wq = create_workqueue("hwsampler"); | ||
1079 | if (!hws_wq) | ||
1080 | goto setup_exit; | ||
1081 | |||
1082 | register_cpu_notifier(&hws_cpu_notifier); | ||
1083 | |||
1084 | for_each_online_cpu(cpu) { | ||
1085 | cb = &per_cpu(sampler_cpu_buffer, cpu); | ||
1086 | INIT_WORK(&cb->worker, worker); | ||
1087 | rc = smp_ctl_qsi(cpu); | ||
1088 | WARN_ON(rc); | ||
1089 | if (min_sampler_rate != cb->qsi.min_sampl_rate) { | ||
1090 | if (min_sampler_rate) { | ||
1091 | printk(KERN_WARNING | ||
1092 | "hwsampler: different min sampler rate values.\n"); | ||
1093 | if (min_sampler_rate < cb->qsi.min_sampl_rate) | ||
1094 | min_sampler_rate = | ||
1095 | cb->qsi.min_sampl_rate; | ||
1096 | } else | ||
1097 | min_sampler_rate = cb->qsi.min_sampl_rate; | ||
1098 | } | ||
1099 | if (max_sampler_rate != cb->qsi.max_sampl_rate) { | ||
1100 | if (max_sampler_rate) { | ||
1101 | printk(KERN_WARNING | ||
1102 | "hwsampler: different max sampler rate values.\n"); | ||
1103 | if (max_sampler_rate > cb->qsi.max_sampl_rate) | ||
1104 | max_sampler_rate = | ||
1105 | cb->qsi.max_sampl_rate; | ||
1106 | } else | ||
1107 | max_sampler_rate = cb->qsi.max_sampl_rate; | ||
1108 | } | ||
1109 | } | ||
1110 | register_external_interrupt(0x1407, hws_ext_handler); | ||
1111 | |||
1112 | hws_state = HWS_DEALLOCATED; | ||
1113 | rc = 0; | ||
1114 | |||
1115 | setup_exit: | ||
1116 | mutex_unlock(&hws_sem); | ||
1117 | return rc; | ||
1118 | } | ||
1119 | |||
1120 | int hwsampler_shutdown(void) | ||
1121 | { | ||
1122 | int rc; | ||
1123 | |||
1124 | mutex_lock(&hws_sem); | ||
1125 | |||
1126 | rc = -EINVAL; | ||
1127 | if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) { | ||
1128 | mutex_unlock(&hws_sem); | ||
1129 | |||
1130 | if (hws_wq) | ||
1131 | flush_workqueue(hws_wq); | ||
1132 | |||
1133 | mutex_lock(&hws_sem); | ||
1134 | |||
1135 | if (hws_state == HWS_STOPPED) { | ||
1136 | smp_ctl_clear_bit(0, 5); /* switch off CR0 bit 58 */ | ||
1137 | deallocate_sdbt(); | ||
1138 | } | ||
1139 | if (hws_wq) { | ||
1140 | destroy_workqueue(hws_wq); | ||
1141 | hws_wq = NULL; | ||
1142 | } | ||
1143 | |||
1144 | unregister_external_interrupt(0x1407, hws_ext_handler); | ||
1145 | hws_state = HWS_INIT; | ||
1146 | rc = 0; | ||
1147 | } | ||
1148 | mutex_unlock(&hws_sem); | ||
1149 | |||
1150 | unregister_cpu_notifier(&hws_cpu_notifier); | ||
1151 | |||
1152 | return rc; | ||
1153 | } | ||
1154 | |||
1155 | /** | ||
1156 | * hwsampler_start_all() - start hardware sampling on all online CPUs | ||
1157 | * @rate: the sampling interval to be used | ||
1158 | * | ||
1159 | * Returns 0 on success, !0 on failure. | ||
1160 | */ | ||
1161 | int hwsampler_start_all(unsigned long rate) | ||
1162 | { | ||
1163 | int rc, cpu; | ||
1164 | |||
1165 | mutex_lock(&hws_sem); | ||
1166 | |||
1167 | hws_oom = 0; | ||
1168 | |||
1169 | rc = -EINVAL; | ||
1170 | if (hws_state != HWS_STOPPED) | ||
1171 | goto start_all_exit; | ||
1172 | |||
1173 | interval = rate; | ||
1174 | |||
1175 | /* fail if rate is not valid */ | ||
1176 | if (interval < min_sampler_rate || interval > max_sampler_rate) | ||
1177 | goto start_all_exit; | ||
1178 | |||
1179 | rc = check_qsi_on_start(); | ||
1180 | if (rc) | ||
1181 | goto start_all_exit; | ||
1182 | |||
1183 | rc = prepare_cpu_buffers(); | ||
1184 | if (rc) | ||
1185 | goto start_all_exit; | ||
1186 | |||
1187 | for_each_online_cpu(cpu) { | ||
1188 | rc = start_sampling(cpu); | ||
1189 | if (rc) | ||
1190 | break; | ||
1191 | } | ||
1192 | if (rc) { | ||
1193 | for_each_online_cpu(cpu) { | ||
1194 | stop_sampling(cpu); | ||
1195 | } | ||
1196 | goto start_all_exit; | ||
1197 | } | ||
1198 | hws_state = HWS_STARTED; | ||
1199 | rc = 0; | ||
1200 | |||
1201 | start_all_exit: | ||
1202 | mutex_unlock(&hws_sem); | ||
1203 | |||
1204 | if (rc) | ||
1205 | return rc; | ||
1206 | |||
1207 | register_oom_notifier(&hws_oom_notifier); | ||
1208 | hws_oom = 1; | ||
1209 | hws_flush_all = 0; | ||
1210 | /* now allow the 0x1407 CPUMF external interrupts */ | ||
1211 | smp_ctl_set_bit(0, 5); /* set CR0 bit 58 */ | ||
1212 | |||
1213 | return 0; | ||
1214 | } | ||
1215 | |||
1216 | /** | ||
1217 | * hwsampler_stop_all() - stop hardware sampling on all online CPUs | ||
1218 | * | ||
1219 | * Returns 0 on success, !0 on failure. | ||
1220 | */ | ||
1221 | int hwsampler_stop_all(void) | ||
1222 | { | ||
1223 | int tmp_rc, rc, cpu; | ||
1224 | struct hws_cpu_buffer *cb; | ||
1225 | |||
1226 | mutex_lock(&hws_sem); | ||
1227 | |||
1228 | rc = 0; | ||
1229 | if (hws_state == HWS_INIT) { | ||
1230 | mutex_unlock(&hws_sem); | ||
1231 | return rc; | ||
1232 | } | ||
1233 | hws_state = HWS_STOPPING; | ||
1234 | mutex_unlock(&hws_sem); | ||
1235 | |||
1236 | for_each_online_cpu(cpu) { | ||
1237 | cb = &per_cpu(sampler_cpu_buffer, cpu); | ||
1238 | cb->stop_mode = 1; | ||
1239 | tmp_rc = stop_sampling(cpu); | ||
1240 | if (tmp_rc) | ||
1241 | rc = tmp_rc; | ||
1242 | } | ||
1243 | |||
1244 | if (hws_wq) | ||
1245 | flush_workqueue(hws_wq); | ||
1246 | |||
1247 | mutex_lock(&hws_sem); | ||
1248 | if (hws_oom) { | ||
1249 | unregister_oom_notifier(&hws_oom_notifier); | ||
1250 | hws_oom = 0; | ||
1251 | } | ||
1252 | hws_state = HWS_STOPPED; | ||
1253 | mutex_unlock(&hws_sem); | ||
1254 | |||
1255 | return rc; | ||
1256 | } | ||
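Taken together, the entry points above form a small state machine: HWS_INIT -> HWS_DEALLOCATED (setup) -> HWS_STOPPED (allocate) -> HWS_STARTED (start_all), and back down via stop_all, deallocate and shutdown. A minimal, hypothetical caller sketch showing the order the hws_state checks expect; the real consumer is the oprofile glue code in init.c further down in this patch:

/*
 * Hypothetical caller sketch, not part of this patch.  Uses the default
 * buffer geometry from init.c (1 SDB-table, 511 SDBs per table); rate must
 * lie between the intervals reported by the query functions.
 */
static int example_sampling_session(unsigned long rate)
{
        int rc;

        rc = hwsampler_setup();                 /* HWS_INIT -> HWS_DEALLOCATED */
        if (rc)
                return rc;
        rc = hwsampler_allocate(1, 511);        /* -> HWS_STOPPED */
        if (rc)
                goto out_shutdown;
        rc = hwsampler_start_all(rate);         /* -> HWS_STARTED */
        if (rc)
                goto out_deallocate;

        /* samples are now delivered via oprofile_add_ext_hw_sample() */

        rc = hwsampler_stop_all();              /* -> HWS_STOPPED */
out_deallocate:
        hwsampler_deallocate();                 /* -> HWS_DEALLOCATED */
out_shutdown:
        hwsampler_shutdown();                   /* -> HWS_INIT */
        return rc;
}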
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h new file mode 100644 index 000000000000..8c72b59316b5 --- /dev/null +++ b/arch/s390/oprofile/hwsampler.h | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * CPUMF HW sampler functions and internal structures | ||
3 | * | ||
4 | * Copyright IBM Corp. 2010 | ||
5 | * Author(s): Heinz Graalfs <graalfs@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef HWSAMPLER_H_ | ||
9 | #define HWSAMPLER_H_ | ||
10 | |||
11 | #include <linux/workqueue.h> | ||
12 | |||
13 | struct hws_qsi_info_block /* QUERY SAMPLING information block */ | ||
14 | { /* Bit(s) */ | ||
15 | unsigned int b0_13:14; /* 0-13: zeros */ | ||
16 | unsigned int as:1; /* 14: sampling authorisation control*/ | ||
17 | unsigned int b15_21:7; /* 15-21: zeros */ | ||
18 | unsigned int es:1; /* 22: sampling enable control */ | ||
19 | unsigned int b23_29:7; /* 23-29: zeros */ | ||
20 | unsigned int cs:1; /* 30: sampling activation control */ | ||
21 | unsigned int:1; /* 31: reserved */ | ||
22 | unsigned int bsdes:16; /* 4-5: size of sampling entry */ | ||
23 | unsigned int:16; /* 6-7: reserved */ | ||
24 | unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */ | ||
25 | unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/ | ||
26 | unsigned long tear; /* 24-31: TEAR contents */ | ||
27 | unsigned long dear; /* 32-39: DEAR contents */ | ||
28 | unsigned int rsvrd0; /* 40-43: reserved */ | ||
29 | unsigned int cpu_speed; /* 44-47: CPU speed */ | ||
30 | unsigned long long rsvrd1; /* 48-55: reserved */ | ||
31 | unsigned long long rsvrd2; /* 56-63: reserved */ | ||
32 | }; | ||
33 | |||
34 | struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */ | ||
35 | { /* bytes 0 - 7 Bit(s) */ | ||
36 | unsigned int s:1; /* 0: maximum buffer indicator */ | ||
37 | unsigned int h:1; /* 1: part. level reserved for VM use*/ | ||
38 | unsigned long b2_53:52; /* 2-53: zeros */ | ||
39 | unsigned int es:1; /* 54: sampling enable control */ | ||
40 | unsigned int b55_61:7; /* 55-61: zeros */ | ||
41 | unsigned int cs:1; /* 62: sampling activation control */ | ||
42 | unsigned int b63:1; /* 63: zero */ | ||
43 | unsigned long interval; /* 8-15: sampling interval */ | ||
44 | unsigned long tear; /* 16-23: TEAR contents */ | ||
45 | unsigned long dear; /* 24-31: DEAR contents */ | ||
46 | /* 32-63: */ | ||
47 | unsigned long rsvrd1; /* reserved */ | ||
48 | unsigned long rsvrd2; /* reserved */ | ||
49 | unsigned long rsvrd3; /* reserved */ | ||
50 | unsigned long rsvrd4; /* reserved */ | ||
51 | }; | ||
52 | |||
53 | struct hws_cpu_buffer { | ||
54 | unsigned long first_sdbt; /* address of first SDB table for this CPU */ | ||
55 | unsigned long worker_entry; | ||
56 | unsigned long sample_overflow; /* taken from SDB ... */ | ||
57 | struct hws_qsi_info_block qsi; | ||
58 | struct hws_ssctl_request_block ssctl; | ||
59 | struct work_struct worker; | ||
60 | atomic_t ext_params; | ||
61 | unsigned long req_alert; | ||
62 | unsigned long loss_of_sample_data; | ||
63 | unsigned long invalid_entry_address; | ||
64 | unsigned long incorrect_sdbt_entry; | ||
65 | unsigned long sample_auth_change_alert; | ||
66 | unsigned int finish:1; | ||
67 | unsigned int oom:1; | ||
68 | unsigned int stop_mode:1; | ||
69 | }; | ||
70 | |||
71 | struct hws_data_entry { | ||
72 | unsigned int def:16; /* 0-15 Data Entry Format */ | ||
73 | unsigned int R:4; /* 16-19 reserved */ | ||
74 | unsigned int U:4; /* 20-23 Number of unique instructions */ | ||
75 | unsigned int z:2; /* 24-25 zeros */ | ||
76 | unsigned int T:1; /* 26 PSW DAT mode */ | ||
77 | unsigned int W:1; /* 27 PSW wait state */ | ||
78 | unsigned int P:1; /* 28 PSW Problem state */ | ||
79 | unsigned int AS:2; /* 29-30 PSW address-space control */ | ||
80 | unsigned int I:1; /* 31 entry valid or invalid */ | ||
81 | unsigned int:16; | ||
82 | unsigned int prim_asn:16; /* primary ASN */ | ||
83 | unsigned long long ia; /* Instruction Address */ | ||
84 | unsigned long long lpp; /* Logical-Partition Program Param. */ | ||
85 | unsigned long long vpp; /* Virtual-Machine Program Param. */ | ||
86 | }; | ||
87 | |||
88 | struct hws_trailer_entry { | ||
89 | unsigned int f:1; /* bit 0: block full indicator */ | ||
90 | unsigned int a:1; /* bit 1: alert request control */ | ||
91 | unsigned long:62; /* bits 2-63: reserved */ | ||
92 | unsigned long overflow; /* bytes 8-15: sample overflow count */ | ||
93 | unsigned long timestamp; /* bytes 16-31: time stamp */ | ||
94 | unsigned long timestamp1; | ||
95 | unsigned long reserved1; /* bytes 32-47: reserved */ | ||
96 | unsigned long reserved2; | ||
97 | unsigned long progusage1; /* bytes 48-63: reserved for programming use */ | ||
98 | unsigned long progusage2; | ||
99 | }; | ||
100 | |||
101 | int hwsampler_setup(void); | ||
102 | int hwsampler_shutdown(void); | ||
103 | int hwsampler_allocate(unsigned long sdbt, unsigned long sdb); | ||
104 | int hwsampler_deallocate(void); | ||
105 | long hwsampler_query_min_interval(void); | ||
106 | long hwsampler_query_max_interval(void); | ||
107 | int hwsampler_start_all(unsigned long interval); | ||
108 | int hwsampler_stop_all(void); | ||
109 | int hwsampler_deactivate(unsigned int cpu); | ||
110 | int hwsampler_activate(unsigned int cpu); | ||
111 | unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu); | ||
112 | |||
113 | #endif /*HWSAMPLER_H_*/ | ||
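The offset comments above imply fixed overall sizes for these blocks on a 64-bit kernel, where unsigned long is 8 bytes. A compile-time sanity-check sketch under that assumption (not part of the patch):

/*
 * Sketch only: asserts the sizes implied by the offset comments,
 * assuming a 64-bit build.  BUILD_BUG_ON comes from <linux/kernel.h>.
 */
static inline void hws_check_layout(void)
{
        BUILD_BUG_ON(sizeof(struct hws_qsi_info_block)      != 64);
        BUILD_BUG_ON(sizeof(struct hws_ssctl_request_block) != 64);
        BUILD_BUG_ON(sizeof(struct hws_data_entry)          != 32);
        BUILD_BUG_ON(sizeof(struct hws_trailer_entry)       != 64);
}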
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 7a995113b918..16c76def4a9d 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -4,23 +4,182 @@ | |||
4 | * S390 Version | 4 | * S390 Version |
5 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation |
6 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 6 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) |
7 | * Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com) | ||
8 | * Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com) | ||
7 | * | 9 | * |
8 | * @remark Copyright 2002 OProfile authors | 10 | * @remark Copyright 2002-2011 OProfile authors |
9 | */ | 11 | */ |
10 | 12 | ||
11 | #include <linux/oprofile.h> | 13 | #include <linux/oprofile.h> |
12 | #include <linux/init.h> | 14 | #include <linux/init.h> |
13 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
18 | #include <linux/fs.h> | ||
19 | |||
20 | #include "../../../drivers/oprofile/oprof.h" | ||
21 | #include "hwsampler.h" | ||
22 | |||
23 | #define DEFAULT_INTERVAL 4096 | ||
24 | |||
25 | #define DEFAULT_SDBT_BLOCKS 1 | ||
26 | #define DEFAULT_SDB_BLOCKS 511 | ||
27 | |||
28 | static unsigned long oprofile_hw_interval = DEFAULT_INTERVAL; | ||
29 | static unsigned long oprofile_min_interval; | ||
30 | static unsigned long oprofile_max_interval; | ||
31 | |||
32 | static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS; | ||
33 | static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS; | ||
14 | 34 | ||
35 | static int hwsampler_file; | ||
36 | static int hwsampler_running; /* start_mutex must be held to change */ | ||
37 | |||
38 | static struct oprofile_operations timer_ops; | ||
15 | 39 | ||
16 | extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); | 40 | extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); |
17 | 41 | ||
18 | int __init oprofile_arch_init(struct oprofile_operations* ops) | 42 | static int oprofile_hwsampler_start(void) |
43 | { | ||
44 | int retval; | ||
45 | |||
46 | hwsampler_running = hwsampler_file; | ||
47 | |||
48 | if (!hwsampler_running) | ||
49 | return timer_ops.start(); | ||
50 | |||
51 | retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks); | ||
52 | if (retval) | ||
53 | return retval; | ||
54 | |||
55 | retval = hwsampler_start_all(oprofile_hw_interval); | ||
56 | if (retval) | ||
57 | hwsampler_deallocate(); | ||
58 | |||
59 | return retval; | ||
60 | } | ||
61 | |||
62 | static void oprofile_hwsampler_stop(void) | ||
63 | { | ||
64 | if (!hwsampler_running) { | ||
65 | timer_ops.stop(); | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | hwsampler_stop_all(); | ||
70 | hwsampler_deallocate(); | ||
71 | return; | ||
72 | } | ||
73 | |||
74 | static ssize_t hwsampler_read(struct file *file, char __user *buf, | ||
75 | size_t count, loff_t *offset) | ||
76 | { | ||
77 | return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset); | ||
78 | } | ||
79 | |||
80 | static ssize_t hwsampler_write(struct file *file, char const __user *buf, | ||
81 | size_t count, loff_t *offset) | ||
82 | { | ||
83 | unsigned long val; | ||
84 | int retval; | ||
85 | |||
86 | if (*offset) | ||
87 | return -EINVAL; | ||
88 | |||
89 | retval = oprofilefs_ulong_from_user(&val, buf, count); | ||
90 | if (retval) | ||
91 | return retval; | ||
92 | |||
93 | if (oprofile_started) | ||
94 | /* | ||
95 | * safe to do without locking as we set | ||
96 | * hwsampler_running in start() when start_mutex is | ||
97 | * held | ||
98 | */ | ||
99 | return -EBUSY; | ||
100 | |||
101 | hwsampler_file = val; | ||
102 | |||
103 | return count; | ||
104 | } | ||
105 | |||
106 | static const struct file_operations hwsampler_fops = { | ||
107 | .read = hwsampler_read, | ||
108 | .write = hwsampler_write, | ||
109 | }; | ||
110 | |||
111 | static int oprofile_create_hwsampling_files(struct super_block *sb, | ||
112 | struct dentry *root) | ||
113 | { | ||
114 | struct dentry *hw_dir; | ||
115 | |||
116 | /* reinitialize default values */ | ||
117 | hwsampler_file = 1; | ||
118 | |||
119 | hw_dir = oprofilefs_mkdir(sb, root, "hwsampling"); | ||
120 | if (!hw_dir) | ||
121 | return -EINVAL; | ||
122 | |||
123 | oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops); | ||
124 | oprofilefs_create_ulong(sb, hw_dir, "hw_interval", | ||
125 | &oprofile_hw_interval); | ||
126 | oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval", | ||
127 | &oprofile_min_interval); | ||
128 | oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval", | ||
129 | &oprofile_max_interval); | ||
130 | oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks", | ||
131 | &oprofile_sdbt_blocks); | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static int oprofile_hwsampler_init(struct oprofile_operations *ops) | ||
137 | { | ||
138 | if (hwsampler_setup()) | ||
139 | return -ENODEV; | ||
140 | |||
141 | /* | ||
142 | * create hwsampler files only if hwsampler_setup() succeeds. | ||
143 | */ | ||
144 | oprofile_min_interval = hwsampler_query_min_interval(); | ||
145 | if ((long) oprofile_min_interval < 0) { | ||
146 | oprofile_min_interval = 0; | ||
147 | return -ENODEV; | ||
148 | } | ||
149 | oprofile_max_interval = hwsampler_query_max_interval(); | ||
150 | if ((long) oprofile_max_interval < 0) { | ||
151 | oprofile_max_interval = 0; | ||
152 | return -ENODEV; | ||
153 | } | ||
154 | |||
155 | if (oprofile_timer_init(ops)) | ||
156 | return -ENODEV; | ||
157 | |||
158 | printk(KERN_INFO "oprofile: using hardware sampling\n"); | ||
159 | |||
160 | memcpy(&timer_ops, ops, sizeof(timer_ops)); | ||
161 | |||
162 | ops->start = oprofile_hwsampler_start; | ||
163 | ops->stop = oprofile_hwsampler_stop; | ||
164 | ops->create_files = oprofile_create_hwsampling_files; | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static void oprofile_hwsampler_exit(void) | ||
170 | { | ||
171 | oprofile_timer_exit(); | ||
172 | hwsampler_shutdown(); | ||
173 | } | ||
174 | |||
175 | int __init oprofile_arch_init(struct oprofile_operations *ops) | ||
19 | { | 176 | { |
20 | ops->backtrace = s390_backtrace; | 177 | ops->backtrace = s390_backtrace; |
21 | return -ENODEV; | 178 | |
179 | return oprofile_hwsampler_init(ops); | ||
22 | } | 180 | } |
23 | 181 | ||
24 | void oprofile_arch_exit(void) | 182 | void oprofile_arch_exit(void) |
25 | { | 183 | { |
184 | oprofile_hwsampler_exit(); | ||
26 | } | 185 | } |
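For completeness, a hypothetical userspace snippet that flips the new hwsampling/hwsampler switch before a profiling run. It assumes oprofilefs is mounted at its conventional /dev/oprofile location; writing while profiling is already started fails with -EBUSY, as enforced by hwsampler_write() above.

/* Hypothetical example, not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* "1" selects CPUMF hardware sampling, "0" the timer fallback */
        int fd = open("/dev/oprofile/hwsampling/hwsampler", O_WRONLY);

        if (fd < 0) {
                perror("open hwsampler");
                return 1;
        }
        if (write(fd, "1", 1) != 1)
                perror("write hwsampler");
        close(fd);
        return 0;
}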