 66 files changed, 1048 insertions(+), 1072 deletions(-)
diff --git a/Documentation/s390/zfcpdump.txt b/Documentation/s390/zfcpdump.txt
index cf45d27c4608..dc929be96016 100644
--- a/Documentation/s390/zfcpdump.txt
+++ b/Documentation/s390/zfcpdump.txt
@@ -1,15 +1,15 @@
-s390 SCSI dump tool (zfcpdump)
+The s390 SCSI dump tool (zfcpdump)
 
 System z machines (z900 or higher) provide hardware support for creating system
 dumps on SCSI disks. The dump process is initiated by booting a dump tool, which
 has to create a dump of the current (probably crashed) Linux image. In order to
 not overwrite memory of the crashed Linux with data of the dump tool, the
-hardware saves some memory plus the register sets of the boot cpu before the
+hardware saves some memory plus the register sets of the boot CPU before the
 dump tool is loaded. There exists an SCLP hardware interface to obtain the saved
 memory afterwards. Currently 32 MB are saved.
 
 This zfcpdump implementation consists of a Linux dump kernel together with
-a userspace dump tool, which are loaded together into the saved memory region
+a user space dump tool, which are loaded together into the saved memory region
 below 32 MB. zfcpdump is installed on a SCSI disk using zipl (as contained in
 the s390-tools package) to make the device bootable. The operator of a Linux
 system can then trigger a SCSI dump by booting the SCSI disk, where zfcpdump
@@ -19,68 +19,33 @@ The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem",
 which exports memory and registers of the crashed Linux in an s390
 standalone dump format. It can be used in the same way as e.g. /dev/mem. The
 dump format defines a 4K header followed by plain uncompressed memory. The
-register sets are stored in the prefix pages of the respective cpus. To build a
+register sets are stored in the prefix pages of the respective CPUs. To build a
 dump enabled kernel with the zcore driver, the kernel config option
-CONFIG_ZFCPDUMP has to be set. When reading from "zcore/mem", the part of
+CONFIG_CRASH_DUMP has to be set. When reading from "zcore/mem", the part of
 memory, which has been saved by hardware is read by the driver via the SCLP
 hardware interface. The second part is just copied from the non overwritten real
 memory.
 
-The userspace application of zfcpdump can reside e.g. in an intitramfs or an
-initrd. It reads from zcore/mem and writes the system dump to a file on a
-SCSI disk.
+Since kernel version 3.12 the /proc/vmcore file can also be used to access
+the dump.
 
-To build a zfcpdump kernel use the following settings in your kernel
-configuration:
- * CONFIG_ZFCPDUMP=y
- * Enable ZFCP driver
- * Enable SCSI driver
- * Enable ext2 and ext3 filesystems
- * Disable as many features as possible to keep the kernel small.
-   E.g. network support is not needed at all.
+To get a valid zfcpdump kernel configuration use "make zfcpdump_defconfig".
 
-To use the zfcpdump userspace application in an initramfs you have to do the
-following:
+The s390 zipl tool looks for the zfcpdump kernel and optional initrd/initramfs
+under the following locations:
 
- * Copy the zfcpdump executable somewhere into your Linux tree.
-   E.g. to "arch/s390/boot/zfcpdump". If you do not want to include
-   shared libraries, compile the tool with the "-static" gcc option.
- * If you want to include e2fsck, add it to your source tree, too. The zfcpdump
-   application attempts to start /sbin/e2fsck from the ramdisk.
- * Use an initramfs config file like the following:
+* kernel:  <zfcpdump directory>/zfcpdump.image
+* ramdisk: <zfcpdump directory>/zfcpdump.rd
 
- dir /dev 755 0 0
- nod /dev/console 644 0 0 c 5 1
- nod /dev/null 644 0 0 c 1 3
- nod /dev/sda1 644 0 0 b 8 1
- nod /dev/sda2 644 0 0 b 8 2
- nod /dev/sda3 644 0 0 b 8 3
- nod /dev/sda4 644 0 0 b 8 4
- nod /dev/sda5 644 0 0 b 8 5
- nod /dev/sda6 644 0 0 b 8 6
- nod /dev/sda7 644 0 0 b 8 7
- nod /dev/sda8 644 0 0 b 8 8
- nod /dev/sda9 644 0 0 b 8 9
- nod /dev/sda10 644 0 0 b 8 10
- nod /dev/sda11 644 0 0 b 8 11
- nod /dev/sda12 644 0 0 b 8 12
- nod /dev/sda13 644 0 0 b 8 13
- nod /dev/sda14 644 0 0 b 8 14
- nod /dev/sda15 644 0 0 b 8 15
- file /init arch/s390/boot/zfcpdump 755 0 0
- file /sbin/e2fsck arch/s390/boot/e2fsck 755 0 0
- dir /proc 755 0 0
- dir /sys 755 0 0
- dir /mnt 755 0 0
- dir /sbin 755 0 0
+The zfcpdump directory is defined in the s390-tools package.
 
-* Issue "make image" to build the zfcpdump image with initramfs.
+The user space application of zfcpdump can reside in an initramfs or an
+initrd. It can also be included in a built-in kernel initramfs. The application
+reads from /proc/vmcore or zcore/mem and writes the system dump to a SCSI disk.
 
-In a Linux distribution the zfcpdump enabled kernel image must be copied to
-/usr/share/zfcpdump/zfcpdump.image, where the s390 zipl tool is looking for the
-dump kernel when preparing a SCSI dump disk.
-
-If you use a ramdisk copy it to "/usr/share/zfcpdump/zfcpdump.rd".
+The s390-tools package version 1.24.0 and above builds an external zfcpdump
+initramfs with a user space application that writes the dump to a SCSI
+partition.
 
 For more information on how to use zfcpdump refer to the s390 'Using the Dump
 Tools book', which is available from
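
The format exported through zcore/mem is deliberately simple: a 4K header
followed by the raw, uncompressed memory image. A minimal user space reader
might look like the following sketch; the debugfs path is an assumption
(debugfs is typically mounted on /sys/kernel/debug), and the header is
treated as opaque since its layout belongs to the zcore driver.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: skip the 4K dump header to reach the raw memory image. */
#define DUMP_HDR_SIZE 4096

int main(void)
{
	unsigned char hdr[DUMP_HDR_SIZE];
	int fd = open("/sys/kernel/debug/zcore/mem", O_RDONLY);

	if (fd < 0) {
		perror("open zcore/mem");
		return 1;
	}
	if (read(fd, hdr, sizeof(hdr)) != (ssize_t) sizeof(hdr)) {
		perror("read dump header");
		close(fd);
		return 1;
	}
	/* raw memory of the crashed system follows from offset 4K on */
	printf("dump header read; memory starts at offset %d\n",
	       DUMP_HDR_SIZE);
	close(fd);
	return 0;
}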
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d68fe34799b0..bb63499fc5d3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -60,7 +60,6 @@ config PCI_QUIRKS
 
 config S390
 	def_bool y
-	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -130,6 +129,7 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
+	select HAVE_MEMBLOCK_PHYS_MAP
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
@@ -139,6 +139,7 @@ config S390
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
+	select NO_BOOTMEM
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
 	select SYSCTL_EXCEPTION_TRACE
@@ -592,21 +593,14 @@ config CRASH_DUMP
 	bool "kernel crash dumps"
 	depends on 64BIT && SMP
 	select KEXEC
-	select ZFCPDUMP
 	help
 	  Generate crash dump after being started by kexec.
 	  Crash dump kernels are loaded in the main kernel with kexec-tools
 	  into a specially reserved region and then later executed after
 	  a crash by kdump/kexec.
-	  For more details see Documentation/kdump/kdump.txt
-
-config ZFCPDUMP
-	def_bool n
-	prompt "zfcpdump support"
-	depends on 64BIT && SMP
-	help
-	  Select this option if you want to build an zfcpdump enabled kernel.
 	  Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
+	  This option also enables s390 zfcpdump.
+	  See also <file:Documentation/s390/zfcpdump.txt>
 
 endmenu
 
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 42be53743133..edcf2a706942 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -13,6 +13,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
+#include <linux/slab.h>
 #include <asm/io.h>
 
 #include "appldata.h"
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 866ecbe670e4..f90d1fc6d603 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -12,7 +12,7 @@ targets += misc.o piggy.o sizes.h head$(BITS).o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index a9c2c0686177..b80e456d6428 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -229,5 +229,5 @@ int ccw_device_siosl(struct ccw_device *);
 
 extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
 
-extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
+struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *, int);
 #endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index ebc2913f9ee0..057ce0ca6377 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -10,6 +10,8 @@ struct ccw_driver;
  * @count: number of attached slave devices
  * @dev: embedded device structure
  * @cdev: variable number of slave devices, allocated as needed
+ * @ungroup_work: work to be done when a ccwgroup notifier has action
+ *	type %BUS_NOTIFY_UNBIND_DRIVER
  */
 struct ccwgroup_device {
 	enum {
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index 38c405ef89ce..7298eec98541 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -8,6 +8,17 @@
 #include <uapi/asm/chpid.h>
 #include <asm/cio.h>
 
+struct channel_path_desc {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u8 swla;
+	u8 zeroes;
+	u8 chla;
+	u8 chpp;
+} __packed;
+
 static inline void chp_id_init(struct chp_id *chpid)
 {
 	memset(chpid, 0, sizeof(struct chp_id));
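
The new struct channel_path_desc mirrors the 8-byte channel-path description
block returned by the hardware, which is why it is declared __packed. A
stand-alone sketch of the compile-time checks that keep such a mirror honest
(the struct is re-declared here purely for illustration):

#include <stddef.h>

typedef unsigned char u8;

/* Mirror of an 8-byte hardware-defined block; size and offsets are fixed. */
struct channel_path_desc {
	u8 flags;
	u8 lsn;
	u8 desc;
	u8 chpid;
	u8 swla;
	u8 zeroes;
	u8 chla;
	u8 chpp;
} __attribute__((packed));

_Static_assert(sizeof(struct channel_path_desc) == 8,
	       "descriptor must match the 8-byte hardware layout");
_Static_assert(offsetof(struct channel_path_desc, chpid) == 3,
	       "chpid is the fourth byte of the block");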
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 69cf5b5eddc9..a4811aa0304d 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -29,7 +29,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, newval, ret;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
@@ -79,7 +79,7 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
 	int ret;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	asm volatile(
 		"   sacf 256\n"
 		"0: cs   %1,%4,0(%5)\n"
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index bbf8141408cd..2070cad80e9e 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -93,7 +93,9 @@ struct _lowcore {
 	__u32	save_area_sync[8];		/* 0x0200 */
 	__u32	save_area_async[8];		/* 0x0220 */
 	__u32	save_area_restart[1];		/* 0x0240 */
-	__u8	pad_0x0244[0x0248-0x0244];	/* 0x0244 */
+
+	/* CPU flags. */
+	__u32	cpu_flags;			/* 0x0244 */
 
 	/* Return psws. */
 	psw_t	return_psw;			/* 0x0248 */
@@ -139,12 +141,9 @@
 	__u32	percpu_offset;			/* 0x02f0 */
 	__u32	machine_flags;			/* 0x02f4 */
 	__u32	ftrace_func;			/* 0x02f8 */
-	__u8	pad_0x02fc[0x0300-0x02fc];	/* 0x02fc */
-
-	/* Interrupt response block */
-	__u8	irb[64];			/* 0x0300 */
+	__u32	spinlock_lockval;		/* 0x02fc */
 
-	__u8	pad_0x0340[0x0e00-0x0340];	/* 0x0340 */
+	__u8	pad_0x0300[0x0e00-0x0300];	/* 0x0300 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
@@ -237,7 +236,9 @@
 	__u64	save_area_sync[8];		/* 0x0200 */
 	__u64	save_area_async[8];		/* 0x0240 */
 	__u64	save_area_restart[1];		/* 0x0280 */
-	__u8	pad_0x0288[0x0290-0x0288];	/* 0x0288 */
+
+	/* CPU flags. */
+	__u64	cpu_flags;			/* 0x0288 */
 
 	/* Return psws. */
 	psw_t	return_psw;			/* 0x0290 */
@@ -285,15 +286,13 @@
 	__u64	machine_flags;			/* 0x0388 */
 	__u64	ftrace_func;			/* 0x0390 */
 	__u64	gmap;				/* 0x0398 */
-	__u8	pad_0x03a0[0x0400-0x03a0];	/* 0x03a0 */
-
-	/* Interrupt response block. */
-	__u8	irb[64];			/* 0x0400 */
+	__u32	spinlock_lockval;		/* 0x03a0 */
+	__u8	pad_0x03a4[0x0400-0x03a4];	/* 0x03a4 */
 
 	/* Per cpu primary space access list */
-	__u32	paste[16];			/* 0x0440 */
+	__u32	paste[16];			/* 0x0400 */
 
-	__u8	pad_0x0480[0x0e00-0x0480];	/* 0x0480 */
+	__u8	pad_0x0440[0x0e00-0x0440];	/* 0x0440 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
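
All of these lowcore changes follow the same idiom: every field sits at a
fixed architectural offset, and each pad array is sized as the next offset
minus the current one, so squeezing in cpu_flags or spinlock_lockval only
means shrinking the neighbouring pad. A compileable toy fragment (not the
real struct _lowcore) showing how the offsets stay verifiable:

#include <stddef.h>

/* Toy fragment: pad arrays sized as end - start keep offsets explicit. */
struct lowcore_fragment {
	unsigned char head[0x02fc];	/* fields up to 0x02fc elided */
	unsigned int  spinlock_lockval;	/* 0x02fc */
	unsigned char pad_0x0300[0x0e00 - 0x0300];	/* 0x0300 */
	unsigned char ipib[8];		/* 0x0e00 */
};

_Static_assert(offsetof(struct lowcore_fragment, spinlock_lockval) == 0x02fc,
	       "lockval must sit at 0x02fc");
_Static_assert(offsetof(struct lowcore_fragment, pad_0x0300) == 0x0300,
	       "pad must start right after the lockval");
_Static_assert(offsetof(struct lowcore_fragment, ipib) == 0x0e00,
	       "a misplaced field breaks this check at compile time");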
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 71be346d0e3c..056d7eff2a16 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -30,33 +30,31 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)	do { } while (0)
 
-static inline void update_user_asce(struct mm_struct *mm, int load_primary)
+static inline void set_user_asce(struct mm_struct *mm)
 {
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (load_primary)
-		__ctl_load(S390_lowcore.user_asce, 1, 1);
 	set_fs(current->thread.mm_segment);
+	set_cpu_flag(CIF_ASCE);
 }
 
-static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
+static inline void clear_user_asce(void)
 {
 	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
 
-	if (load_primary)
-		__ctl_load(S390_lowcore.user_asce, 1, 1);
+	__ctl_load(S390_lowcore.user_asce, 1, 1);
 	__ctl_load(S390_lowcore.user_asce, 7, 7);
 }
 
-static inline void update_primary_asce(struct task_struct *tsk)
+static inline void load_kernel_asce(void)
 {
 	unsigned long asce;
 
 	__ctl_store(asce, 1, 1);
 	if (asce != S390_lowcore.kernel_asce)
 		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-	set_tsk_thread_flag(tsk, TIF_ASCE);
+	set_cpu_flag(CIF_ASCE);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -64,25 +62,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
-	update_primary_asce(tsk);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-	if (atomic_inc_return(&next->context.attach_count) >> 16) {
-		/* Delay update_user_asce until all TLB flushes are done. */
-		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
-		/* Clear old ASCE by loading the kernel ASCE. */
-		clear_user_asce(next, 0);
-	} else {
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_user_asce(next, 0);
-		if (next->context.flush_mm)
-			/* Flush pending TLBs */
-			__tlb_flush_mm(next);
-	}
+	/* Clear old ASCE by loading the kernel ASCE. */
+	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	/* Delay loading of the new ASCE to control registers CR1 & CR7 */
+	set_cpu_flag(CIF_ASCE);
+	atomic_inc(&next->context.attach_count);
 	atomic_dec(&prev->context.attach_count);
-	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
@@ -93,15 +83,14 @@ static inline void finish_arch_post_lock_switch(void)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
+	if (!mm)
 		return;
 	preempt_disable();
-	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
 	while (atomic_read(&mm->context.attach_count) >> 16)
 		cpu_relax();
 
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	update_user_asce(mm, 0);
+	set_user_asce(mm);
 	if (mm->context.flush_mm)
 		__tlb_flush_mm(mm);
 	preempt_enable();
@@ -113,7 +102,9 @@
 static inline void activate_mm(struct mm_struct *prev,
                                struct mm_struct *next)
 {
 	switch_mm(prev, next, current);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+	set_user_asce(next);
 }
 
 static inline void arch_dup_mmap(struct mm_struct *oldmm,
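
The reworked switch_mm() never installs the next ASCE directly: it parks
CR1/CR7 on the kernel ASCE, raises CIF_ASCE, and leaves the user ASCE load
to finish_arch_post_lock_switch() or the return-to-user path. A toy
single-threaded model of that deferral (all names and values here are
illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

static unsigned long cr1;		/* stands in for control register 1 */
static const unsigned long kernel_asce = 0x1000;
static bool cif_asce;			/* stands in for the CIF_ASCE flag */

static void model_switch_mm(void)
{
	cr1 = kernel_asce;	/* clear the old user ASCE immediately */
	cif_asce = true;	/* ... and defer loading the new one */
}

static void model_return_to_user(unsigned long user_asce)
{
	if (cif_asce) {		/* the entry code tests _CIF_ASCE */
		cr1 = user_asce;
		cif_asce = false;
	}
}

int main(void)
{
	model_switch_mm();
	printf("in kernel:    cr1=%#lx (kernel ASCE)\n", cr1);
	model_return_to_user(0x2000);
	printf("back to user: cr1=%#lx (user ASCE)\n", cr1);
	return 0;
}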
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 79b5f0783a30..c030900320e0 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -78,10 +78,16 @@ struct zpci_dev {
 	enum zpci_state state;
 	u32		fid;		/* function ID, used by sclp */
 	u32		fh;		/* function handle, used by insn's */
+	u16		vfn;		/* virtual function number */
 	u16		pchid;		/* physical channel ID */
 	u8		pfgid;		/* function group ID */
+	u8		pft;		/* pci function type */
 	u16		domain;
 
+	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
+	u32 uid;			/* user defined id */
+	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
+
 	/* IRQ stuff */
 	u64		msi_addr;	/* MSI address */
 	struct airq_iv *aibv;		/* adapter interrupt bit vector */
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index d31d739f8689..dd78f92f1cce 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -44,6 +44,7 @@ struct clp_fh_list_entry {
 #define CLP_SET_DISABLE_PCI_FN	1	/* Yes, 1 disables it */
 
 #define CLP_UTIL_STR_LEN	64
+#define CLP_PFIP_NR_SEGMENTS	4
 
 /* List PCI functions request */
 struct clp_req_list_pci {
@@ -85,7 +86,7 @@
 	struct clp_rsp_hdr hdr;
 	u32 fmt			:  4;	/* cmd request block format */
 	u32			: 28;
-	u64 reserved1;
+	u64			: 64;
 	u16 vfn;			/* virtual fn number */
 	u16			:  7;
 	u16 util_str_avail	:  1;	/* utility string available? */
@@ -94,10 +95,13 @@
 	u8 bar_size[PCI_BAR_COUNT];
 	u16 pchid;
 	u32 bar[PCI_BAR_COUNT];
-	u64 reserved2;
+	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
+	u32			: 24;
+	u8 pft;				/* pci function type */
 	u64 sdma;		/* start dma as */
 	u64 edma;		/* end dma as */
-	u64 reserved3[6];
+	u32 reserved[11];
+	u32 uid;		/* user defined id */
 	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
 } __packed;
 
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index dc5fc4f90e52..6f02d452bbee 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -11,6 +11,13 @@
 #ifndef __ASM_S390_PROCESSOR_H
 #define __ASM_S390_PROCESSOR_H
 
+#define CIF_MCCK_PENDING	0	/* machine check handling is pending */
+#define CIF_ASCE		1	/* user asce needs fixup / uaccess */
+
+#define _CIF_MCCK_PENDING	(1<<CIF_MCCK_PENDING)
+#define _CIF_ASCE		(1<<CIF_ASCE)
+
+
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
@@ -21,6 +28,21 @@
 #include <asm/setup.h>
 #include <asm/runtime_instr.h>
 
+static inline void set_cpu_flag(int flag)
+{
+	S390_lowcore.cpu_flags |= (1U << flag);
+}
+
+static inline void clear_cpu_flag(int flag)
+{
+	S390_lowcore.cpu_flags &= ~(1U << flag);
+}
+
+static inline int test_cpu_flag(int flag)
+{
+	return !!(S390_lowcore.cpu_flags & (1U << flag));
+}
+
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
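
The CIF bits live in the per-CPU lowcore rather than in per-task thread
info, so the assembler entry code can test them at a fixed lowcore offset,
and the C helpers are plain bit operations. A user space model with a plain
variable standing in for S390_lowcore.cpu_flags (the PIF helpers added to
ptrace.h below follow the same pattern for pt_regs->flags):

#include <stdio.h>

#define CIF_MCCK_PENDING	0
#define CIF_ASCE		1

#define _CIF_MCCK_PENDING	(1 << CIF_MCCK_PENDING)
#define _CIF_ASCE		(1 << CIF_ASCE)

static unsigned long cpu_flags;	/* stands in for S390_lowcore.cpu_flags */

static void set_cpu_flag(int flag)   { cpu_flags |= (1U << flag); }
static void clear_cpu_flag(int flag) { cpu_flags &= ~(1U << flag); }
static int  test_cpu_flag(int flag)  { return !!(cpu_flags & (1U << flag)); }

int main(void)
{
	set_cpu_flag(CIF_ASCE);
	/* assembly code tests the precomputed mask form in one instruction */
	printf("CIF_ASCE set: %d, mask test: %d\n",
	       test_cpu_flag(CIF_ASCE), !!(cpu_flags & _CIF_ASCE));
	clear_cpu_flag(CIF_ASCE);
	printf("after clear: %d\n", test_cpu_flag(CIF_ASCE));
	return 0;
}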
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index f4783c0b7b43..1b5300cd6d22 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -8,6 +8,12 @@
 
 #include <uapi/asm/ptrace.h>
 
+#define PIF_SYSCALL		0	/* inside a system call */
+#define PIF_PER_TRAP		1	/* deliver sigtrap on return to user */
+
+#define _PIF_SYSCALL		(1<<PIF_SYSCALL)
+#define _PIF_PER_TRAP		(1<<PIF_PER_TRAP)
+
 #ifndef __ASSEMBLY__
 
 #define PSW_KERNEL_BITS	(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
@@ -29,6 +35,7 @@ struct pt_regs
 	unsigned int int_code;
 	unsigned int int_parm;
 	unsigned long int_parm_long;
+	unsigned long flags;
 };
 
 /*
@@ -79,6 +86,21 @@ struct per_struct_kernel {
 #define PER_CONTROL_SUSPENSION		0x00400000UL
 #define PER_CONTROL_ALTERATION		0x00200000UL
 
+static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+	regs->flags |= (1U << flag);
+}
+
+static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+	regs->flags &= ~(1U << flag);
+}
+
+static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+	return !!(regs->flags & (1U << flag));
+}
+
 /*
  * These are defined as per linux/ptrace.h, which see.
  */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b31b22dba948..089a49814c50 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -9,7 +9,6 @@
 
 
 #define PARMAREA		0x10400
-#define MEMORY_CHUNKS		256
 
 #ifndef __ASSEMBLY__
 
@@ -31,22 +30,11 @@
 #endif /* CONFIG_64BIT */
 #define COMMAND_LINE	((char *) (0x10480))
 
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY  1
-
-struct mem_chunk {
-	unsigned long addr;
-	unsigned long size;
-	int type;
-};
-
-extern struct mem_chunk memory_chunk[];
 extern int memory_end_set;
 extern unsigned long memory_end;
+extern unsigned long max_physmem_end;
 
-void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
-void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
-		     unsigned long size);
+extern void detect_memory_memblock(void);
 
 /*
  * Machine features detected in head.S
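
Conceptually, detect_memory_memblock() replaces the fixed mem_chunk[256]
array with open-ended region registration: each detected memory increment is
handed to memblock, which also feeds the physmem list enabled by
HAVE_MEMBLOCK_PHYS_MAP above. A toy version of that registration (the
merging and physmem bookkeeping of the real memblock_add() are omitted):

#include <stdio.h>
#include <stdlib.h>

struct region { unsigned long base, size; };

static struct region *regions;	/* growable list instead of a fixed array */
static int region_cnt;

static void toy_memblock_add(unsigned long base, unsigned long size)
{
	struct region *tmp;

	tmp = realloc(regions, (region_cnt + 1) * sizeof(*regions));
	if (!tmp)
		exit(1);
	regions = tmp;
	regions[region_cnt].base = base;
	regions[region_cnt].size = size;
	region_cnt++;
}

int main(void)
{
	/* two increments, as a memory detection loop might report them */
	toy_memblock_add(0x00000000UL, 256UL << 20);
	toy_memblock_add(0x10000000UL, 256UL << 20);
	for (int i = 0; i < region_cnt; i++)
		printf("region %d: base=%#lx size=%#lx\n",
		       i, regions[i].base, regions[i].size);
	free(regions);
	return 0;
}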
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 21703f85b48d..4f1307962a95 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -30,7 +30,6 @@ extern int smp_store_status(int cpu);
 extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
 extern void smp_yield(void);
-extern void smp_stop_cpu(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
@@ -54,6 +53,8 @@ static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_yield(void) { }
 static inline void smp_fill_possible_mask(void) { }
 
+#endif /* CONFIG_SMP */
+
 static inline void smp_stop_cpu(void)
 {
 	u16 pcpu = stap();
@@ -64,8 +65,6 @@ static inline void smp_stop_cpu(void)
 	}
 }
 
-#endif /* CONFIG_SMP */
-
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 83e5d216105e..96879f7ad6da 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -11,18 +11,21 @@
 
 #include <linux/smp.h>
 
+#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
+
 extern int spin_retry;
 
 static inline int
-_raw_compare_and_swap(volatile unsigned int *lock,
-		      unsigned int old, unsigned int new)
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
+	unsigned int old_expected = old;
+
 	asm volatile(
 		"	cs	%0,%3,%1"
 		: "=d" (old), "=Q" (*lock)
 		: "0" (old), "d" (new), "Q" (*lock)
 		: "cc", "memory" );
-	return old;
+	return old == old_expected;
 }
 
 /*
@@ -34,57 +37,69 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) \
-		arch_spin_relax(lock); } while (0)
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_relax(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
-extern void arch_spin_lock_wait(arch_spinlock_t *);
-extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int arch_spin_trylock_retry(arch_spinlock_t *);
-extern void arch_spin_relax(arch_spinlock_t *lock);
+static inline u32 arch_spin_lockval(int cpu)
+{
+	return ~cpu;
+}
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-	return lock.owner_cpu == 0;
+	return lock.lock == 0;
 }
 
-static inline void arch_spin_lock(arch_spinlock_t *lp)
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 {
-	int old;
+	return ACCESS_ONCE(lp->lock) != 0;
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait(lp);
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
+{
+	barrier();
+	return likely(arch_spin_value_unlocked(*lp) &&
+		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
+static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
 {
-	int old;
+	return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait_flags(lp, flags);
+static inline void arch_spin_lock(arch_spinlock_t *lp)
+{
+	if (!arch_spin_trylock_once(lp))
+		arch_spin_lock_wait(lp);
 }
 
-static inline int arch_spin_trylock(arch_spinlock_t *lp)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
+					unsigned long flags)
 {
-	int old;
+	if (!arch_spin_trylock_once(lp))
+		arch_spin_lock_wait_flags(lp, flags);
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return 1;
-	return arch_spin_trylock_retry(lp);
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
+{
+	if (!arch_spin_trylock_once(lp))
+		return arch_spin_trylock_retry(lp);
+	return 1;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
+	arch_spin_tryrelease_once(lp);
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	while (arch_spin_is_locked(lock))
+		arch_spin_relax(lock);
 }
 
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -115,42 +130,50 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp);
 extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
+static inline int arch_read_trylock_once(arch_rwlock_t *rw)
+{
+	unsigned int old = ACCESS_ONCE(rw->lock);
+	return likely((int) old >= 0 &&
+		      _raw_compare_and_swap(&rw->lock, old, old + 1));
+}
+
+static inline int arch_write_trylock_once(arch_rwlock_t *rw)
+{
+	unsigned int old = ACCESS_ONCE(rw->lock);
+	return likely(old == 0 &&
+		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+}
+
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!arch_read_trylock_once(rw))
 		_raw_read_lock_wait(rw);
 }
 
 static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!arch_read_trylock_once(rw))
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	unsigned int old, cmp;
+	unsigned int old;
 
-	old = rw->lock;
 	do {
-		cmp = old;
-		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
-	} while (cmp != old);
+		old = ACCESS_ONCE(rw->lock);
+	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (!arch_write_trylock_once(rw))
 		_raw_write_lock_wait(rw);
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (!arch_write_trylock_once(rw))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
@@ -161,18 +184,16 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
-		return 1;
-	return _raw_read_trylock_retry(rw);
+	if (!arch_read_trylock_once(rw))
+		return _raw_read_trylock_retry(rw);
+	return 1;
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
-		return 1;
-	return _raw_write_trylock_retry(rw);
+	if (!arch_write_trylock_once(rw))
+		return _raw_write_trylock_retry(rw);
+	return 1;
 }
 
 #define arch_read_relax(lock)	cpu_relax()
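
Two things changed together here: _raw_compare_and_swap() now reports
success as a boolean instead of returning the old value, and the lock word
stores a per-CPU lockval (~cpu, cached in the lowcore) instead of being
written through an owner_cpu field. A small user space model of the new
convention built on C11 atomics (names are illustrative; the real kernel
uses the cs instruction, not <stdatomic.h>):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint lock_word;	/* 0 when free, ~cpu when held */

/* Boolean-result CAS, mirroring the reworked _raw_compare_and_swap(). */
static bool model_cas(atomic_uint *lock, unsigned int old, unsigned int new)
{
	return atomic_compare_exchange_strong(lock, &old, new);
}

static unsigned int model_lockval(int cpu) { return ~cpu; }

static bool model_trylock_once(atomic_uint *lock, int cpu)
{
	return atomic_load(lock) == 0 &&
	       model_cas(lock, 0, model_lockval(cpu));
}

static void model_unlock(atomic_uint *lock, int cpu)
{
	model_cas(lock, model_lockval(cpu), 0); /* only the owner succeeds */
}

int main(void)
{
	printf("cpu0 trylock: %d\n", model_trylock_once(&lock_word, 0)); /* 1 */
	printf("cpu1 trylock: %d\n", model_trylock_once(&lock_word, 1)); /* 0 */
	model_unlock(&lock_word, 0);
	printf("cpu1 trylock: %d\n", model_trylock_once(&lock_word, 1)); /* 1 */
	return 0;
}

The unlock path is just another compare-and-swap from the owner's lockval
back to 0, which is why arch_spin_unlock() above can simply call
arch_spin_tryrelease_once().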
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 9c76656a0af0..b2cd6ff7c2c5 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int owner_cpu;
+	unsigned int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index e759181357fc..29c81f82705e 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,7 +132,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 		update_cr_regs(next);					\
 	}								\
 	prev = __switch_to(prev,next);					\
-	update_primary_asce(current);					\
 } while (0)
 
 #define finish_arch_switch(prev) do {					\
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 777687055e7b..abad78d5b10c 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -28,7 +28,7 @@ extern const unsigned int sys_call_table_emu[];
 static inline long syscall_get_nr(struct task_struct *task,
 				  struct pt_regs *regs)
 {
-	return test_tsk_thread_flag(task, TIF_SYSCALL) ?
+	return test_pt_regs_flag(regs, PIF_SYSCALL) ?
 		(regs->int_code & 0xffff) : -1;
 }
 
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 50630e6a35de..b833e9c0bfbf 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -77,32 +77,22 @@ static inline struct thread_info *current_thread_info(void)
 /*
  * thread information flags bit numbers
  */
-#define TIF_SYSCALL		0	/* inside a system call */
-#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
-#define TIF_SIGPENDING		2	/* signal pending */
-#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-#define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
-#define TIF_ASCE		5	/* primary asce needs fixup / uaccess */
-#define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
-#define TIF_MCCK_PENDING	7	/* machine check handling is pending */
-#define TIF_SYSCALL_TRACE	8	/* syscall trace active */
-#define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
-#define TIF_SECCOMP		10	/* secure computing */
-#define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
-#define TIF_31BIT		17	/* 32bit process */
-#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
-#define TIF_SINGLE_STEP		20	/* This task is single stepped */
-#define TIF_BLOCK_STEP		21	/* This task is block stepped */
+#define TIF_NOTIFY_RESUME	0	/* callback before returning to user */
+#define TIF_SIGPENDING		1	/* signal pending */
+#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
+#define TIF_SYSCALL_TRACE	3	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
+#define TIF_SECCOMP		5	/* secure computing */
+#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+#define TIF_31BIT		16	/* 32bit process */
+#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal() */
+#define TIF_SINGLE_STEP		19	/* This task is single stepped */
+#define TIF_BLOCK_STEP		20	/* This task is block stepped */
 
-#define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
-#define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
-#define _TIF_ASCE		(1<<TIF_ASCE)
-#define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
-#define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 1be64a1506d0..cd4c68e0398d 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -132,6 +132,34 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+
+#define __put_get_user_asm(to, from, size, spec)		\
+({								\
+	register unsigned long __reg0 asm("0") = spec;		\
+	int __rc;						\
+								\
+	asm volatile(						\
+		"0:	mvcos	%1,%3,%2\n"			\
+		"1:	xr	%0,%0\n"			\
+		"2:\n"						\
+		".pushsection .fixup, \"ax\"\n"			\
+		"3:	lhi	%0,%5\n"			\
+		"	jg	2b\n"				\
+		".popsection\n"					\
+		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
+		: "=d" (__rc), "=Q" (*(to))			\
+		: "d" (size), "Q" (*(from)),			\
+		  "d" (__reg0), "K" (-EFAULT)			\
+		: "cc");					\
+	__rc;							\
+})
+
+#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL)
+#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL)
+
+#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
+
 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
 	size = __copy_to_user(ptr, x, size);
@@ -144,6 +172,8 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
 	return size ? -EFAULT : 0;
 }
 
+#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
+
 /*
  * These are the main single-value transfer routines. They automatically
  * use the right size if we just have the right pointer type.
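
On z10 and newer machines the MVCOS (move with optional specifications)
instruction copies between the kernel and user address spaces in one shot;
the spec value loaded into register 0 selects which operand lives in user
space, and a fault is turned into -EFAULT through the exception-table
fixup. A user space stand-in for the macro's contract only, with the fault
handling simulated rather than real:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy `size` bytes, returning 0 on success or -EFAULT on a faulting
 * access; the real version does this with one MVCOS plus a fixup. */
static int model_put_get_user(void *to, const void *from, unsigned long size)
{
	if (!to || !from)	/* stand-in for the page fault path */
		return -EFAULT;
	memcpy(to, from, size);
	return 0;
}

int main(void)
{
	int src = 42, dst = 0;

	printf("ok: rc=%d dst=%d\n",
	       model_put_get_user(&dst, &src, sizeof(dst)), dst);
	printf("fault: rc=%d\n", model_put_get_user(NULL, &src, sizeof(src)));
	return 0;
}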
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index cc10cdd4d6a2..0c070c44cde2 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -50,6 +50,7 @@ int main(void)
 	DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
 	DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm));
 	DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
+	DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
 	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -115,6 +116,7 @@
 	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
 	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
 	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
+	DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags));
 	DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
 	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
 	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
@@ -142,7 +144,6 @@
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
 	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
-	DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
 	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
 	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 7df5ed9f44d7..f204d6920368 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -213,7 +213,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 		sizeof(current->thread.fp_regs));
 
 	restore_fp_regs(current->thread.fp_regs.fprs);
-	clear_thread_flag(TIF_SYSCALL);	/* No longer in a system call */
+	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
 }
 
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d7658c4b2ed5..a3b9150e6802 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bootmem.h>
 #include <linux/elf.h>
+#include <linux/memblock.h>
 #include <asm/os_info.h>
 #include <asm/elf.h>
 #include <asm/ipl.h>
@@ -22,6 +23,24 @@
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+static struct memblock_region oldmem_region;
+
+static struct memblock_type oldmem_type = {
+	.cnt = 1,
+	.max = 1,
+	.total_size = 0,
+	.regions = &oldmem_region,
+};
+
+#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
+				     &oldmem_type, p_start,		\
+				     p_end, p_nid);			\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_mem_range(&i, nid, &memblock.physmem,		\
+			      &oldmem_type,				\
+			      p_start, p_end, p_nid))
+
 struct dump_save_areas dump_save_areas;
 
 /*
@@ -264,19 +283,6 @@ static void *kzalloc_panic(int len)
 }
 
 /*
- * Get memory layout and create hole for oldmem
- */
-static struct mem_chunk *get_memory_layout(void)
-{
-	struct mem_chunk *chunk_array;
-
-	chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
-	detect_memory_layout(chunk_array, 0);
-	create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
-	return chunk_array;
-}
-
-/*
  * Initialize ELF note
  */
 static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
@@ -490,52 +496,33 @@
  */
 static int get_mem_chunk_cnt(void)
 {
-	struct mem_chunk *chunk_array, *mem_chunk;
-	int i, cnt = 0;
+	int cnt = 0;
+	u64 idx;
 
-	chunk_array = get_memory_layout();
-	for (i = 0; i < MEMORY_CHUNKS; i++) {
-		mem_chunk = &chunk_array[i];
-		if (chunk_array[i].type != CHUNK_READ_WRITE &&
-		    chunk_array[i].type != CHUNK_READ_ONLY)
-			continue;
-		if (mem_chunk->size == 0)
-			continue;
+	for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL)
 		cnt++;
-	}
-	kfree(chunk_array);
 	return cnt;
 }
 
 /*
  * Initialize ELF loads (new kernel)
  */
-static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
+static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
| 514 | { | 511 | { |
| 515 | struct mem_chunk *chunk_array, *mem_chunk; | 512 | phys_addr_t start, end; |
| 516 | int i; | 513 | u64 idx; |
| 517 | 514 | ||
| 518 | chunk_array = get_memory_layout(); | 515 | for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) { |
| 519 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 516 | phdr->p_filesz = end - start; |
| 520 | mem_chunk = &chunk_array[i]; | ||
| 521 | if (mem_chunk->size == 0) | ||
| 522 | continue; | ||
| 523 | if (chunk_array[i].type != CHUNK_READ_WRITE && | ||
| 524 | chunk_array[i].type != CHUNK_READ_ONLY) | ||
| 525 | continue; | ||
| 526 | else | ||
| 527 | phdr->p_filesz = mem_chunk->size; | ||
| 528 | phdr->p_type = PT_LOAD; | 517 | phdr->p_type = PT_LOAD; |
| 529 | phdr->p_offset = mem_chunk->addr; | 518 | phdr->p_offset = start; |
| 530 | phdr->p_vaddr = mem_chunk->addr; | 519 | phdr->p_vaddr = start; |
| 531 | phdr->p_paddr = mem_chunk->addr; | 520 | phdr->p_paddr = start; |
| 532 | phdr->p_memsz = mem_chunk->size; | 521 | phdr->p_memsz = end - start; |
| 533 | phdr->p_flags = PF_R | PF_W | PF_X; | 522 | phdr->p_flags = PF_R | PF_W | PF_X; |
| 534 | phdr->p_align = PAGE_SIZE; | 523 | phdr->p_align = PAGE_SIZE; |
| 535 | phdr++; | 524 | phdr++; |
| 536 | } | 525 | } |
| 537 | kfree(chunk_array); | ||
| 538 | return i; | ||
| 539 | } | 526 | } |
| 540 | 527 | ||
| 541 | /* | 528 | /* |
| @@ -584,6 +571,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) | |||
| 584 | /* If we cannot get HSA size for zfcpdump return error */ | 571 | /* If we cannot get HSA size for zfcpdump return error */ |
| 585 | if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size()) | 572 | if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size()) |
| 586 | return -ENODEV; | 573 | return -ENODEV; |
| 574 | |||
| 575 | /* For kdump, exclude previous crashkernel memory */ | ||
| 576 | if (OLDMEM_BASE) { | ||
| 577 | oldmem_region.base = OLDMEM_BASE; | ||
| 578 | oldmem_region.size = OLDMEM_SIZE; | ||
| 579 | oldmem_type.total_size = OLDMEM_SIZE; | ||
| 580 | } | ||
| 581 | |||
| 587 | mem_chunk_cnt = get_mem_chunk_cnt(); | 582 | mem_chunk_cnt = get_mem_chunk_cnt(); |
| 588 | 583 | ||
| 589 | alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + | 584 | alloc_size = 0x1000 + get_cpu_cnt() * 0x300 + |
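The crash_dump.c hunks above replace the private mem_chunk walk with memblock: a one-region oldmem_type is subtracted from memblock.physmem by __next_mem_range(), get_mem_chunk_cnt() counts what is left in order to size the ELF core header, and loads_init() emits one PT_LOAD per remaining range. A minimal user-space sketch of that count-then-fill pattern follows; the range type and subtract_hole() helper are illustrative stand-ins for memblock, not kernel code:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* [start, end), stand-in for memblock */

/* Yield the pieces of r left after removing hole, which is what
 * for_each_dump_mem_range() achieves for the oldmem region. */
static int subtract_hole(struct range r, struct range hole, struct range out[2])
{
	int n = 0;

	if (hole.start > r.start)
		out[n++] = (struct range){ r.start,
				hole.start < r.end ? hole.start : r.end };
	if (hole.end < r.end)
		out[n++] = (struct range){ hole.end > r.start ? hole.end : r.start,
				r.end };
	return n;
}

int main(void)
{
	struct range mem  = { 0x0, 0x20000000 };	/* 512 MB of "RAM" */
	struct range hole = { 0x8000000, 0x10000000 };	/* OLDMEM_BASE/SIZE */
	struct range pieces[2];
	int j, cnt;

	cnt = subtract_hole(mem, hole, pieces);		/* get_mem_chunk_cnt() */
	printf("%d PT_LOAD headers needed\n", cnt);

	for (j = 0; j < cnt; j++)			/* loads_init() */
		printf("PT_LOAD p_paddr=%#llx p_memsz=%#llx\n",
		       (unsigned long long) pieces[j].start,
		       (unsigned long long) (pieces[j].end - pieces[j].start));
	return 0;
}

For the 512 MB example with a 128 MB crashkernel hole this prints two PT_LOAD lines, matching the header count elfcorehdr_alloc() would reserve.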
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index a734f3585ceb..0dff972a169c 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
| @@ -258,13 +258,19 @@ static __init void setup_topology(void) | |||
| 258 | static void early_pgm_check_handler(void) | 258 | static void early_pgm_check_handler(void) |
| 259 | { | 259 | { |
| 260 | const struct exception_table_entry *fixup; | 260 | const struct exception_table_entry *fixup; |
| 261 | unsigned long cr0, cr0_new; | ||
| 261 | unsigned long addr; | 262 | unsigned long addr; |
| 262 | 263 | ||
| 263 | addr = S390_lowcore.program_old_psw.addr; | 264 | addr = S390_lowcore.program_old_psw.addr; |
| 264 | fixup = search_exception_tables(addr & PSW_ADDR_INSN); | 265 | fixup = search_exception_tables(addr & PSW_ADDR_INSN); |
| 265 | if (!fixup) | 266 | if (!fixup) |
| 266 | disabled_wait(0); | 267 | disabled_wait(0); |
| 268 | /* Disable low address protection before storing into lowcore. */ | ||
| 269 | __ctl_store(cr0, 0, 0); | ||
| 270 | cr0_new = cr0 & ~(1UL << 28); | ||
| 271 | __ctl_load(cr0_new, 0, 0); | ||
| 267 | S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE; | 272 | S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE; |
| 273 | __ctl_load(cr0, 0, 0); | ||
| 268 | } | 274 | } |
| 269 | 275 | ||
| 270 | static noinline __init void setup_lowcore_early(void) | 276 | static noinline __init void setup_lowcore_early(void) |
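The early.c change is needed because the program-old PSW lives in the lowcore, i.e. in the first 512 bytes of real storage, and stores there are refused while low-address protection is on. 1UL << 28 is that protection control counted from the least significant bit (bit 3 of the 31-bit CR0, bit 35 of the 64-bit CR0 in IBM big-endian numbering). A runnable sketch of the save/clear/restore pattern; the control register is emulated in a plain variable here, whereas the real __ctl_store()/__ctl_load() are inline-assembly wrappers around the stctl/lctl instruction family:

#include <stdio.h>

/* Emulated so the sketch runs anywhere; the kernel macros are inline asm. */
static unsigned long emulated_cr0 = 1UL << 28;		/* protection on */
#define __ctl_store(val, low, high)	((val) = emulated_cr0)
#define __ctl_load(val, low, high)	(emulated_cr0 = (val))

int main(void)
{
	unsigned long cr0, cr0_new;

	__ctl_store(cr0, 0, 0);			/* save current CR0 */
	cr0_new = cr0 & ~(1UL << 28);		/* drop low-address protection */
	__ctl_load(cr0_new, 0, 0);
	printf("store to lowcore allowed, cr0=%#lx\n", emulated_cr0);
	/* ... S390_lowcore.program_old_psw.addr = fixup address ... */
	__ctl_load(cr0, 0, 0);			/* restore the saved value */
	printf("protection back on,       cr0=%#lx\n", emulated_cr0);
	return 0;
}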
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 1662038516c0..18e5af848f9a 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
| 12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
| 13 | #include <asm/processor.h> | ||
| 13 | #include <asm/cache.h> | 14 | #include <asm/cache.h> |
| 14 | #include <asm/errno.h> | 15 | #include <asm/errno.h> |
| 15 | #include <asm/ptrace.h> | 16 | #include <asm/ptrace.h> |
| @@ -37,18 +38,16 @@ __PT_R13 = __PT_GPRS + 524 | |||
| 37 | __PT_R14 = __PT_GPRS + 56 | 38 | __PT_R14 = __PT_GPRS + 56 |
| 38 | __PT_R15 = __PT_GPRS + 60 | 39 | __PT_R15 = __PT_GPRS + 60 |
| 39 | 40 | ||
| 40 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | ||
| 41 | _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE) | ||
| 42 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | ||
| 43 | _TIF_MCCK_PENDING | _TIF_ASCE) | ||
| 44 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | ||
| 45 | _TIF_SYSCALL_TRACEPOINT) | ||
| 46 | _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) | ||
| 47 | |||
| 48 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | 41 | STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER |
| 49 | STACK_SIZE = 1 << STACK_SHIFT | 42 | STACK_SIZE = 1 << STACK_SHIFT |
| 50 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | 43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE |
| 51 | 44 | ||
| 45 | _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) | ||
| 46 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | ||
| 47 | _TIF_SYSCALL_TRACEPOINT) | ||
| 48 | _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) | ||
| 49 | _PIF_WORK = (_PIF_PER_TRAP) | ||
| 50 | |||
| 52 | #define BASED(name) name-system_call(%r13) | 51 | #define BASED(name) name-system_call(%r13) |
| 53 | 52 | ||
| 54 | .macro TRACE_IRQS_ON | 53 | .macro TRACE_IRQS_ON |
| @@ -160,13 +159,7 @@ ENTRY(__switch_to) | |||
| 160 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | 159 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 |
| 161 | mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next | 160 | mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next |
| 162 | l %r15,__THREAD_ksp(%r3) # load kernel stack of next | 161 | l %r15,__THREAD_ksp(%r3) # load kernel stack of next |
| 163 | lhi %r6,_TIF_TRANSFER # transfer TIF bits | 162 | lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task |
| 164 | n %r6,__TI_flags(%r4) # isolate TIF bits | ||
| 165 | jz 0f | ||
| 166 | o %r6,__TI_flags(%r5) # set TIF bits of next | ||
| 167 | st %r6,__TI_flags(%r5) | ||
| 168 | ni __TI_flags+3(%r4),255-_TIF_TRANSFER # clear TIF bits of prev | ||
| 169 | 0: lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task | ||
| 170 | br %r14 | 163 | br %r14 |
| 171 | 164 | ||
| 172 | __critical_start: | 165 | __critical_start: |
| @@ -181,6 +174,7 @@ sysc_stm: | |||
| 181 | stm %r8,%r15,__LC_SAVE_AREA_SYNC | 174 | stm %r8,%r15,__LC_SAVE_AREA_SYNC |
| 182 | l %r12,__LC_THREAD_INFO | 175 | l %r12,__LC_THREAD_INFO |
| 183 | l %r13,__LC_SVC_NEW_PSW+4 | 176 | l %r13,__LC_SVC_NEW_PSW+4 |
| 177 | lhi %r14,_PIF_SYSCALL | ||
| 184 | sysc_per: | 178 | sysc_per: |
| 185 | l %r15,__LC_KERNEL_STACK | 179 | l %r15,__LC_KERNEL_STACK |
| 186 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | 180 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs |
| @@ -190,8 +184,8 @@ sysc_vtime: | |||
| 190 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC | 184 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC |
| 191 | mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW | 185 | mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW |
| 192 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | 186 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
| 187 | st %r14,__PT_FLAGS(%r11) | ||
| 193 | sysc_do_svc: | 188 | sysc_do_svc: |
| 194 | oi __TI_flags+3(%r12),_TIF_SYSCALL | ||
| 195 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table | 189 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table |
| 196 | lh %r8,__PT_INT_CODE+2(%r11) | 190 | lh %r8,__PT_INT_CODE+2(%r11) |
| 197 | sla %r8,2 # shift and test for svc0 | 191 | sla %r8,2 # shift and test for svc0 |
| @@ -207,7 +201,7 @@ sysc_nr_ok: | |||
| 207 | st %r2,__PT_ORIG_GPR2(%r11) | 201 | st %r2,__PT_ORIG_GPR2(%r11) |
| 208 | st %r7,STACK_FRAME_OVERHEAD(%r15) | 202 | st %r7,STACK_FRAME_OVERHEAD(%r15) |
| 209 | l %r9,0(%r8,%r10) # get system call addr. | 203 | l %r9,0(%r8,%r10) # get system call addr. |
| 210 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 | 204 | tm __TI_flags+3(%r12),_TIF_TRACE |
| 211 | jnz sysc_tracesys | 205 | jnz sysc_tracesys |
| 212 | basr %r14,%r9 # call sys_xxxx | 206 | basr %r14,%r9 # call sys_xxxx |
| 213 | st %r2,__PT_R2(%r11) # store return value | 207 | st %r2,__PT_R2(%r11) # store return value |
| @@ -217,9 +211,12 @@ sysc_return: | |||
| 217 | sysc_tif: | 211 | sysc_tif: |
| 218 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 212 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
| 219 | jno sysc_restore | 213 | jno sysc_restore |
| 220 | tm __TI_flags+3(%r12),_TIF_WORK_SVC | 214 | tm __PT_FLAGS+3(%r11),_PIF_WORK |
| 221 | jnz sysc_work # check for work | 215 | jnz sysc_work |
| 222 | ni __TI_flags+3(%r12),255-_TIF_SYSCALL | 216 | tm __TI_flags+3(%r12),_TIF_WORK |
| 217 | jnz sysc_work # check for thread work | ||
| 218 | tm __LC_CPU_FLAGS+3,_CIF_WORK | ||
| 219 | jnz sysc_work | ||
| 223 | sysc_restore: | 220 | sysc_restore: |
| 224 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) | 221 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) |
| 225 | stpt __LC_EXIT_TIMER | 222 | stpt __LC_EXIT_TIMER |
| @@ -231,17 +228,17 @@ sysc_done: | |||
| 231 | # One of the work bits is on. Find out which one. | 228 | # One of the work bits is on. Find out which one. |
| 232 | # | 229 | # |
| 233 | sysc_work: | 230 | sysc_work: |
| 234 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 231 | tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING |
| 235 | jo sysc_mcck_pending | 232 | jo sysc_mcck_pending |
| 236 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 233 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
| 237 | jo sysc_reschedule | 234 | jo sysc_reschedule |
| 238 | tm __TI_flags+3(%r12),_TIF_PER_TRAP | 235 | tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP |
| 239 | jo sysc_singlestep | 236 | jo sysc_singlestep |
| 240 | tm __TI_flags+3(%r12),_TIF_SIGPENDING | 237 | tm __TI_flags+3(%r12),_TIF_SIGPENDING |
| 241 | jo sysc_sigpending | 238 | jo sysc_sigpending |
| 242 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 239 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
| 243 | jo sysc_notify_resume | 240 | jo sysc_notify_resume |
| 244 | tm __TI_flags+3(%r12),_TIF_ASCE | 241 | tm __LC_CPU_FLAGS+3,_CIF_ASCE |
| 245 | jo sysc_uaccess | 242 | jo sysc_uaccess |
| 246 | j sysc_return # beware of critical section cleanup | 243 | j sysc_return # beware of critical section cleanup |
| 247 | 244 | ||
| @@ -254,7 +251,7 @@ sysc_reschedule: | |||
| 254 | br %r1 # call schedule | 251 | br %r1 # call schedule |
| 255 | 252 | ||
| 256 | # | 253 | # |
| 257 | # _TIF_MCCK_PENDING is set, call handler | 254 | # _CIF_MCCK_PENDING is set, call handler |
| 258 | # | 255 | # |
| 259 | sysc_mcck_pending: | 256 | sysc_mcck_pending: |
| 260 | l %r1,BASED(.Lhandle_mcck) | 257 | l %r1,BASED(.Lhandle_mcck) |
| @@ -262,10 +259,10 @@ sysc_mcck_pending: | |||
| 262 | br %r1 # TIF bit will be cleared by handler | 259 | br %r1 # TIF bit will be cleared by handler |
| 263 | 260 | ||
| 264 | # | 261 | # |
| 265 | # _TIF_ASCE is set, load user space asce | 262 | # _CIF_ASCE is set, load user space asce |
| 266 | # | 263 | # |
| 267 | sysc_uaccess: | 264 | sysc_uaccess: |
| 268 | ni __TI_flags+3(%r12),255-_TIF_ASCE | 265 | ni __LC_CPU_FLAGS+3,255-_CIF_ASCE |
| 269 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce | 266 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce |
| 270 | j sysc_return | 267 | j sysc_return |
| 271 | 268 | ||
| @@ -276,7 +273,7 @@ sysc_sigpending: | |||
| 276 | lr %r2,%r11 # pass pointer to pt_regs | 273 | lr %r2,%r11 # pass pointer to pt_regs |
| 277 | l %r1,BASED(.Ldo_signal) | 274 | l %r1,BASED(.Ldo_signal) |
| 278 | basr %r14,%r1 # call do_signal | 275 | basr %r14,%r1 # call do_signal |
| 279 | tm __TI_flags+3(%r12),_TIF_SYSCALL | 276 | tm __PT_FLAGS+3(%r11),_PIF_SYSCALL |
| 280 | jno sysc_return | 277 | jno sysc_return |
| 281 | lm %r2,%r7,__PT_R2(%r11) # load svc arguments | 278 | lm %r2,%r7,__PT_R2(%r11) # load svc arguments |
| 282 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table | 279 | l %r10,__TI_sysc_table(%r12) # 31 bit system call table |
| @@ -297,10 +294,10 @@ sysc_notify_resume: | |||
| 297 | br %r1 # call do_notify_resume | 294 | br %r1 # call do_notify_resume |
| 298 | 295 | ||
| 299 | # | 296 | # |
| 300 | # _TIF_PER_TRAP is set, call do_per_trap | 297 | # _PIF_PER_TRAP is set, call do_per_trap |
| 301 | # | 298 | # |
| 302 | sysc_singlestep: | 299 | sysc_singlestep: |
| 303 | ni __TI_flags+3(%r12),255-_TIF_PER_TRAP | 300 | ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP |
| 304 | lr %r2,%r11 # pass pointer to pt_regs | 301 | lr %r2,%r11 # pass pointer to pt_regs |
| 305 | l %r1,BASED(.Ldo_per_trap) | 302 | l %r1,BASED(.Ldo_per_trap) |
| 306 | la %r14,BASED(sysc_return) | 303 | la %r14,BASED(sysc_return) |
| @@ -330,7 +327,7 @@ sysc_tracego: | |||
| 330 | basr %r14,%r9 # call sys_xxx | 327 | basr %r14,%r9 # call sys_xxx |
| 331 | st %r2,__PT_R2(%r11) # store return value | 328 | st %r2,__PT_R2(%r11) # store return value |
| 332 | sysc_tracenogo: | 329 | sysc_tracenogo: |
| 333 | tm __TI_flags+2(%r12),_TIF_TRACE >> 8 | 330 | tm __TI_flags+3(%r12),_TIF_TRACE |
| 334 | jz sysc_return | 331 | jz sysc_return |
| 335 | l %r1,BASED(.Ltrace_exit) | 332 | l %r1,BASED(.Ltrace_exit) |
| 336 | lr %r2,%r11 # pass pointer to pt_regs | 333 | lr %r2,%r11 # pass pointer to pt_regs |
| @@ -384,12 +381,13 @@ ENTRY(pgm_check_handler) | |||
| 384 | stm %r8,%r9,__PT_PSW(%r11) | 381 | stm %r8,%r9,__PT_PSW(%r11) |
| 385 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC | 382 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC |
| 386 | mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE | 383 | mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE |
| 384 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | ||
| 387 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 385 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
| 388 | jz 0f | 386 | jz 0f |
| 389 | l %r1,__TI_task(%r12) | 387 | l %r1,__TI_task(%r12) |
| 390 | tmh %r8,0x0001 # kernel per event ? | 388 | tmh %r8,0x0001 # kernel per event ? |
| 391 | jz pgm_kprobe | 389 | jz pgm_kprobe |
| 392 | oi __TI_flags+3(%r12),_TIF_PER_TRAP | 390 | oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP |
| 393 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS | 391 | mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS |
| 394 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE | 392 | mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE |
| 395 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID | 393 | mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID |
| @@ -420,9 +418,9 @@ pgm_kprobe: | |||
| 420 | # single stepped system call | 418 | # single stepped system call |
| 421 | # | 419 | # |
| 422 | pgm_svcper: | 420 | pgm_svcper: |
| 423 | oi __TI_flags+3(%r12),_TIF_PER_TRAP | ||
| 424 | mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW | 421 | mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW |
| 425 | mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per) | 422 | mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per) |
| 423 | lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP | ||
| 426 | lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs | 424 | lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs |
| 427 | 425 | ||
| 428 | /* | 426 | /* |
| @@ -445,6 +443,7 @@ io_skip: | |||
| 445 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC | 443 | mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC |
| 446 | stm %r8,%r9,__PT_PSW(%r11) | 444 | stm %r8,%r9,__PT_PSW(%r11) |
| 447 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | 445 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID |
| 446 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | ||
| 448 | TRACE_IRQS_OFF | 447 | TRACE_IRQS_OFF |
| 449 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 448 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
| 450 | io_loop: | 449 | io_loop: |
| @@ -466,8 +465,10 @@ io_return: | |||
| 466 | LOCKDEP_SYS_EXIT | 465 | LOCKDEP_SYS_EXIT |
| 467 | TRACE_IRQS_ON | 466 | TRACE_IRQS_ON |
| 468 | io_tif: | 467 | io_tif: |
| 469 | tm __TI_flags+3(%r12),_TIF_WORK_INT | 468 | tm __TI_flags+3(%r12),_TIF_WORK |
| 470 | jnz io_work # there is work to do (signals etc.) | 469 | jnz io_work # there is work to do (signals etc.) |
| 470 | tm __LC_CPU_FLAGS+3,_CIF_WORK | ||
| 471 | jnz io_work | ||
| 471 | io_restore: | 472 | io_restore: |
| 472 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) | 473 | mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) |
| 473 | stpt __LC_EXIT_TIMER | 474 | stpt __LC_EXIT_TIMER |
| @@ -477,7 +478,7 @@ io_done: | |||
| 477 | 478 | ||
| 478 | # | 479 | # |
| 479 | # There is work to do, find out in which context we have been interrupted: | 480 | # There is work to do, find out in which context we have been interrupted: |
| 480 | # 1) if we return to user space we can do all _TIF_WORK_INT work | 481 | # 1) if we return to user space we can do all _TIF_WORK work |
| 481 | # 2) if we return to kernel code and preemptive scheduling is enabled check | 482 | # 2) if we return to kernel code and preemptive scheduling is enabled check |
| 482 | # the preemption counter and if it is zero call preempt_schedule_irq | 483 | # the preemption counter and if it is zero call preempt_schedule_irq |
| 483 | # Before any work can be done, a switch to the kernel stack is required. | 484 | # Before any work can be done, a switch to the kernel stack is required. |
| @@ -520,11 +521,9 @@ io_work_user: | |||
| 520 | 521 | ||
| 521 | # | 522 | # |
| 522 | # One of the work bits is on. Find out which one. | 523 | # One of the work bits is on. Find out which one. |
| 523 | # Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED | ||
| 524 | # and _TIF_MCCK_PENDING | ||
| 525 | # | 524 | # |
| 526 | io_work_tif: | 525 | io_work_tif: |
| 527 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 526 | tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING |
| 528 | jo io_mcck_pending | 527 | jo io_mcck_pending |
| 529 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED | 528 | tm __TI_flags+3(%r12),_TIF_NEED_RESCHED |
| 530 | jo io_reschedule | 529 | jo io_reschedule |
| @@ -532,12 +531,12 @@ io_work_tif: | |||
| 532 | jo io_sigpending | 531 | jo io_sigpending |
| 533 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME | 532 | tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME |
| 534 | jo io_notify_resume | 533 | jo io_notify_resume |
| 535 | tm __TI_flags+3(%r12),_TIF_ASCE | 534 | tm __LC_CPU_FLAGS+3,_CIF_ASCE |
| 536 | jo io_uaccess | 535 | jo io_uaccess |
| 537 | j io_return # beware of critical section cleanup | 536 | j io_return # beware of critical section cleanup |
| 538 | 537 | ||
| 539 | # | 538 | # |
| 540 | # _TIF_MCCK_PENDING is set, call handler | 539 | # _CIF_MCCK_PENDING is set, call handler |
| 541 | # | 540 | # |
| 542 | io_mcck_pending: | 541 | io_mcck_pending: |
| 543 | # TRACE_IRQS_ON already done at io_return | 542 | # TRACE_IRQS_ON already done at io_return |
| @@ -547,10 +546,10 @@ io_mcck_pending: | |||
| 547 | j io_return | 546 | j io_return |
| 548 | 547 | ||
| 549 | # | 548 | # |
| 550 | # _TIF_ASCE is set, load user space asce | 549 | # _CIF_ASCE is set, load user space asce |
| 551 | # | 550 | # |
| 552 | io_uaccess: | 551 | io_uaccess: |
| 553 | ni __TI_flags+3(%r12),255-_TIF_ASCE | 552 | ni __LC_CPU_FLAGS+3,255-_CIF_ASCE |
| 554 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce | 553 | lctl %c1,%c1,__LC_USER_ASCE # load primary asce |
| 555 | j io_return | 554 | j io_return |
| 556 | 555 | ||
| @@ -613,6 +612,7 @@ ext_skip: | |||
| 613 | stm %r8,%r9,__PT_PSW(%r11) | 612 | stm %r8,%r9,__PT_PSW(%r11) |
| 614 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR | 613 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR |
| 615 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS | 614 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS |
| 615 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | ||
| 616 | TRACE_IRQS_OFF | 616 | TRACE_IRQS_OFF |
| 617 | l %r1,BASED(.Ldo_IRQ) | 617 | l %r1,BASED(.Ldo_IRQ) |
| 618 | lr %r2,%r11 # pass pointer to pt_regs | 618 | lr %r2,%r11 # pass pointer to pt_regs |
| @@ -677,6 +677,7 @@ mcck_skip: | |||
| 677 | stm %r0,%r7,__PT_R0(%r11) | 677 | stm %r0,%r7,__PT_R0(%r11) |
| 678 | mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 | 678 | mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 |
| 679 | stm %r8,%r9,__PT_PSW(%r11) | 679 | stm %r8,%r9,__PT_PSW(%r11) |
| 680 | xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) | ||
| 680 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 681 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
| 681 | l %r1,BASED(.Ldo_machine_check) | 682 | l %r1,BASED(.Ldo_machine_check) |
| 682 | lr %r2,%r11 # pass pointer to pt_regs | 683 | lr %r2,%r11 # pass pointer to pt_regs |
| @@ -689,7 +690,7 @@ mcck_skip: | |||
| 689 | la %r11,STACK_FRAME_OVERHEAD(%r15) | 690 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
| 690 | lr %r15,%r1 | 691 | lr %r15,%r1 |
| 691 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off | 692 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
| 692 | tm __TI_flags+3(%r12),_TIF_MCCK_PENDING | 693 | tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING |
| 693 | jno mcck_return | 694 | jno mcck_return |
| 694 | TRACE_IRQS_OFF | 695 | TRACE_IRQS_OFF |
| 695 | l %r1,BASED(.Lhandle_mcck) | 696 | l %r1,BASED(.Lhandle_mcck) |
| @@ -842,6 +843,8 @@ cleanup_system_call: | |||
| 842 | stm %r0,%r7,__PT_R0(%r9) | 843 | stm %r0,%r7,__PT_R0(%r9) |
| 843 | mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW | 844 | mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW |
| 844 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC | 845 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC |
| 846 | xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9) | ||
| 847 | mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL | ||
| 845 | # setup saved register 15 | 848 | # setup saved register 15 |
| 846 | st %r15,28(%r11) # r15 stack pointer | 849 | st %r15,28(%r11) # r15 stack pointer |
| 847 | # set new psw address and exit | 850 | # set new psw address and exit |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 5963e43618bb..c41f3f906720 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
| @@ -42,13 +42,11 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER | |||
| 42 | STACK_SIZE = 1 << STACK_SHIFT | 42 | STACK_SIZE = 1 << STACK_SHIFT |
| 43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE | 43 | STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE |
| 44 | 44 | ||
| 45 | _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 45 | _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) |
| 46 | _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE) | 46 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
| 47 | _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | 47 | _TIF_SYSCALL_TRACEPOINT) |
| 48 | _TIF_MCCK_PENDING | _TIF_ASCE) | 48 | _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) |
| 49 | _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | 49 | _PIF_WORK = (_PIF_PER_TRAP) |
| 50 | _TIF_SYSCALL_TRACEPOINT) | ||
| 51 | _TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT) | ||
| 52 | 50 | ||
| 53 | #define BASED(name) name-system_call(%r13) | 51 | #define BASED(name) name-system_call(%r13) |
| 54 | 52 | ||
| @@ -190,13 +188,7 @@ ENTRY(__switch_to) | |||
| 190 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 | 188 | lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 |
| 191 | mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next | 189 | mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next |
| 192 | lg %r15,__THREAD_ksp(%r3) # load kernel stack of next | 190 | lg %r15,__THREAD_ksp(%r3) # load kernel stack of next |
| 193 | llill %r6,_TIF_TRANSFER # transfer TIF bits | 191 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task |
| 194 | ng %r6,__TI_flags(%r4) # isolate TIF bits | ||
| 195 | jz 0f | ||
| 196 | og %r6,__TI_flags(%r5) # set TIF bits of next | ||
| 197 | stg %r6,__TI_flags(%r5) | ||
| 198 | ni __TI_flags+7(%r4),255-_TIF_TRANSFER # clear TIF bits of prev | ||
| 199 | 0: lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task | ||
| 200 | br %r14 | 192 | br %r14 |
| 201 | 193 | ||
| 202 | __critical_start: | 194 | __critical_start: |
| @@ -211,6 +203,7 @@ sysc_stmg: | |||
| 211 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC | 203 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
| 212 | lg %r10,__LC_LAST_BREAK | 204 | lg %r10,__LC_LAST_BREAK |
| 213 | lg %r12,__LC_THREAD_INFO | 205 | lg %r12,__LC_THREAD_INFO |
| 206 | lghi %r14,_PIF_SYSCALL | ||
| 214 | sysc_per: | 207 | sysc_per: |
| 215 | lg %r15,__LC_KERNEL_STACK | 208 | lg %r15,__LC_KERNEL_STACK |
| 216 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | 209 | la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs |
| @@ -221,8 +214,8 @@ sysc_vtime: | |||
| 221 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC | 214 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
| 222 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW | 215 | mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW |
| 223 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC | 216 | mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC |
| 217 | stg %r14,__PT_FLAGS(%r11) | ||
| 224 | sysc_do_svc: | 218 | sysc_do_svc: |
| 225 | oi __TI_flags+7(%r12),_TIF_SYSCALL | ||
| 226 | lg %r10,__TI_sysc_table(%r12) # address of system call table | 219 | lg %r10,__TI_sysc_table(%r12) # address of system call table |
| 227 | llgh %r8,__PT_INT_CODE+2(%r11) | 220 | llgh %r8,__PT_INT_CODE+2(%r11) |
| 228 | slag %r8,%r8,2 # shift and test for svc 0 | 221 | slag %r8,%r8,2 # shift and test for svc 0 |
| @@ -238,7 +231,7 @@ sysc_nr_ok: | |||
| 238 | stg %r2,__PT_ORIG_GPR2(%r11) | 231 | stg %r2,__PT_ORIG_GPR2(%r11) |
| 239 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | 232 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
| 240 | lgf %r9,0(%r8,%r10) # get system call addr. | 233 | lgf %r9,0(%r8,%r10) # get system call addr. |
| 241 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 | 234 | tm __TI_flags+7(%r12),_TIF_TRACE |
| 242 | jnz sysc_tracesys | 235 | jnz sysc_tracesys |
| 243 | basr %r14,%r9 # call sys_xxxx | 236 | basr %r14,%r9 # call sys_xxxx |
| 244 | stg %r2,__PT_R2(%r11) # store return value | 237 | stg %r2,__PT_R2(%r11) # store return value |
| @@ -248,9 +241,12 @@ sysc_return: | |||
| 248 | sysc_tif: | 241 | sysc_tif: |
| 249 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 242 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
| 250 | jno sysc_restore | 243 | jno sysc_restore |
| 251 | tm __TI_flags+7(%r12),_TIF_WORK_SVC | 244 | tm __PT_FLAGS+7(%r11),_PIF_WORK |
| 245 | jnz sysc_work | ||
| 246 | tm __TI_flags+7(%r12),_TIF_WORK | ||
| 252 | jnz sysc_work # check for work | 247 | jnz sysc_work # check for work |
| 253 | ni __TI_flags+7(%r12),255-_TIF_SYSCALL | 248 | tm __LC_CPU_FLAGS+7,_CIF_WORK |
| 249 | jnz sysc_work | ||
| 254 | sysc_restore: | 250 | sysc_restore: |
| 255 | lg %r14,__LC_VDSO_PER_CPU | 251 | lg %r14,__LC_VDSO_PER_CPU |
| 256 | lmg %r0,%r10,__PT_R0(%r11) | 252 | lmg %r0,%r10,__PT_R0(%r11) |
| @@ -265,17 +261,17 @@ sysc_done: | |||
| 265 | # One of the work bits is on. Find out which one. | 261 | # One of the work bits is on. Find out which one. |
| 266 | # | 262 | # |
| 267 | sysc_work: | 263 | sysc_work: |
| 268 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING | 264 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
| 269 | jo sysc_mcck_pending | 265 | jo sysc_mcck_pending |
| 270 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 266 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
| 271 | jo sysc_reschedule | 267 | jo sysc_reschedule |
| 272 | tm __TI_flags+7(%r12),_TIF_PER_TRAP | 268 | tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP |
| 273 | jo sysc_singlestep | 269 | jo sysc_singlestep |
| 274 | tm __TI_flags+7(%r12),_TIF_SIGPENDING | 270 | tm __TI_flags+7(%r12),_TIF_SIGPENDING |
| 275 | jo sysc_sigpending | 271 | jo sysc_sigpending |
| 276 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 272 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
| 277 | jo sysc_notify_resume | 273 | jo sysc_notify_resume |
| 278 | tm __TI_flags+7(%r12),_TIF_ASCE | 274 | tm __LC_CPU_FLAGS+7,_CIF_ASCE |
| 279 | jo sysc_uaccess | 275 | jo sysc_uaccess |
| 280 | j sysc_return # beware of critical section cleanup | 276 | j sysc_return # beware of critical section cleanup |
| 281 | 277 | ||
| @@ -287,17 +283,17 @@ sysc_reschedule: | |||
| 287 | jg schedule | 283 | jg schedule |
| 288 | 284 | ||
| 289 | # | 285 | # |
| 290 | # _TIF_MCCK_PENDING is set, call handler | 286 | # _CIF_MCCK_PENDING is set, call handler |
| 291 | # | 287 | # |
| 292 | sysc_mcck_pending: | 288 | sysc_mcck_pending: |
| 293 | larl %r14,sysc_return | 289 | larl %r14,sysc_return |
| 294 | jg s390_handle_mcck # TIF bit will be cleared by handler | 290 | jg s390_handle_mcck # TIF bit will be cleared by handler |
| 295 | 291 | ||
| 296 | # | 292 | # |
| 297 | # _TIF_ASCE is set, load user space asce | 293 | # _CIF_ASCE is set, load user space asce |
| 298 | # | 294 | # |
| 299 | sysc_uaccess: | 295 | sysc_uaccess: |
| 300 | ni __TI_flags+7(%r12),255-_TIF_ASCE | 296 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE |
| 301 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 297 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
| 302 | j sysc_return | 298 | j sysc_return |
| 303 | 299 | ||
| @@ -307,7 +303,7 @@ sysc_uaccess: | |||
| 307 | sysc_sigpending: | 303 | sysc_sigpending: |
| 308 | lgr %r2,%r11 # pass pointer to pt_regs | 304 | lgr %r2,%r11 # pass pointer to pt_regs |
| 309 | brasl %r14,do_signal | 305 | brasl %r14,do_signal |
| 310 | tm __TI_flags+7(%r12),_TIF_SYSCALL | 306 | tm __PT_FLAGS+7(%r11),_PIF_SYSCALL |
| 311 | jno sysc_return | 307 | jno sysc_return |
| 312 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments | 308 | lmg %r2,%r7,__PT_R2(%r11) # load svc arguments |
| 313 | lg %r10,__TI_sysc_table(%r12) # address of system call table | 309 | lg %r10,__TI_sysc_table(%r12) # address of system call table |
| @@ -327,10 +323,10 @@ sysc_notify_resume: | |||
| 327 | jg do_notify_resume | 323 | jg do_notify_resume |
| 328 | 324 | ||
| 329 | # | 325 | # |
| 330 | # _TIF_PER_TRAP is set, call do_per_trap | 326 | # _PIF_PER_TRAP is set, call do_per_trap |
| 331 | # | 327 | # |
| 332 | sysc_singlestep: | 328 | sysc_singlestep: |
| 333 | ni __TI_flags+7(%r12),255-_TIF_PER_TRAP | 329 | ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP |
| 334 | lgr %r2,%r11 # pass pointer to pt_regs | 330 | lgr %r2,%r11 # pass pointer to pt_regs |
| 335 | larl %r14,sysc_return | 331 | larl %r14,sysc_return |
| 336 | jg do_per_trap | 332 | jg do_per_trap |
| @@ -357,7 +353,7 @@ sysc_tracego: | |||
| 357 | basr %r14,%r9 # call sys_xxx | 353 | basr %r14,%r9 # call sys_xxx |
| 358 | stg %r2,__PT_R2(%r11) # store return value | 354 | stg %r2,__PT_R2(%r11) # store return value |
| 359 | sysc_tracenogo: | 355 | sysc_tracenogo: |
| 360 | tm __TI_flags+6(%r12),_TIF_TRACE >> 8 | 356 | tm __TI_flags+7(%r12),_TIF_TRACE |
| 361 | jz sysc_return | 357 | jz sysc_return |
| 362 | lgr %r2,%r11 # pass pointer to pt_regs | 358 | lgr %r2,%r11 # pass pointer to pt_regs |
| 363 | larl %r14,sysc_return | 359 | larl %r14,sysc_return |
| @@ -416,12 +412,13 @@ ENTRY(pgm_check_handler) | |||
| 416 | stmg %r8,%r9,__PT_PSW(%r11) | 412 | stmg %r8,%r9,__PT_PSW(%r11) |
| 417 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC | 413 | mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC |
| 418 | mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE | 414 | mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE |
| 415 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
| 419 | stg %r10,__PT_ARGS(%r11) | 416 | stg %r10,__PT_ARGS(%r11) |
| 420 | tm __LC_PGM_ILC+3,0x80 # check for per exception | 417 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
| 421 | jz 0f | 418 | jz 0f |
| 422 | tmhh %r8,0x0001 # kernel per event ? | 419 | tmhh %r8,0x0001 # kernel per event ? |
| 423 | jz pgm_kprobe | 420 | jz pgm_kprobe |
| 424 | oi __TI_flags+7(%r12),_TIF_PER_TRAP | 421 | oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP |
| 425 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS | 422 | mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS |
| 426 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE | 423 | mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE |
| 427 | mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID | 424 | mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID |
| @@ -451,10 +448,10 @@ pgm_kprobe: | |||
| 451 | # single stepped system call | 448 | # single stepped system call |
| 452 | # | 449 | # |
| 453 | pgm_svcper: | 450 | pgm_svcper: |
| 454 | oi __TI_flags+7(%r12),_TIF_PER_TRAP | ||
| 455 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW | 451 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW |
| 456 | larl %r14,sysc_per | 452 | larl %r14,sysc_per |
| 457 | stg %r14,__LC_RETURN_PSW+8 | 453 | stg %r14,__LC_RETURN_PSW+8 |
| 454 | lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP | ||
| 458 | lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs | 455 | lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs |
| 459 | 456 | ||
| 460 | /* | 457 | /* |
| @@ -479,6 +476,7 @@ io_skip: | |||
| 479 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC | 476 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC |
| 480 | stmg %r8,%r9,__PT_PSW(%r11) | 477 | stmg %r8,%r9,__PT_PSW(%r11) |
| 481 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID | 478 | mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID |
| 479 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
| 482 | TRACE_IRQS_OFF | 480 | TRACE_IRQS_OFF |
| 483 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 481 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
| 484 | io_loop: | 482 | io_loop: |
| @@ -499,8 +497,10 @@ io_return: | |||
| 499 | LOCKDEP_SYS_EXIT | 497 | LOCKDEP_SYS_EXIT |
| 500 | TRACE_IRQS_ON | 498 | TRACE_IRQS_ON |
| 501 | io_tif: | 499 | io_tif: |
| 502 | tm __TI_flags+7(%r12),_TIF_WORK_INT | 500 | tm __TI_flags+7(%r12),_TIF_WORK |
| 503 | jnz io_work # there is work to do (signals etc.) | 501 | jnz io_work # there is work to do (signals etc.) |
| 502 | tm __LC_CPU_FLAGS+7,_CIF_WORK | ||
| 503 | jnz io_work | ||
| 504 | io_restore: | 504 | io_restore: |
| 505 | lg %r14,__LC_VDSO_PER_CPU | 505 | lg %r14,__LC_VDSO_PER_CPU |
| 506 | lmg %r0,%r10,__PT_R0(%r11) | 506 | lmg %r0,%r10,__PT_R0(%r11) |
| @@ -513,7 +513,7 @@ io_done: | |||
| 513 | 513 | ||
| 514 | # | 514 | # |
| 515 | # There is work to do, find out in which context we have been interrupted: | 515 | # There is work to do, find out in which context we have been interrupted: |
| 516 | # 1) if we return to user space we can do all _TIF_WORK_INT work | 516 | # 1) if we return to user space we can do all _TIF_WORK work |
| 517 | # 2) if we return to kernel code and kvm is enabled check if we need to | 517 | # 2) if we return to kernel code and kvm is enabled check if we need to |
| 518 | # modify the psw to leave SIE | 518 | # modify the psw to leave SIE |
| 519 | # 3) if we return to kernel code and preemptive scheduling is enabled check | 519 | # 3) if we return to kernel code and preemptive scheduling is enabled check |
| @@ -557,11 +557,9 @@ io_work_user: | |||
| 557 | 557 | ||
| 558 | # | 558 | # |
| 559 | # One of the work bits is on. Find out which one. | 559 | # One of the work bits is on. Find out which one. |
| 560 | # Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED | ||
| 561 | # and _TIF_MCCK_PENDING | ||
| 562 | # | 560 | # |
| 563 | io_work_tif: | 561 | io_work_tif: |
| 564 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING | 562 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
| 565 | jo io_mcck_pending | 563 | jo io_mcck_pending |
| 566 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED | 564 | tm __TI_flags+7(%r12),_TIF_NEED_RESCHED |
| 567 | jo io_reschedule | 565 | jo io_reschedule |
| @@ -569,12 +567,12 @@ io_work_tif: | |||
| 569 | jo io_sigpending | 567 | jo io_sigpending |
| 570 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME | 568 | tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME |
| 571 | jo io_notify_resume | 569 | jo io_notify_resume |
| 572 | tm __TI_flags+7(%r12),_TIF_ASCE | 570 | tm __LC_CPU_FLAGS+7,_CIF_ASCE |
| 573 | jo io_uaccess | 571 | jo io_uaccess |
| 574 | j io_return # beware of critical section cleanup | 572 | j io_return # beware of critical section cleanup |
| 575 | 573 | ||
| 576 | # | 574 | # |
| 577 | # _TIF_MCCK_PENDING is set, call handler | 575 | # _CIF_MCCK_PENDING is set, call handler |
| 578 | # | 576 | # |
| 579 | io_mcck_pending: | 577 | io_mcck_pending: |
| 580 | # TRACE_IRQS_ON already done at io_return | 578 | # TRACE_IRQS_ON already done at io_return |
| @@ -583,10 +581,10 @@ io_mcck_pending: | |||
| 583 | j io_return | 581 | j io_return |
| 584 | 582 | ||
| 585 | # | 583 | # |
| 586 | # _TIF_ASCE is set, load user space asce | 584 | # _CIF_ASCE is set, load user space asce |
| 587 | # | 585 | # |
| 588 | io_uaccess: | 586 | io_uaccess: |
| 589 | ni __TI_flags+7(%r12),255-_TIF_ASCE | 587 | ni __LC_CPU_FLAGS+7,255-_CIF_ASCE |
| 590 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 588 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
| 591 | j io_return | 589 | j io_return |
| 592 | 590 | ||
| @@ -650,6 +648,7 @@ ext_skip: | |||
| 650 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR | 648 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR |
| 651 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS | 649 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS |
| 652 | mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) | 650 | mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) |
| 651 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
| 653 | TRACE_IRQS_OFF | 652 | TRACE_IRQS_OFF |
| 654 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 653 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
| 655 | lgr %r2,%r11 # pass pointer to pt_regs | 654 | lgr %r2,%r11 # pass pointer to pt_regs |
| @@ -716,6 +715,7 @@ mcck_skip: | |||
| 716 | stmg %r0,%r7,__PT_R0(%r11) | 715 | stmg %r0,%r7,__PT_R0(%r11) |
| 717 | mvc __PT_R8(64,%r11),0(%r14) | 716 | mvc __PT_R8(64,%r11),0(%r14) |
| 718 | stmg %r8,%r9,__PT_PSW(%r11) | 717 | stmg %r8,%r9,__PT_PSW(%r11) |
| 718 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) | ||
| 719 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 719 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
| 720 | lgr %r2,%r11 # pass pointer to pt_regs | 720 | lgr %r2,%r11 # pass pointer to pt_regs |
| 721 | brasl %r14,s390_do_machine_check | 721 | brasl %r14,s390_do_machine_check |
| @@ -727,7 +727,7 @@ mcck_skip: | |||
| 727 | la %r11,STACK_FRAME_OVERHEAD(%r1) | 727 | la %r11,STACK_FRAME_OVERHEAD(%r1) |
| 728 | lgr %r15,%r1 | 728 | lgr %r15,%r1 |
| 729 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off | 729 | ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off |
| 730 | tm __TI_flags+7(%r12),_TIF_MCCK_PENDING | 730 | tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING |
| 731 | jno mcck_return | 731 | jno mcck_return |
| 732 | TRACE_IRQS_OFF | 732 | TRACE_IRQS_OFF |
| 733 | brasl %r14,s390_handle_mcck | 733 | brasl %r14,s390_handle_mcck |
| @@ -884,6 +884,8 @@ cleanup_system_call: | |||
| 884 | stmg %r0,%r7,__PT_R0(%r9) | 884 | stmg %r0,%r7,__PT_R0(%r9) |
| 885 | mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW | 885 | mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW |
| 886 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC | 886 | mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC |
| 887 | xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) | ||
| 888 | mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL | ||
| 887 | # setup saved register r15 | 889 | # setup saved register r15 |
| 888 | stg %r15,56(%r11) # r15 stack pointer | 890 | stg %r15,56(%r11) # r15 stack pointer |
| 889 | # set new psw address and exit | 891 | # set new psw address and exit |
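Taken together, the 31-bit and 64-bit entry code rewrites above and the C-side changes elsewhere in this patch (compat_signal.c, nmi.c, process.c, ptrace.c) split the old thread_info work bits into three homes: true per-thread bits stay in __TI_flags (_TIF_*), per-CPU bits such as a pending machine check or an ASCE reload move into a cpu_flags word in the lowcore (_CIF_*), and per-kernel-entry bits such as "inside a syscall" or a pending PER trap move into pt_regs->flags (_PIF_*). That split is what lets __switch_to drop the whole _TIF_TRANSFER bit-copying sequence, since CPU-local state no longer has to chase the task across CPUs. The accessors this implies look roughly like the following sketch; the structures are stand-ins and the real helpers live in headers that are not part of this diff:

#include <stdio.h>

/* Bit numbers chosen for illustration, not quoted from the real headers. */
#define CIF_MCCK_PENDING	0	/* per CPU: machine check pending  */
#define CIF_ASCE		1	/* per CPU: user ASCE needs reload */
#define PIF_SYSCALL		0	/* per entry: inside a system call */
#define PIF_PER_TRAP		1	/* per entry: deliver a PER trap   */

static struct { unsigned long cpu_flags; } S390_lowcore;	/* stand-in */
struct pt_regs { unsigned long flags; };			/* stand-in */

static void set_cpu_flag(int f)   { S390_lowcore.cpu_flags |= 1UL << f; }
static void clear_cpu_flag(int f) { S390_lowcore.cpu_flags &= ~(1UL << f); }
static int  test_cpu_flag(int f)  { return !!(S390_lowcore.cpu_flags & (1UL << f)); }

static void set_pt_regs_flag(struct pt_regs *regs, int f)
{
	regs->flags |= 1UL << f;
}
static void clear_pt_regs_flag(struct pt_regs *regs, int f)
{
	regs->flags &= ~(1UL << f);
}

int main(void)
{
	struct pt_regs regs = { 0 };

	set_cpu_flag(CIF_MCCK_PENDING);		/* s390_do_machine_check()  */
	set_cpu_flag(CIF_ASCE);			/* request user-ASCE reload */
	set_pt_regs_flag(&regs, PIF_SYSCALL);	/* sysc_stm/sysc_stmg path  */
	if (test_cpu_flag(CIF_MCCK_PENDING))	/* arch_cpu_idle(), sysc_work */
		clear_cpu_flag(CIF_MCCK_PENDING);	/* s390_handle_mcck() */
	clear_pt_regs_flag(&regs, PIF_SYSCALL);	/* restore_sigregs32()      */
	printf("cpu_flags=%#lx pt_regs.flags=%#lx\n",
	       S390_lowcore.cpu_flags, regs.flags);
	return 0;
}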
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 429afcc480cb..7ba7d6784510 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
| @@ -437,13 +437,13 @@ ENTRY(startup_kdump) | |||
| 437 | 437 | ||
| 438 | #if defined(CONFIG_64BIT) | 438 | #if defined(CONFIG_64BIT) |
| 439 | #if defined(CONFIG_MARCH_ZEC12) | 439 | #if defined(CONFIG_MARCH_ZEC12) |
| 440 | .long 3, 0xc100efe3, 0xf46ce800, 0x00400000 | 440 | .long 3, 0xc100efea, 0xf46ce800, 0x00400000 |
| 441 | #elif defined(CONFIG_MARCH_Z196) | 441 | #elif defined(CONFIG_MARCH_Z196) |
| 442 | .long 2, 0xc100efe3, 0xf46c0000 | 442 | .long 2, 0xc100efea, 0xf46c0000 |
| 443 | #elif defined(CONFIG_MARCH_Z10) | 443 | #elif defined(CONFIG_MARCH_Z10) |
| 444 | .long 2, 0xc100efe3, 0xf0680000 | 444 | .long 2, 0xc100efea, 0xf0680000 |
| 445 | #elif defined(CONFIG_MARCH_Z9_109) | 445 | #elif defined(CONFIG_MARCH_Z9_109) |
| 446 | .long 1, 0xc100efc3 | 446 | .long 1, 0xc100efc2 |
| 447 | #elif defined(CONFIG_MARCH_Z990) | 447 | #elif defined(CONFIG_MARCH_Z990) |
| 448 | .long 1, 0xc0002000 | 448 | .long 1, 0xc0002000 |
| 449 | #elif defined(CONFIG_MARCH_Z900) | 449 | #elif defined(CONFIG_MARCH_Z900) |
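The head.S hunk adjusts the facility masks that startup compares against the facilities the machine reports via STFL/STFLE; the leading .long value is the number of 32-bit mask words that follow. The comparison amounts to an "every required bit must be installed" test per word, roughly as in this stand-alone sketch (the required[] words are the z196 line from the hunk; the installed[] values are invented for the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* z196 line from the diff: 2 mask words follow the count. */
	uint32_t required[]  = { 0xc100efea, 0xf46c0000 };
	uint32_t installed[] = { 0xffffffff, 0xfffff000 };	/* made up */
	int i, ok = 1;

	for (i = 0; i < 2; i++)
		if ((installed[i] & required[i]) != required[i]) {
			printf("word %d: missing facility bits %#x\n",
			       i, required[i] & ~installed[i]);
			ok = 0;
		}
	puts(ok ? "machine supports this kernel build"
		: "kernel would refuse to start");
	return 0;
}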
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 9a99856df1c9..6dbe80983a24 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
| @@ -59,7 +59,6 @@ ENTRY(startup_continue) | |||
| 59 | .long 0 # cr13: home space segment table | 59 | .long 0 # cr13: home space segment table |
| 60 | .long 0xc0000000 # cr14: machine check handling off | 60 | .long 0xc0000000 # cr14: machine check handling off |
| 61 | .long 0 # cr15: linkage stack operations | 61 | .long 0 # cr15: linkage stack operations |
| 62 | .Lmchunk:.long memory_chunk | ||
| 63 | .Lbss_bgn: .long __bss_start | 62 | .Lbss_bgn: .long __bss_start |
| 64 | .Lbss_end: .long _end | 63 | .Lbss_end: .long _end |
| 65 | .Lparmaddr: .long PARMAREA | 64 | .Lparmaddr: .long PARMAREA |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index c4c033819879..210e1285f75a 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c | |||
| @@ -55,7 +55,7 @@ void s390_handle_mcck(void) | |||
| 55 | local_mcck_disable(); | 55 | local_mcck_disable(); |
| 56 | mcck = __get_cpu_var(cpu_mcck); | 56 | mcck = __get_cpu_var(cpu_mcck); |
| 57 | memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); | 57 | memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); |
| 58 | clear_thread_flag(TIF_MCCK_PENDING); | 58 | clear_cpu_flag(CIF_MCCK_PENDING); |
| 59 | local_mcck_enable(); | 59 | local_mcck_enable(); |
| 60 | local_irq_restore(flags); | 60 | local_irq_restore(flags); |
| 61 | 61 | ||
| @@ -313,7 +313,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) | |||
| 313 | */ | 313 | */ |
| 314 | mcck->kill_task = 1; | 314 | mcck->kill_task = 1; |
| 315 | mcck->mcck_code = *(unsigned long long *) mci; | 315 | mcck->mcck_code = *(unsigned long long *) mci; |
| 316 | set_thread_flag(TIF_MCCK_PENDING); | 316 | set_cpu_flag(CIF_MCCK_PENDING); |
| 317 | } else { | 317 | } else { |
| 318 | /* | 318 | /* |
| 319 | * Couldn't restore all register contents while in | 319 | * Couldn't restore all register contents while in |
| @@ -352,12 +352,12 @@ void notrace s390_do_machine_check(struct pt_regs *regs) | |||
| 352 | if (mci->cp) { | 352 | if (mci->cp) { |
| 353 | /* Channel report word pending */ | 353 | /* Channel report word pending */ |
| 354 | mcck->channel_report = 1; | 354 | mcck->channel_report = 1; |
| 355 | set_thread_flag(TIF_MCCK_PENDING); | 355 | set_cpu_flag(CIF_MCCK_PENDING); |
| 356 | } | 356 | } |
| 357 | if (mci->w) { | 357 | if (mci->w) { |
| 358 | /* Warning pending */ | 358 | /* Warning pending */ |
| 359 | mcck->warning = 1; | 359 | mcck->warning = 1; |
| 360 | set_thread_flag(TIF_MCCK_PENDING); | 360 | set_cpu_flag(CIF_MCCK_PENDING); |
| 361 | } | 361 | } |
| 362 | nmi_exit(); | 362 | nmi_exit(); |
| 363 | } | 363 | } |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index dd145321d215..93b9ca42e5c0 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
| @@ -64,7 +64,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
| 64 | void arch_cpu_idle(void) | 64 | void arch_cpu_idle(void) |
| 65 | { | 65 | { |
| 66 | local_mcck_disable(); | 66 | local_mcck_disable(); |
| 67 | if (test_thread_flag(TIF_MCCK_PENDING)) { | 67 | if (test_cpu_flag(CIF_MCCK_PENDING)) { |
| 68 | local_mcck_enable(); | 68 | local_mcck_enable(); |
| 69 | local_irq_enable(); | 69 | local_irq_enable(); |
| 70 | return; | 70 | return; |
| @@ -76,7 +76,7 @@ void arch_cpu_idle(void) | |||
| 76 | 76 | ||
| 77 | void arch_cpu_idle_exit(void) | 77 | void arch_cpu_idle_exit(void) |
| 78 | { | 78 | { |
| 79 | if (test_thread_flag(TIF_MCCK_PENDING)) | 79 | if (test_cpu_flag(CIF_MCCK_PENDING)) |
| 80 | s390_handle_mcck(); | 80 | s390_handle_mcck(); |
| 81 | } | 81 | } |
| 82 | 82 | ||
| @@ -123,7 +123,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | |||
| 123 | memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); | 123 | memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); |
| 124 | memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); | 124 | memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); |
| 125 | clear_tsk_thread_flag(p, TIF_SINGLE_STEP); | 125 | clear_tsk_thread_flag(p, TIF_SINGLE_STEP); |
| 126 | clear_tsk_thread_flag(p, TIF_PER_TRAP); | ||
| 127 | /* Initialize per thread user and system timer values */ | 126 | /* Initialize per thread user and system timer values */ |
| 128 | ti = task_thread_info(p); | 127 | ti = task_thread_info(p); |
| 129 | ti->user_timer = 0; | 128 | ti->user_timer = 0; |
| @@ -152,6 +151,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | |||
| 152 | } | 151 | } |
| 153 | frame->childregs = *current_pt_regs(); | 152 | frame->childregs = *current_pt_regs(); |
| 154 | frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ | 153 | frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ |
| 154 | frame->childregs.flags = 0; | ||
| 155 | if (new_stackp) | 155 | if (new_stackp) |
| 156 | frame->childregs.gprs[15] = new_stackp; | 156 | frame->childregs.gprs[15] = new_stackp; |
| 157 | 157 | ||
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 1c82619eb4f7..2d716734b5b1 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
| @@ -136,7 +136,7 @@ void ptrace_disable(struct task_struct *task) | |||
| 136 | memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); | 136 | memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); |
| 137 | memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); | 137 | memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); |
| 138 | clear_tsk_thread_flag(task, TIF_SINGLE_STEP); | 138 | clear_tsk_thread_flag(task, TIF_SINGLE_STEP); |
| 139 | clear_tsk_thread_flag(task, TIF_PER_TRAP); | 139 | clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP); |
| 140 | task->thread.per_flags = 0; | 140 | task->thread.per_flags = 0; |
| 141 | } | 141 | } |
| 142 | 142 | ||
| @@ -813,7 +813,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) | |||
| 813 | * debugger stored an invalid system call number. Skip | 813 | * debugger stored an invalid system call number. Skip |
| 814 | * the system call and the system call restart handling. | 814 | * the system call and the system call restart handling. |
| 815 | */ | 815 | */ |
| 816 | clear_thread_flag(TIF_SYSCALL); | 816 | clear_pt_regs_flag(regs, PIF_SYSCALL); |
| 817 | ret = -1; | 817 | ret = -1; |
| 818 | } | 818 | } |
| 819 | 819 | ||
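ptrace_disable() can clear PIF_PER_TRAP in another task because pt_regs sits at a fixed spot at the top of the traced task's kernel stack, so task_pt_regs() is pure pointer arithmetic on that stack. A sketch of the layout with a fake stack page; THREAD_SIZE and the macro shape are assumptions for illustration, not quoted from the s390 headers:

#include <stdio.h>

#define THREAD_SIZE 8192			/* assumed stack size */
struct pt_regs { unsigned long flags; };	/* stand-in */

static unsigned long stack_page[THREAD_SIZE / sizeof(unsigned long)];

/* pt_regs occupies the very top of the stack page. */
#define task_pt_regs() \
	((struct pt_regs *)((char *)stack_page + THREAD_SIZE) - 1)

int main(void)
{
	struct pt_regs *regs = task_pt_regs();

	regs->flags |= 1UL << 1;	/* pretend PIF_PER_TRAP was set */
	regs->flags &= ~(1UL << 1);	/* what ptrace_disable() now does */
	printf("pt_regs at stack offset %ld, flags=%#lx\n",
	       (long)((char *)regs - (char *)stack_page), regs->flags);
	return 0;
}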
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 88d1ca81e2dd..1e2264b46e4c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
| @@ -78,10 +78,9 @@ EXPORT_SYMBOL(console_irq); | |||
| 78 | unsigned long elf_hwcap = 0; | 78 | unsigned long elf_hwcap = 0; |
| 79 | char elf_platform[ELF_PLATFORM_SIZE]; | 79 | char elf_platform[ELF_PLATFORM_SIZE]; |
| 80 | 80 | ||
| 81 | struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; | ||
| 82 | |||
| 83 | int __initdata memory_end_set; | 81 | int __initdata memory_end_set; |
| 84 | unsigned long __initdata memory_end; | 82 | unsigned long __initdata memory_end; |
| 83 | unsigned long __initdata max_physmem_end; | ||
| 85 | 84 | ||
| 86 | unsigned long VMALLOC_START; | 85 | unsigned long VMALLOC_START; |
| 87 | EXPORT_SYMBOL(VMALLOC_START); | 86 | EXPORT_SYMBOL(VMALLOC_START); |
| @@ -212,7 +211,7 @@ static void __init conmode_default(void) | |||
| 212 | } | 211 | } |
| 213 | } | 212 | } |
| 214 | 213 | ||
| 215 | #ifdef CONFIG_ZFCPDUMP | 214 | #ifdef CONFIG_CRASH_DUMP |
| 216 | static void __init setup_zfcpdump(void) | 215 | static void __init setup_zfcpdump(void) |
| 217 | { | 216 | { |
| 218 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 217 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
| @@ -224,7 +223,7 @@ static void __init setup_zfcpdump(void) | |||
| 224 | } | 223 | } |
| 225 | #else | 224 | #else |
| 226 | static inline void setup_zfcpdump(void) {} | 225 | static inline void setup_zfcpdump(void) {} |
| 227 | #endif /* CONFIG_ZFCPDUMP */ | 226 | #endif /* CONFIG_CRASH_DUMP */ |
| 228 | 227 | ||
| 229 | /* | 228 | /* |
| 230 | * Reboot, halt and power_off stubs. They just call _machine_restart, | 229 | * Reboot, halt and power_off stubs. They just call _machine_restart, |
| @@ -273,6 +272,7 @@ EXPORT_SYMBOL_GPL(pm_power_off); | |||
| 273 | static int __init early_parse_mem(char *p) | 272 | static int __init early_parse_mem(char *p) |
| 274 | { | 273 | { |
| 275 | memory_end = memparse(p, &p); | 274 | memory_end = memparse(p, &p); |
| 275 | memory_end &= PAGE_MASK; | ||
| 276 | memory_end_set = 1; | 276 | memory_end_set = 1; |
| 277 | return 0; | 277 | return 0; |
| 278 | } | 278 | } |
| @@ -373,6 +373,10 @@ static void __init setup_lowcore(void) | |||
| 373 | mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source); | 373 | mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source); |
| 374 | mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw); | 374 | mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw); |
| 375 | 375 | ||
| 376 | #ifdef CONFIG_SMP | ||
| 377 | lc->spinlock_lockval = arch_spin_lockval(0); | ||
| 378 | #endif | ||
| 379 | |||
| 376 | set_prefix((u32)(unsigned long) lc); | 380 | set_prefix((u32)(unsigned long) lc); |
| 377 | lowcore_ptr[0] = lc; | 381 | lowcore_ptr[0] = lc; |
| 378 | } | 382 | } |
| @@ -401,7 +405,8 @@ static struct resource __initdata *standard_resources[] = { | |||
| 401 | static void __init setup_resources(void) | 405 | static void __init setup_resources(void) |
| 402 | { | 406 | { |
| 403 | struct resource *res, *std_res, *sub_res; | 407 | struct resource *res, *std_res, *sub_res; |
| 404 | int i, j; | 408 | struct memblock_region *reg; |
| 409 | int j; | ||
| 405 | 410 | ||
| 406 | code_resource.start = (unsigned long) &_text; | 411 | code_resource.start = (unsigned long) &_text; |
| 407 | code_resource.end = (unsigned long) &_etext - 1; | 412 | code_resource.end = (unsigned long) &_etext - 1; |
| @@ -410,24 +415,13 @@ static void __init setup_resources(void) | |||
| 410 | bss_resource.start = (unsigned long) &__bss_start; | 415 | bss_resource.start = (unsigned long) &__bss_start; |
| 411 | bss_resource.end = (unsigned long) &__bss_stop - 1; | 416 | bss_resource.end = (unsigned long) &__bss_stop - 1; |
| 412 | 417 | ||
| 413 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 418 | for_each_memblock(memory, reg) { |
| 414 | if (!memory_chunk[i].size) | ||
| 415 | continue; | ||
| 416 | res = alloc_bootmem_low(sizeof(*res)); | 419 | res = alloc_bootmem_low(sizeof(*res)); |
| 417 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | 420 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; |
| 418 | switch (memory_chunk[i].type) { | 421 | |
| 419 | case CHUNK_READ_WRITE: | 422 | res->name = "System RAM"; |
| 420 | res->name = "System RAM"; | 423 | res->start = reg->base; |
| 421 | break; | 424 | res->end = reg->base + reg->size - 1; |
| 422 | case CHUNK_READ_ONLY: | ||
| 423 | res->name = "System ROM"; | ||
| 424 | res->flags |= IORESOURCE_READONLY; | ||
| 425 | break; | ||
| 426 | default: | ||
| 427 | res->name = "reserved"; | ||
| 428 | } | ||
| 429 | res->start = memory_chunk[i].addr; | ||
| 430 | res->end = res->start + memory_chunk[i].size - 1; | ||
| 431 | request_resource(&iomem_resource, res); | 425 | request_resource(&iomem_resource, res); |
| 432 | 426 | ||
| 433 | for (j = 0; j < ARRAY_SIZE(standard_resources); j++) { | 427 | for (j = 0; j < ARRAY_SIZE(standard_resources); j++) { |
| @@ -451,48 +445,11 @@ static void __init setup_resources(void) | |||
| 451 | static void __init setup_memory_end(void) | 445 | static void __init setup_memory_end(void) |
| 452 | { | 446 | { |
| 453 | unsigned long vmax, vmalloc_size, tmp; | 447 | unsigned long vmax, vmalloc_size, tmp; |
| 454 | unsigned long real_memory_size = 0; | ||
| 455 | int i; | ||
| 456 | |||
| 457 | |||
| 458 | #ifdef CONFIG_ZFCPDUMP | ||
| 459 | if (ipl_info.type == IPL_TYPE_FCP_DUMP && | ||
| 460 | !OLDMEM_BASE && sclp_get_hsa_size()) { | ||
| 461 | memory_end = sclp_get_hsa_size(); | ||
| 462 | memory_end_set = 1; | ||
| 463 | } | ||
| 464 | #endif | ||
| 465 | memory_end &= PAGE_MASK; | ||
| 466 | |||
| 467 | /* | ||
| 468 | * Make sure all chunks are MAX_ORDER aligned so we don't need the | ||
| 469 | * extra checks that HOLES_IN_ZONE would require. | ||
| 470 | */ | ||
| 471 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
| 472 | unsigned long start, end; | ||
| 473 | struct mem_chunk *chunk; | ||
| 474 | unsigned long align; | ||
| 475 | |||
| 476 | chunk = &memory_chunk[i]; | ||
| 477 | if (!chunk->size) | ||
| 478 | continue; | ||
| 479 | align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); | ||
| 480 | start = (chunk->addr + align - 1) & ~(align - 1); | ||
| 481 | end = (chunk->addr + chunk->size) & ~(align - 1); | ||
| 482 | if (start >= end) | ||
| 483 | memset(chunk, 0, sizeof(*chunk)); | ||
| 484 | else { | ||
| 485 | chunk->addr = start; | ||
| 486 | chunk->size = end - start; | ||
| 487 | } | ||
| 488 | real_memory_size = max(real_memory_size, | ||
| 489 | chunk->addr + chunk->size); | ||
| 490 | } | ||
| 491 | 448 | ||
| 492 | /* Choose kernel address space layout: 2, 3, or 4 levels. */ | 449 | /* Choose kernel address space layout: 2, 3, or 4 levels. */ |
| 493 | #ifdef CONFIG_64BIT | 450 | #ifdef CONFIG_64BIT |
| 494 | vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; | 451 | vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; |
| 495 | tmp = (memory_end ?: real_memory_size) / PAGE_SIZE; | 452 | tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; |
| 496 | tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size; | 453 | tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size; |
| 497 | if (tmp <= (1UL << 42)) | 454 | if (tmp <= (1UL << 42)) |
| 498 | vmax = 1UL << 42; /* 3-level kernel page table */ | 455 | vmax = 1UL << 42; /* 3-level kernel page table */ |
| @@ -520,21 +477,11 @@ static void __init setup_memory_end(void) | |||
| 520 | vmemmap = (struct page *) tmp; | 477 | vmemmap = (struct page *) tmp; |
| 521 | 478 | ||
| 522 | /* Take care that memory_end is set and <= vmemmap */ | 479 | /* Take care that memory_end is set and <= vmemmap */ |
| 523 | memory_end = min(memory_end ?: real_memory_size, tmp); | 480 | memory_end = min(memory_end ?: max_physmem_end, tmp); |
| 524 | 481 | max_pfn = max_low_pfn = PFN_DOWN(memory_end); | |
| 525 | /* Fixup memory chunk array to fit into 0..memory_end */ | 482 | memblock_remove(memory_end, ULONG_MAX); |
| 526 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
| 527 | struct mem_chunk *chunk = &memory_chunk[i]; | ||
| 528 | 483 | ||
| 529 | if (!chunk->size) | 484 | pr_notice("Max memory size: %luMB\n", memory_end >> 20); |
| 530 | continue; | ||
| 531 | if (chunk->addr >= memory_end) { | ||
| 532 | memset(chunk, 0, sizeof(*chunk)); | ||
| 533 | continue; | ||
| 534 | } | ||
| 535 | if (chunk->addr + chunk->size > memory_end) | ||
| 536 | chunk->size = memory_end - chunk->addr; | ||
| 537 | } | ||
| 538 | } | 485 | } |
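A worked example helps with the level-selection arithmetic that survives the rewrite. The numbers below are illustrative, not from the patch:

    /* 64 GB of memory, 4 KB pages; sizeof(struct page) assumed 64. */
    unsigned long pages = (64UL << 30) >> PAGE_SHIFT;      /* 16M pages     */
    unsigned long tmp = pages * (sizeof(struct page) + PAGE_SIZE)
                      + vmalloc_size;                      /* roughly 65 GB */
    /* tmp stays far below 1UL << 42 (4 TB), so vmax = 1UL << 42 and a
     * 3-level kernel page table suffices; only configurations whose
     * page array plus vmalloc area push past 4 TB need 4 levels. */

The formula charges each physical page both its identity mapping and its struct page entry, so the comparison is against the kernel address space needed, not against physical memory alone.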
| 539 | 486 | ||
| 540 | static void __init setup_vmcoreinfo(void) | 487 | static void __init setup_vmcoreinfo(void) |
| @@ -545,89 +492,6 @@ static void __init setup_vmcoreinfo(void) | |||
| 545 | #ifdef CONFIG_CRASH_DUMP | 492 | #ifdef CONFIG_CRASH_DUMP |
| 546 | 493 | ||
| 547 | /* | 494 | /* |
| 548 | * Find suitable location for crashkernel memory | ||
| 549 | */ | ||
| 550 | static unsigned long __init find_crash_base(unsigned long crash_size, | ||
| 551 | char **msg) | ||
| 552 | { | ||
| 553 | unsigned long crash_base; | ||
| 554 | struct mem_chunk *chunk; | ||
| 555 | int i; | ||
| 556 | |||
| 557 | if (memory_chunk[0].size < crash_size) { | ||
| 558 | *msg = "first memory chunk must be at least crashkernel size"; | ||
| 559 | return 0; | ||
| 560 | } | ||
| 561 | if (OLDMEM_BASE && crash_size == OLDMEM_SIZE) | ||
| 562 | return OLDMEM_BASE; | ||
| 563 | |||
| 564 | for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { | ||
| 565 | chunk = &memory_chunk[i]; | ||
| 566 | if (chunk->size == 0) | ||
| 567 | continue; | ||
| 568 | if (chunk->type != CHUNK_READ_WRITE) | ||
| 569 | continue; | ||
| 570 | if (chunk->size < crash_size) | ||
| 571 | continue; | ||
| 572 | crash_base = (chunk->addr + chunk->size) - crash_size; | ||
| 573 | if (crash_base < crash_size) | ||
| 574 | continue; | ||
| 575 | if (crash_base < sclp_get_hsa_size()) | ||
| 576 | continue; | ||
| 577 | if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE) | ||
| 578 | continue; | ||
| 579 | return crash_base; | ||
| 580 | } | ||
| 581 | *msg = "no suitable area found"; | ||
| 582 | return 0; | ||
| 583 | } | ||
| 584 | |||
| 585 | /* | ||
| 586 | * Check if crash_base and crash_size is valid | ||
| 587 | */ | ||
| 588 | static int __init verify_crash_base(unsigned long crash_base, | ||
| 589 | unsigned long crash_size, | ||
| 590 | char **msg) | ||
| 591 | { | ||
| 592 | struct mem_chunk *chunk; | ||
| 593 | int i; | ||
| 594 | |||
| 595 | /* | ||
| 596 | * Because we do the swap to zero, we must have at least 'crash_size' | ||
| 597 | * bytes free space before crash_base | ||
| 598 | */ | ||
| 599 | if (crash_size > crash_base) { | ||
| 600 | *msg = "crashkernel offset must be greater than size"; | ||
| 601 | return -EINVAL; | ||
| 602 | } | ||
| 603 | |||
| 604 | /* First memory chunk must be at least crash_size */ | ||
| 605 | if (memory_chunk[0].size < crash_size) { | ||
| 606 | *msg = "first memory chunk must be at least crashkernel size"; | ||
| 607 | return -EINVAL; | ||
| 608 | } | ||
| 609 | /* Check if we fit into the respective memory chunk */ | ||
| 610 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
| 611 | chunk = &memory_chunk[i]; | ||
| 612 | if (chunk->size == 0) | ||
| 613 | continue; | ||
| 614 | if (crash_base < chunk->addr) | ||
| 615 | continue; | ||
| 616 | if (crash_base >= chunk->addr + chunk->size) | ||
| 617 | continue; | ||
| 618 | /* we have found the memory chunk */ | ||
| 619 | if (crash_base + crash_size > chunk->addr + chunk->size) { | ||
| 620 | *msg = "selected memory chunk is too small for " | ||
| 621 | "crashkernel memory"; | ||
| 622 | return -EINVAL; | ||
| 623 | } | ||
| 624 | return 0; | ||
| 625 | } | ||
| 626 | *msg = "invalid memory range specified"; | ||
| 627 | return -EINVAL; | ||
| 628 | } | ||
| 629 | |||
| 630 | /* | ||
| 631 | * When kdump is enabled, we have to ensure that no memory from | 495 | * When kdump is enabled, we have to ensure that no memory from |
| 632 | * the area [0 - crashkernel memory size] and | 496 | * the area [0 - crashkernel memory size] and |
| 633 | * [crashk_res.start - crashk_res.end] is set offline. | 497 | * [crashk_res.start - crashk_res.end] is set offline. |
| @@ -653,23 +517,44 @@ static struct notifier_block kdump_mem_nb = { | |||
| 653 | #endif | 517 | #endif |
| 654 | 518 | ||
| 655 | /* | 519 | /* |
| 520 | * Make sure that the area beyond memory_end is protected | ||
| 521 | */ | ||
| 522 | static void reserve_memory_end(void) | ||
| 523 | { | ||
| 524 | #ifdef CONFIG_CRASH_DUMP | ||
| 525 | if (ipl_info.type == IPL_TYPE_FCP_DUMP && | ||
| 526 | !OLDMEM_BASE && sclp_get_hsa_size()) { | ||
| 527 | memory_end = sclp_get_hsa_size(); | ||
| 528 | memory_end &= PAGE_MASK; | ||
| 529 | memory_end_set = 1; | ||
| 530 | } | ||
| 531 | #endif | ||
| 532 | if (!memory_end_set) | ||
| 533 | return; | ||
| 534 | memblock_reserve(memory_end, ULONG_MAX); | ||
| 535 | } | ||
| 536 | |||
| 537 | /* | ||
| 656 | * Make sure that oldmem, where the dump is stored, is protected | 538 | * Make sure that oldmem, where the dump is stored, is protected |
| 657 | */ | 539 | */ |
| 658 | static void reserve_oldmem(void) | 540 | static void reserve_oldmem(void) |
| 659 | { | 541 | { |
| 660 | #ifdef CONFIG_CRASH_DUMP | 542 | #ifdef CONFIG_CRASH_DUMP |
| 661 | unsigned long real_size = 0; | 543 | if (OLDMEM_BASE) |
| 662 | int i; | 544 | /* Forget all memory above the running kdump system */ |
| 663 | 545 | memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX); | |
| 664 | if (!OLDMEM_BASE) | 546 | #endif |
| 665 | return; | 547 | } |
| 666 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
| 667 | struct mem_chunk *chunk = &memory_chunk[i]; | ||
| 668 | 548 | ||
| 669 | real_size = max(real_size, chunk->addr + chunk->size); | 549 | /* |
| 670 | } | 550 | * Forget the memory above the running kdump system |
| 671 | create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE); | 551 | */ |
| 672 | create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE); | 552 | static void remove_oldmem(void) |
| 553 | { | ||
| 554 | #ifdef CONFIG_CRASH_DUMP | ||
| 555 | if (OLDMEM_BASE) | ||
| 556 | /* Forget all memory above the running kdump system */ | ||
| 557 | memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX); | ||
| 673 | #endif | 558 | #endif |
| 674 | } | 559 | } |
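The reserve/remove pairing only works because of the call order that the setup_arch() hunk further down establishes: reservations are recorded before detect_memory_memblock() populates the memory list, so reserved ranges are never handed out, and remove_oldmem() afterwards drops everything above the kdump region entirely. A sketch of that sequence, taken from the hunk below:

    reserve_memory_end();     /* cap at the HSA size for FCP dump    */
    reserve_oldmem();         /* keep the allocator off oldmem       */
    reserve_kernel();         /* lowcore, command line, kernel image */
    reserve_initrd();
    reserve_elfcorehdr();
    memblock_allow_resize();
    detect_memory_memblock(); /* now add all installed memory        */
    remove_oldmem();          /* forget memory above the kdump area  */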
| 675 | 560 | ||
| @@ -680,167 +565,132 @@ static void __init reserve_crashkernel(void) | |||
| 680 | { | 565 | { |
| 681 | #ifdef CONFIG_CRASH_DUMP | 566 | #ifdef CONFIG_CRASH_DUMP |
| 682 | unsigned long long crash_base, crash_size; | 567 | unsigned long long crash_base, crash_size; |
| 683 | char *msg = NULL; | 568 | phys_addr_t low, high; |
| 684 | int rc; | 569 | int rc; |
| 685 | 570 | ||
| 686 | rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, | 571 | rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, |
| 687 | &crash_base); | 572 | &crash_base); |
| 688 | if (rc || crash_size == 0) | 573 | |
| 689 | return; | ||
| 690 | crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); | 574 | crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); |
| 691 | crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); | 575 | crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); |
| 692 | if (register_memory_notifier(&kdump_mem_nb)) | 576 | if (rc || crash_size == 0) |
| 693 | return; | 577 | return; |
| 694 | if (!crash_base) | 578 | |
| 695 | crash_base = find_crash_base(crash_size, &msg); | 579 | if (memblock.memory.regions[0].size < crash_size) { |
| 696 | if (!crash_base) { | 580 | pr_info("crashkernel reservation failed: %s\n", |
| 697 | pr_info("crashkernel reservation failed: %s\n", msg); | 581 | "first memory chunk must be at least crashkernel size"); |
| 698 | unregister_memory_notifier(&kdump_mem_nb); | ||
| 699 | return; | 582 | return; |
| 700 | } | 583 | } |
| 701 | if (verify_crash_base(crash_base, crash_size, &msg)) { | 584 | |
| 702 | pr_info("crashkernel reservation failed: %s\n", msg); | 585 | low = crash_base ?: OLDMEM_BASE; |
| 703 | unregister_memory_notifier(&kdump_mem_nb); | 586 | high = low + crash_size; |
| 587 | if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) { | ||
| 588 | /* The crashkernel fits into OLDMEM, reuse OLDMEM */ | ||
| 589 | crash_base = low; | ||
| 590 | } else { | ||
| 591 | /* Find suitable area in free memory */ | ||
| 592 | low = max_t(unsigned long, crash_size, sclp_get_hsa_size()); | ||
| 593 | high = crash_base ? crash_base + crash_size : ULONG_MAX; | ||
| 594 | |||
| 595 | if (crash_base && crash_base < low) { | ||
| 596 | pr_info("crashkernel reservation failed: %s\n", | ||
| 597 | "crash_base too low"); | ||
| 598 | return; | ||
| 599 | } | ||
| 600 | low = crash_base ?: low; | ||
| 601 | crash_base = memblock_find_in_range(low, high, crash_size, | ||
| 602 | KEXEC_CRASH_MEM_ALIGN); | ||
| 603 | } | ||
| 604 | |||
| 605 | if (!crash_base) { | ||
| 606 | pr_info("crashkernel reservation failed: %s\n", | ||
| 607 | "no suitable area found"); | ||
| 704 | return; | 608 | return; |
| 705 | } | 609 | } |
| 610 | |||
| 611 | if (register_memory_notifier(&kdump_mem_nb)) | ||
| 612 | return; | ||
| 613 | |||
| 706 | if (!OLDMEM_BASE && MACHINE_IS_VM) | 614 | if (!OLDMEM_BASE && MACHINE_IS_VM) |
| 707 | diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); | 615 | diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); |
| 708 | crashk_res.start = crash_base; | 616 | crashk_res.start = crash_base; |
| 709 | crashk_res.end = crash_base + crash_size - 1; | 617 | crashk_res.end = crash_base + crash_size - 1; |
| 710 | insert_resource(&iomem_resource, &crashk_res); | 618 | insert_resource(&iomem_resource, &crashk_res); |
| 711 | create_mem_hole(memory_chunk, crash_base, crash_size); | 619 | memblock_remove(crash_base, crash_size); |
| 712 | pr_info("Reserving %lluMB of memory at %lluMB " | 620 | pr_info("Reserving %lluMB of memory at %lluMB " |
| 713 | "for crashkernel (System RAM: %luMB)\n", | 621 | "for crashkernel (System RAM: %luMB)\n", |
| 714 | crash_size >> 20, crash_base >> 20, memory_end >> 20); | 622 | crash_size >> 20, crash_base >> 20, |
| 623 | (unsigned long)memblock.memory.total_size >> 20); | ||
| 715 | os_info_crashkernel_add(crash_base, crash_size); | 624 | os_info_crashkernel_add(crash_base, crash_size); |
| 716 | #endif | 625 | #endif |
| 717 | } | 626 | } |
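Concretely, the fallback search path now goes through memblock_find_in_range(). A hedged sketch of how a plain "crashkernel=128M" request flows through it; the size is illustrative and the signature is the memblock API this patch assumes:

    /* crash_size = 128 MB, no fixed base requested on the command line. */
    unsigned long long crash_size = 128ULL << 20;
    phys_addr_t low = max_t(unsigned long, crash_size, sclp_get_hsa_size());
    phys_addr_t high = ULONG_MAX;
    phys_addr_t base;

    base = memblock_find_in_range(low, high, crash_size,
                                  KEXEC_CRASH_MEM_ALIGN);
    if (base)
            memblock_remove(base, crash_size); /* carve out the region */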
| 718 | 627 | ||
| 719 | static void __init setup_memory(void) | 628 | /* |
| 629 | * Reserve the initrd from being used by memblock | ||
| 630 | */ | ||
| 631 | static void __init reserve_initrd(void) | ||
| 720 | { | 632 | { |
| 721 | unsigned long bootmap_size; | 633 | #ifdef CONFIG_BLK_DEV_INITRD |
| 722 | unsigned long start_pfn, end_pfn; | 634 | initrd_start = INITRD_START; |
| 723 | int i; | 635 | initrd_end = initrd_start + INITRD_SIZE; |
| 636 | memblock_reserve(INITRD_START, INITRD_SIZE); | ||
| 637 | #endif | ||
| 638 | } | ||
| 724 | 639 | ||
| 725 | /* | 640 | /* |
| 726 | * partially used pages are not usable - thus | 641 | * Check for initrd being in usable memory |
| 727 | * we are rounding upwards: | 642 | */ |
| 728 | */ | 643 | static void __init check_initrd(void) |
| 644 | { | ||
| 645 | #ifdef CONFIG_BLK_DEV_INITRD | ||
| 646 | if (INITRD_START && INITRD_SIZE && | ||
| 647 | !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) { | ||
| 648 | pr_err("initrd does not fit memory.\n"); | ||
| 649 | memblock_free(INITRD_START, INITRD_SIZE); | ||
| 650 | initrd_start = initrd_end = 0; | ||
| 651 | } | ||
| 652 | #endif | ||
| 653 | } | ||
| 654 | |||
| 655 | /* | ||
| 656 | * Reserve all kernel text | ||
| 657 | */ | ||
| 658 | static void __init reserve_kernel(void) | ||
| 659 | { | ||
| 660 | unsigned long start_pfn; | ||
| 729 | start_pfn = PFN_UP(__pa(&_end)); | 661 | start_pfn = PFN_UP(__pa(&_end)); |
| 730 | end_pfn = max_pfn = PFN_DOWN(memory_end); | ||
| 731 | 662 | ||
| 732 | #ifdef CONFIG_BLK_DEV_INITRD | ||
| 733 | /* | 663 | /* |
| 734 | * Move the initrd in case the bitmap of the bootmem allocator | 664 | * Reserve memory used for lowcore/command line/kernel image. |
| 735 | * would overwrite it. | ||
| 736 | */ | 665 | */ |
| 666 | memblock_reserve(0, (unsigned long)_ehead); | ||
| 667 | memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn) | ||
| 668 | - (unsigned long)_stext); | ||
| 669 | } | ||
| 737 | 670 | ||
| 738 | if (INITRD_START && INITRD_SIZE) { | 671 | static void __init reserve_elfcorehdr(void) |
| 739 | unsigned long bmap_size; | 672 | { |
| 740 | unsigned long start; | ||
| 741 | |||
| 742 | bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1); | ||
| 743 | bmap_size = PFN_PHYS(bmap_size); | ||
| 744 | |||
| 745 | if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { | ||
| 746 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; | ||
| 747 | |||
| 748 | #ifdef CONFIG_CRASH_DUMP | 673 | #ifdef CONFIG_CRASH_DUMP |
| 749 | if (OLDMEM_BASE) { | 674 | if (is_kdump_kernel()) |
| 750 | /* Move initrd behind kdump oldmem */ | 675 | memblock_reserve(elfcorehdr_addr - OLDMEM_BASE, |
| 751 | if (start + INITRD_SIZE > OLDMEM_BASE && | 676 | PAGE_ALIGN(elfcorehdr_size)); |
| 752 | start < OLDMEM_BASE + OLDMEM_SIZE) | ||
| 753 | start = OLDMEM_BASE + OLDMEM_SIZE; | ||
| 754 | } | ||
| 755 | #endif | ||
| 756 | if (start + INITRD_SIZE > memory_end) { | ||
| 757 | pr_err("initrd extends beyond end of " | ||
| 758 | "memory (0x%08lx > 0x%08lx) " | ||
| 759 | "disabling initrd\n", | ||
| 760 | start + INITRD_SIZE, memory_end); | ||
| 761 | INITRD_START = INITRD_SIZE = 0; | ||
| 762 | } else { | ||
| 763 | pr_info("Moving initrd (0x%08lx -> " | ||
| 764 | "0x%08lx, size: %ld)\n", | ||
| 765 | INITRD_START, start, INITRD_SIZE); | ||
| 766 | memmove((void *) start, (void *) INITRD_START, | ||
| 767 | INITRD_SIZE); | ||
| 768 | INITRD_START = start; | ||
| 769 | } | ||
| 770 | } | ||
| 771 | } | ||
| 772 | #endif | 677 | #endif |
| 678 | } | ||
| 773 | 679 | ||
| 774 | /* | 680 | static void __init setup_memory(void) |
| 775 | * Initialize the boot-time allocator | 681 | { |
| 776 | */ | 682 | struct memblock_region *reg; |
| 777 | bootmap_size = init_bootmem(start_pfn, end_pfn); | ||
| 778 | 683 | ||
| 779 | /* | 684 | /* |
| 780 | * Register RAM areas with the bootmem allocator. | 685 | * Init storage key for present memory |
| 781 | */ | 686 | */ |
| 782 | 687 | for_each_memblock(memory, reg) { | |
| 783 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 688 | storage_key_init_range(reg->base, reg->base + reg->size); |
| 784 | unsigned long start_chunk, end_chunk, pfn; | ||
| 785 | |||
| 786 | if (!memory_chunk[i].size) | ||
| 787 | continue; | ||
| 788 | start_chunk = PFN_DOWN(memory_chunk[i].addr); | ||
| 789 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); | ||
| 790 | end_chunk = min(end_chunk, end_pfn); | ||
| 791 | if (start_chunk >= end_chunk) | ||
| 792 | continue; | ||
| 793 | memblock_add_node(PFN_PHYS(start_chunk), | ||
| 794 | PFN_PHYS(end_chunk - start_chunk), 0); | ||
| 795 | pfn = max(start_chunk, start_pfn); | ||
| 796 | storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk)); | ||
| 797 | } | 689 | } |
| 798 | |||
| 799 | psw_set_key(PAGE_DEFAULT_KEY); | 690 | psw_set_key(PAGE_DEFAULT_KEY); |
| 800 | 691 | ||
| 801 | free_bootmem_with_active_regions(0, max_pfn); | 692 | /* Only cosmetics */ |
| 802 | 693 | memblock_enforce_memory_limit(memblock_end_of_DRAM()); | |
| 803 | /* | ||
| 804 | * Reserve memory used for lowcore/command line/kernel image. | ||
| 805 | */ | ||
| 806 | reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT); | ||
| 807 | reserve_bootmem((unsigned long)_stext, | ||
| 808 | PFN_PHYS(start_pfn) - (unsigned long)_stext, | ||
| 809 | BOOTMEM_DEFAULT); | ||
| 810 | /* | ||
| 811 | * Reserve the bootmem bitmap itself as well. We do this in two | ||
| 812 | * steps (first step was init_bootmem()) because this catches | ||
| 813 | * the (very unlikely) case of us accidentally initializing the | ||
| 814 | * bootmem allocator with an invalid RAM area. | ||
| 815 | */ | ||
| 816 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, | ||
| 817 | BOOTMEM_DEFAULT); | ||
| 818 | |||
| 819 | #ifdef CONFIG_CRASH_DUMP | ||
| 820 | if (crashk_res.start) | ||
| 821 | reserve_bootmem(crashk_res.start, | ||
| 822 | crashk_res.end - crashk_res.start + 1, | ||
| 823 | BOOTMEM_DEFAULT); | ||
| 824 | if (is_kdump_kernel()) | ||
| 825 | reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE, | ||
| 826 | PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT); | ||
| 827 | #endif | ||
| 828 | #ifdef CONFIG_BLK_DEV_INITRD | ||
| 829 | if (INITRD_START && INITRD_SIZE) { | ||
| 830 | if (INITRD_START + INITRD_SIZE <= memory_end) { | ||
| 831 | reserve_bootmem(INITRD_START, INITRD_SIZE, | ||
| 832 | BOOTMEM_DEFAULT); | ||
| 833 | initrd_start = INITRD_START; | ||
| 834 | initrd_end = initrd_start + INITRD_SIZE; | ||
| 835 | } else { | ||
| 836 | pr_err("initrd extends beyond end of " | ||
| 837 | "memory (0x%08lx > 0x%08lx) " | ||
| 838 | "disabling initrd\n", | ||
| 839 | initrd_start + INITRD_SIZE, memory_end); | ||
| 840 | initrd_start = initrd_end = 0; | ||
| 841 | } | ||
| 842 | } | ||
| 843 | #endif | ||
| 844 | } | 694 | } |
| 845 | 695 | ||
| 846 | /* | 696 | /* |
| @@ -989,23 +839,46 @@ void __init setup_arch(char **cmdline_p) | |||
| 989 | 839 | ||
| 990 | ROOT_DEV = Root_RAM0; | 840 | ROOT_DEV = Root_RAM0; |
| 991 | 841 | ||
| 842 | /* Is init_mm really needed? */ | ||
| 992 | init_mm.start_code = PAGE_OFFSET; | 843 | init_mm.start_code = PAGE_OFFSET; |
| 993 | init_mm.end_code = (unsigned long) &_etext; | 844 | init_mm.end_code = (unsigned long) &_etext; |
| 994 | init_mm.end_data = (unsigned long) &_edata; | 845 | init_mm.end_data = (unsigned long) &_edata; |
| 995 | init_mm.brk = (unsigned long) &_end; | 846 | init_mm.brk = (unsigned long) &_end; |
| 996 | 847 | ||
| 997 | parse_early_param(); | 848 | parse_early_param(); |
| 998 | detect_memory_layout(memory_chunk, memory_end); | ||
| 999 | os_info_init(); | 849 | os_info_init(); |
| 1000 | setup_ipl(); | 850 | setup_ipl(); |
| 851 | |||
| 852 | /* Do some memory reservations *before* memory is added to memblock */ | ||
| 853 | reserve_memory_end(); | ||
| 1001 | reserve_oldmem(); | 854 | reserve_oldmem(); |
| 855 | reserve_kernel(); | ||
| 856 | reserve_initrd(); | ||
| 857 | reserve_elfcorehdr(); | ||
| 858 | memblock_allow_resize(); | ||
| 859 | |||
| 860 | /* Get information about *all* installed memory */ | ||
| 861 | detect_memory_memblock(); | ||
| 862 | |||
| 863 | remove_oldmem(); | ||
| 864 | |||
| 865 | /* | ||
| 866 | * Make sure all chunks are MAX_ORDER aligned so we don't need the | ||
| 867 | * extra checks that HOLES_IN_ZONE would require. | ||
| 868 | * | ||
| 869 | * Is this still required? | ||
| 870 | */ | ||
| 871 | memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT)); | ||
| 872 | |||
| 1002 | setup_memory_end(); | 873 | setup_memory_end(); |
| 1003 | reserve_crashkernel(); | ||
| 1004 | setup_memory(); | 874 | setup_memory(); |
| 875 | |||
| 876 | check_initrd(); | ||
| 877 | reserve_crashkernel(); | ||
| 878 | |||
| 1005 | setup_resources(); | 879 | setup_resources(); |
| 1006 | setup_vmcoreinfo(); | 880 | setup_vmcoreinfo(); |
| 1007 | setup_lowcore(); | 881 | setup_lowcore(); |
| 1008 | |||
| 1009 | smp_fill_possible_mask(); | 882 | smp_fill_possible_mask(); |
| 1010 | cpu_init(); | 883 | cpu_init(); |
| 1011 | s390_init_cpu_topology(); | 884 | s390_init_cpu_topology(); |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index d8fd508ccd1e..42b49f9e19bf 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
| @@ -113,7 +113,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
| 113 | sizeof(current->thread.fp_regs)); | 113 | sizeof(current->thread.fp_regs)); |
| 114 | 114 | ||
| 115 | restore_fp_regs(current->thread.fp_regs.fprs); | 115 | restore_fp_regs(current->thread.fp_regs.fprs); |
| 116 | clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */ | 116 | clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ |
| 117 | return 0; | 117 | return 0; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| @@ -356,7 +356,7 @@ void do_signal(struct pt_regs *regs) | |||
| 356 | * call information. | 356 | * call information. |
| 357 | */ | 357 | */ |
| 358 | current_thread_info()->system_call = | 358 | current_thread_info()->system_call = |
| 359 | test_thread_flag(TIF_SYSCALL) ? regs->int_code : 0; | 359 | test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0; |
| 360 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 360 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
| 361 | 361 | ||
| 362 | if (signr > 0) { | 362 | if (signr > 0) { |
| @@ -384,7 +384,7 @@ void do_signal(struct pt_regs *regs) | |||
| 384 | } | 384 | } |
| 385 | } | 385 | } |
| 386 | /* No longer in a system call */ | 386 | /* No longer in a system call */ |
| 387 | clear_thread_flag(TIF_SYSCALL); | 387 | clear_pt_regs_flag(regs, PIF_SYSCALL); |
| 388 | 388 | ||
| 389 | if (is_compat_task()) | 389 | if (is_compat_task()) |
| 390 | handle_signal32(signr, &ka, &info, oldset, regs); | 390 | handle_signal32(signr, &ka, &info, oldset, regs); |
| @@ -394,7 +394,7 @@ void do_signal(struct pt_regs *regs) | |||
| 394 | } | 394 | } |
| 395 | 395 | ||
| 396 | /* No handlers present - check for system call restart */ | 396 | /* No handlers present - check for system call restart */ |
| 397 | clear_thread_flag(TIF_SYSCALL); | 397 | clear_pt_regs_flag(regs, PIF_SYSCALL); |
| 398 | if (current_thread_info()->system_call) { | 398 | if (current_thread_info()->system_call) { |
| 399 | regs->int_code = current_thread_info()->system_call; | 399 | regs->int_code = current_thread_info()->system_call; |
| 400 | switch (regs->gprs[2]) { | 400 | switch (regs->gprs[2]) { |
| @@ -407,9 +407,9 @@ void do_signal(struct pt_regs *regs) | |||
| 407 | case -ERESTARTNOINTR: | 407 | case -ERESTARTNOINTR: |
| 408 | /* Restart system call with magic TIF bit. */ | 408 | /* Restart system call with magic TIF bit. */ |
| 409 | regs->gprs[2] = regs->orig_gpr2; | 409 | regs->gprs[2] = regs->orig_gpr2; |
| 410 | set_thread_flag(TIF_SYSCALL); | 410 | set_pt_regs_flag(regs, PIF_SYSCALL); |
| 411 | if (test_thread_flag(TIF_SINGLE_STEP)) | 411 | if (test_thread_flag(TIF_SINGLE_STEP)) |
| 412 | set_thread_flag(TIF_PER_TRAP); | 412 | set_pt_regs_flag(regs, PIF_PER_TRAP); |
| 413 | break; | 413 | break; |
| 414 | } | 414 | } |
| 415 | } | 415 | } |
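The signal code is one of several users converted from thread_info flags to per-pt_regs flags. A minimal sketch of the helpers this diff switches to, assuming the shape they take in arch/s390/include/asm/ptrace.h within this series:

    /* Assumed helper shape: the flags live in struct pt_regs, so they
     * are scoped to one kernel entry instead of to the whole task. */
    static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
    {
            regs->flags |= (1UL << flag);
    }

    static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
    {
            regs->flags &= ~(1UL << flag);
    }

    static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
    {
            return !!(regs->flags & (1UL << flag));
    }

Note the restart path above must set PIF_SYSCALL and, for single step, PIF_PER_TRAP, mirroring what the old TIF bits did.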
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 86e65ec3422b..243c7e512600 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
| @@ -170,6 +170,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) | |||
| 170 | lc->panic_stack = pcpu->panic_stack + PAGE_SIZE | 170 | lc->panic_stack = pcpu->panic_stack + PAGE_SIZE |
| 171 | - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); | 171 | - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); |
| 172 | lc->cpu_nr = cpu; | 172 | lc->cpu_nr = cpu; |
| 173 | lc->spinlock_lockval = arch_spin_lockval(cpu); | ||
| 173 | #ifndef CONFIG_64BIT | 174 | #ifndef CONFIG_64BIT |
| 174 | if (MACHINE_HAS_IEEE) { | 175 | if (MACHINE_HAS_IEEE) { |
| 175 | lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); | 176 | lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); |
| @@ -226,6 +227,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) | |||
| 226 | cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); | 227 | cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); |
| 227 | atomic_inc(&init_mm.context.attach_count); | 228 | atomic_inc(&init_mm.context.attach_count); |
| 228 | lc->cpu_nr = cpu; | 229 | lc->cpu_nr = cpu; |
| 230 | lc->spinlock_lockval = arch_spin_lockval(cpu); | ||
| 229 | lc->percpu_offset = __per_cpu_offset[cpu]; | 231 | lc->percpu_offset = __per_cpu_offset[cpu]; |
| 230 | lc->kernel_asce = S390_lowcore.kernel_asce; | 232 | lc->kernel_asce = S390_lowcore.kernel_asce; |
| 231 | lc->machine_flags = S390_lowcore.machine_flags; | 233 | lc->machine_flags = S390_lowcore.machine_flags; |
| @@ -403,15 +405,6 @@ void smp_send_stop(void) | |||
| 403 | } | 405 | } |
| 404 | 406 | ||
| 405 | /* | 407 | /* |
| 406 | * Stop the current cpu. | ||
| 407 | */ | ||
| 408 | void smp_stop_cpu(void) | ||
| 409 | { | ||
| 410 | pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); | ||
| 411 | for (;;) ; | ||
| 412 | } | ||
| 413 | |||
| 414 | /* | ||
| 415 | * This is the main routine where commands issued by other | 408 | * This is the main routine where commands issued by other |
| 416 | * cpus are handled. | 409 | * cpus are handled. |
| 417 | */ | 410 | */ |
| @@ -519,7 +512,7 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
| 519 | } | 512 | } |
| 520 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 513 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
| 521 | 514 | ||
| 522 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) | 515 | #ifdef CONFIG_CRASH_DUMP |
| 523 | 516 | ||
| 524 | static void __init smp_get_save_area(int cpu, u16 address) | 517 | static void __init smp_get_save_area(int cpu, u16 address) |
| 525 | { | 518 | { |
| @@ -534,14 +527,12 @@ static void __init smp_get_save_area(int cpu, u16 address) | |||
| 534 | save_area = dump_save_area_create(cpu); | 527 | save_area = dump_save_area_create(cpu); |
| 535 | if (!save_area) | 528 | if (!save_area) |
| 536 | panic("could not allocate memory for save area\n"); | 529 | panic("could not allocate memory for save area\n"); |
| 537 | #ifdef CONFIG_CRASH_DUMP | ||
| 538 | if (address == boot_cpu_address) { | 530 | if (address == boot_cpu_address) { |
| 539 | /* Copy the registers of the boot cpu. */ | 531 | /* Copy the registers of the boot cpu. */ |
| 540 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), | 532 | copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), |
| 541 | SAVE_AREA_BASE - PAGE_SIZE, 0); | 533 | SAVE_AREA_BASE - PAGE_SIZE, 0); |
| 542 | return; | 534 | return; |
| 543 | } | 535 | } |
| 544 | #endif | ||
| 545 | /* Get the registers of a non-boot cpu. */ | 536 | /* Get the registers of a non-boot cpu. */ |
| 546 | __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); | 537 | __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); |
| 547 | memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); | 538 | memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); |
| @@ -558,11 +549,11 @@ int smp_store_status(int cpu) | |||
| 558 | return 0; | 549 | return 0; |
| 559 | } | 550 | } |
| 560 | 551 | ||
| 561 | #else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ | 552 | #else /* CONFIG_CRASH_DUMP */ |
| 562 | 553 | ||
| 563 | static inline void smp_get_save_area(int cpu, u16 address) { } | 554 | static inline void smp_get_save_area(int cpu, u16 address) { } |
| 564 | 555 | ||
| 565 | #endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ | 556 | #endif /* CONFIG_CRASH_DUMP */ |
| 566 | 557 | ||
| 567 | void smp_cpu_set_polarization(int cpu, int val) | 558 | void smp_cpu_set_polarization(int cpu, int val) |
| 568 | { | 559 | { |
| @@ -809,6 +800,7 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
| 809 | void __init smp_setup_processor_id(void) | 800 | void __init smp_setup_processor_id(void) |
| 810 | { | 801 | { |
| 811 | S390_lowcore.cpu_nr = 0; | 802 | S390_lowcore.cpu_nr = 0; |
| 803 | S390_lowcore.spinlock_lockval = arch_spin_lockval(0); | ||
| 812 | } | 804 | } |
| 813 | 805 | ||
| 814 | /* | 806 | /* |
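All three hunks above seed the lowcore with arch_spin_lockval(cpu) so that SPINLOCK_LOCKVAL is valid everywhere a lock can be taken: the boot CPU, freshly allocated lowcores, and reinitialized secondaries. A sketch of the assumed definitions (asm/spinlock.h in this series):

    /* Assumed definitions: the ticket written into a held lock is a
     * per-CPU value cached in lowcore, instead of being recomputed as
     * ~smp_processor_id() on every slow-path iteration. */
    #define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

    static inline int arch_spin_lockval(int cpu)
    {
            return ~cpu; /* complemented so that 0 still means "unlocked" */
    }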
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 386d37a228bb..0931b110c826 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
| @@ -226,7 +226,7 @@ void update_vsyscall(struct timekeeper *tk) | |||
| 226 | vdso_data->wtom_clock_sec = | 226 | vdso_data->wtom_clock_sec = |
| 227 | tk->xtime_sec + tk->wall_to_monotonic.tv_sec; | 227 | tk->xtime_sec + tk->wall_to_monotonic.tv_sec; |
| 228 | vdso_data->wtom_clock_nsec = tk->xtime_nsec + | 228 | vdso_data->wtom_clock_nsec = tk->xtime_nsec + |
| 229 | + (tk->wall_to_monotonic.tv_nsec << tk->shift); | 229 | + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift); |
| 230 | nsecps = (u64) NSEC_PER_SEC << tk->shift; | 230 | nsecps = (u64) NSEC_PER_SEC << tk->shift; |
| 231 | while (vdso_data->wtom_clock_nsec >= nsecps) { | 231 | while (vdso_data->wtom_clock_nsec >= nsecps) { |
| 232 | vdso_data->wtom_clock_nsec -= nsecps; | 232 | vdso_data->wtom_clock_nsec -= nsecps; |
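The added (u64) cast matters on 31-bit kernels, where tv_nsec is a 32-bit long: the left shift by tk->shift is then evaluated in 32 bits and can overflow before the result is widened for the 64-bit store. An illustrative before/after:

    /* Illustrative values: tv_nsec near one second, shift of 8. */
    long nsec = 999999999L;        /* fits in 32 bits                  */
    u64 wrong = nsec << 8;         /* shift done in 32 bits, truncated */
    u64 right = (u64) nsec << 8;   /* ~2.56e11, kept intact            */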
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 6298fed11ced..fa3b8cdaadac 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
| @@ -333,7 +333,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info, | |||
| 333 | nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; | 333 | nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; |
| 334 | nr_masks = max(nr_masks, 1); | 334 | nr_masks = max(nr_masks, 1); |
| 335 | for (i = 0; i < nr_masks; i++) { | 335 | for (i = 0; i < nr_masks; i++) { |
| 336 | mask->next = alloc_bootmem(sizeof(struct mask_info)); | 336 | mask->next = alloc_bootmem_align( |
| 337 | roundup_pow_of_two(sizeof(struct mask_info)), | ||
| 338 | roundup_pow_of_two(sizeof(struct mask_info))); | ||
| 337 | mask = mask->next; | 339 | mask = mask->next; |
| 338 | } | 340 | } |
| 339 | } | 341 | } |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 9ae6664ff08c..825fe7bf95a6 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
| @@ -907,7 +907,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) | |||
| 907 | if (need_resched()) | 907 | if (need_resched()) |
| 908 | schedule(); | 908 | schedule(); |
| 909 | 909 | ||
| 910 | if (test_thread_flag(TIF_MCCK_PENDING)) | 910 | if (test_cpu_flag(CIF_MCCK_PENDING)) |
| 911 | s390_handle_mcck(); | 911 | s390_handle_mcck(); |
| 912 | 912 | ||
| 913 | if (!kvm_is_ucontrol(vcpu->kvm)) | 913 | if (!kvm_is_ucontrol(vcpu->kvm)) |
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f709983f41f8..5b0e445bc3f3 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c | |||
| @@ -26,83 +26,81 @@ __setup("spin_retry=", spin_retry_setup); | |||
| 26 | 26 | ||
| 27 | void arch_spin_lock_wait(arch_spinlock_t *lp) | 27 | void arch_spin_lock_wait(arch_spinlock_t *lp) |
| 28 | { | 28 | { |
| 29 | int count = spin_retry; | 29 | unsigned int cpu = SPINLOCK_LOCKVAL; |
| 30 | unsigned int cpu = ~smp_processor_id(); | ||
| 31 | unsigned int owner; | 30 | unsigned int owner; |
| 31 | int count; | ||
| 32 | 32 | ||
| 33 | while (1) { | 33 | while (1) { |
| 34 | owner = lp->owner_cpu; | 34 | owner = ACCESS_ONCE(lp->lock); |
| 35 | if (!owner || smp_vcpu_scheduled(~owner)) { | 35 | /* Try to get the lock if it is free. */ |
| 36 | for (count = spin_retry; count > 0; count--) { | 36 | if (!owner) { |
| 37 | if (arch_spin_is_locked(lp)) | 37 | if (_raw_compare_and_swap(&lp->lock, 0, cpu)) |
| 38 | continue; | 38 | return; |
| 39 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, | 39 | continue; |
| 40 | cpu) == 0) | ||
| 41 | return; | ||
| 42 | } | ||
| 43 | if (MACHINE_IS_LPAR) | ||
| 44 | continue; | ||
| 45 | } | 40 | } |
| 46 | owner = lp->owner_cpu; | 41 | /* Check if the lock owner is running. */ |
| 47 | if (owner) | 42 | if (!smp_vcpu_scheduled(~owner)) { |
| 43 | smp_yield_cpu(~owner); | ||
| 44 | continue; | ||
| 45 | } | ||
| 46 | /* Loop for a while on the lock value. */ | ||
| 47 | count = spin_retry; | ||
| 48 | do { | ||
| 49 | owner = ACCESS_ONCE(lp->lock); | ||
| 50 | } while (owner && count-- > 0); | ||
| 51 | if (!owner) | ||
| 52 | continue; | ||
| 53 | /* | ||
| 54 | * For multiple layers of hypervisors, e.g. z/VM + LPAR, | ||
| 55 | * yield the CPU if the lock is still unavailable. | ||
| 56 | */ | ||
| 57 | if (!MACHINE_IS_LPAR) | ||
| 48 | smp_yield_cpu(~owner); | 58 | smp_yield_cpu(~owner); |
| 49 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | ||
| 50 | return; | ||
| 51 | } | 59 | } |
| 52 | } | 60 | } |
| 53 | EXPORT_SYMBOL(arch_spin_lock_wait); | 61 | EXPORT_SYMBOL(arch_spin_lock_wait); |
| 54 | 62 | ||
| 55 | void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) | 63 | void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) |
| 56 | { | 64 | { |
| 57 | int count = spin_retry; | 65 | unsigned int cpu = SPINLOCK_LOCKVAL; |
| 58 | unsigned int cpu = ~smp_processor_id(); | ||
| 59 | unsigned int owner; | 66 | unsigned int owner; |
| 67 | int count; | ||
| 60 | 68 | ||
| 61 | local_irq_restore(flags); | 69 | local_irq_restore(flags); |
| 62 | while (1) { | 70 | while (1) { |
| 63 | owner = lp->owner_cpu; | 71 | owner = ACCESS_ONCE(lp->lock); |
| 64 | if (!owner || smp_vcpu_scheduled(~owner)) { | 72 | /* Try to get the lock if it is free. */ |
| 65 | for (count = spin_retry; count > 0; count--) { | 73 | if (!owner) { |
| 66 | if (arch_spin_is_locked(lp)) | 74 | local_irq_disable(); |
| 67 | continue; | 75 | if (_raw_compare_and_swap(&lp->lock, 0, cpu)) |
| 68 | local_irq_disable(); | 76 | return; |
| 69 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, | 77 | local_irq_restore(flags); |
| 70 | cpu) == 0) | ||
| 71 | return; | ||
| 72 | local_irq_restore(flags); | ||
| 73 | } | ||
| 74 | if (MACHINE_IS_LPAR) | ||
| 75 | continue; | ||
| 76 | } | 78 | } |
| 77 | owner = lp->owner_cpu; | 79 | /* Check if the lock owner is running. */ |
| 78 | if (owner) | 80 | if (!smp_vcpu_scheduled(~owner)) { |
| 79 | smp_yield_cpu(~owner); | 81 | smp_yield_cpu(~owner); |
| 80 | local_irq_disable(); | ||
| 81 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | ||
| 82 | return; | ||
| 83 | local_irq_restore(flags); | ||
| 84 | } | ||
| 85 | } | ||
| 86 | EXPORT_SYMBOL(arch_spin_lock_wait_flags); | ||
| 87 | |||
| 88 | int arch_spin_trylock_retry(arch_spinlock_t *lp) | ||
| 89 | { | ||
| 90 | unsigned int cpu = ~smp_processor_id(); | ||
| 91 | int count; | ||
| 92 | |||
| 93 | for (count = spin_retry; count > 0; count--) { | ||
| 94 | if (arch_spin_is_locked(lp)) | ||
| 95 | continue; | 82 | continue; |
| 96 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 83 | } |
| 97 | return 1; | 84 | /* Loop for a while on the lock value. */ |
| 85 | count = spin_retry; | ||
| 86 | do { | ||
| 87 | owner = ACCESS_ONCE(lp->lock); | ||
| 88 | } while (owner && count-- > 0); | ||
| 89 | if (!owner) | ||
| 90 | continue; | ||
| 91 | /* | ||
| 92 | * For multiple layers of hypervisors, e.g. z/VM + LPAR, | ||
| 93 | * yield the CPU if the lock is still unavailable. | ||
| 94 | */ | ||
| 95 | if (!MACHINE_IS_LPAR) | ||
| 96 | smp_yield_cpu(~owner); | ||
| 98 | } | 97 | } |
| 99 | return 0; | ||
| 100 | } | 98 | } |
| 101 | EXPORT_SYMBOL(arch_spin_trylock_retry); | 99 | EXPORT_SYMBOL(arch_spin_lock_wait_flags); |
| 102 | 100 | ||
| 103 | void arch_spin_relax(arch_spinlock_t *lock) | 101 | void arch_spin_relax(arch_spinlock_t *lp) |
| 104 | { | 102 | { |
| 105 | unsigned int cpu = lock->owner_cpu; | 103 | unsigned int cpu = lp->lock; |
| 106 | if (cpu != 0) { | 104 | if (cpu != 0) { |
| 107 | if (MACHINE_IS_VM || MACHINE_IS_KVM || | 105 | if (MACHINE_IS_VM || MACHINE_IS_KVM || |
| 108 | !smp_vcpu_scheduled(~cpu)) | 106 | !smp_vcpu_scheduled(~cpu)) |
| @@ -111,6 +109,17 @@ void arch_spin_relax(arch_spinlock_t *lock) | |||
| 111 | } | 109 | } |
| 112 | EXPORT_SYMBOL(arch_spin_relax); | 110 | EXPORT_SYMBOL(arch_spin_relax); |
| 113 | 111 | ||
| 112 | int arch_spin_trylock_retry(arch_spinlock_t *lp) | ||
| 113 | { | ||
| 114 | int count; | ||
| 115 | |||
| 116 | for (count = spin_retry; count > 0; count--) | ||
| 117 | if (arch_spin_trylock_once(lp)) | ||
| 118 | return 1; | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | EXPORT_SYMBOL(arch_spin_trylock_retry); | ||
| 122 | |||
| 114 | void _raw_read_lock_wait(arch_rwlock_t *rw) | 123 | void _raw_read_lock_wait(arch_rwlock_t *rw) |
| 115 | { | 124 | { |
| 116 | unsigned int old; | 125 | unsigned int old; |
| @@ -121,10 +130,10 @@ void _raw_read_lock_wait(arch_rwlock_t *rw) | |||
| 121 | smp_yield(); | 130 | smp_yield(); |
| 122 | count = spin_retry; | 131 | count = spin_retry; |
| 123 | } | 132 | } |
| 124 | if (!arch_read_can_lock(rw)) | 133 | old = ACCESS_ONCE(rw->lock); |
| 134 | if ((int) old < 0) | ||
| 125 | continue; | 135 | continue; |
| 126 | old = rw->lock & 0x7fffffffU; | 136 | if (_raw_compare_and_swap(&rw->lock, old, old + 1)) |
| 127 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) | ||
| 128 | return; | 137 | return; |
| 129 | } | 138 | } |
| 130 | } | 139 | } |
| @@ -141,12 +150,13 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) | |||
| 141 | smp_yield(); | 150 | smp_yield(); |
| 142 | count = spin_retry; | 151 | count = spin_retry; |
| 143 | } | 152 | } |
| 144 | if (!arch_read_can_lock(rw)) | 153 | old = ACCESS_ONCE(rw->lock); |
| 154 | if ((int) old < 0) | ||
| 145 | continue; | 155 | continue; |
| 146 | old = rw->lock & 0x7fffffffU; | ||
| 147 | local_irq_disable(); | 156 | local_irq_disable(); |
| 148 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) | 157 | if (_raw_compare_and_swap(&rw->lock, old, old + 1)) |
| 149 | return; | 158 | return; |
| 159 | local_irq_restore(flags); | ||
| 150 | } | 160 | } |
| 151 | } | 161 | } |
| 152 | EXPORT_SYMBOL(_raw_read_lock_wait_flags); | 162 | EXPORT_SYMBOL(_raw_read_lock_wait_flags); |
| @@ -157,10 +167,10 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw) | |||
| 157 | int count = spin_retry; | 167 | int count = spin_retry; |
| 158 | 168 | ||
| 159 | while (count-- > 0) { | 169 | while (count-- > 0) { |
| 160 | if (!arch_read_can_lock(rw)) | 170 | old = ACCESS_ONCE(rw->lock); |
| 171 | if ((int) old < 0) | ||
| 161 | continue; | 172 | continue; |
| 162 | old = rw->lock & 0x7fffffffU; | 173 | if (_raw_compare_and_swap(&rw->lock, old, old + 1)) |
| 163 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) | ||
| 164 | return 1; | 174 | return 1; |
| 165 | } | 175 | } |
| 166 | return 0; | 176 | return 0; |
| @@ -169,6 +179,7 @@ EXPORT_SYMBOL(_raw_read_trylock_retry); | |||
| 169 | 179 | ||
| 170 | void _raw_write_lock_wait(arch_rwlock_t *rw) | 180 | void _raw_write_lock_wait(arch_rwlock_t *rw) |
| 171 | { | 181 | { |
| 182 | unsigned int old; | ||
| 172 | int count = spin_retry; | 183 | int count = spin_retry; |
| 173 | 184 | ||
| 174 | while (1) { | 185 | while (1) { |
| @@ -176,9 +187,10 @@ void _raw_write_lock_wait(arch_rwlock_t *rw) | |||
| 176 | smp_yield(); | 187 | smp_yield(); |
| 177 | count = spin_retry; | 188 | count = spin_retry; |
| 178 | } | 189 | } |
| 179 | if (!arch_write_can_lock(rw)) | 190 | old = ACCESS_ONCE(rw->lock); |
| 191 | if (old) | ||
| 180 | continue; | 192 | continue; |
| 181 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 193 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) |
| 182 | return; | 194 | return; |
| 183 | } | 195 | } |
| 184 | } | 196 | } |
| @@ -186,6 +198,7 @@ EXPORT_SYMBOL(_raw_write_lock_wait); | |||
| 186 | 198 | ||
| 187 | void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) | 199 | void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) |
| 188 | { | 200 | { |
| 201 | unsigned int old; | ||
| 189 | int count = spin_retry; | 202 | int count = spin_retry; |
| 190 | 203 | ||
| 191 | local_irq_restore(flags); | 204 | local_irq_restore(flags); |
| @@ -194,23 +207,27 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) | |||
| 194 | smp_yield(); | 207 | smp_yield(); |
| 195 | count = spin_retry; | 208 | count = spin_retry; |
| 196 | } | 209 | } |
| 197 | if (!arch_write_can_lock(rw)) | 210 | old = ACCESS_ONCE(rw->lock); |
| 211 | if (old) | ||
| 198 | continue; | 212 | continue; |
| 199 | local_irq_disable(); | 213 | local_irq_disable(); |
| 200 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 214 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) |
| 201 | return; | 215 | return; |
| 216 | local_irq_restore(flags); | ||
| 202 | } | 217 | } |
| 203 | } | 218 | } |
| 204 | EXPORT_SYMBOL(_raw_write_lock_wait_flags); | 219 | EXPORT_SYMBOL(_raw_write_lock_wait_flags); |
| 205 | 220 | ||
| 206 | int _raw_write_trylock_retry(arch_rwlock_t *rw) | 221 | int _raw_write_trylock_retry(arch_rwlock_t *rw) |
| 207 | { | 222 | { |
| 223 | unsigned int old; | ||
| 208 | int count = spin_retry; | 224 | int count = spin_retry; |
| 209 | 225 | ||
| 210 | while (count-- > 0) { | 226 | while (count-- > 0) { |
| 211 | if (!arch_write_can_lock(rw)) | 227 | old = ACCESS_ONCE(rw->lock); |
| 228 | if (old) | ||
| 212 | continue; | 229 | continue; |
| 213 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 230 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) |
| 214 | return 1; | 231 | return 1; |
| 215 | } | 232 | } |
| 216 | return 0; | 233 | return 0; |
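Every rewritten lock path relies on a _raw_compare_and_swap() that now reports success as a boolean instead of returning the old value, which is why the "== 0" and "== old" comparisons vanish throughout this file. A portable stand-in for the assumed contract; the real helper is a cs-instruction asm block, and the GCC builtin here is purely illustrative:

    /* Returns nonzero iff *lock contained 'old' and now holds 'new'. */
    static inline int _raw_compare_and_swap(unsigned int *lock,
                                            unsigned int old,
                                            unsigned int new)
    {
            return __sync_bool_compare_and_swap(lock, old, new);
    }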
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index 7416efe8eae4..53dd5d7a0c96 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c | |||
| @@ -76,7 +76,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, | |||
| 76 | { | 76 | { |
| 77 | unsigned long tmp1, tmp2; | 77 | unsigned long tmp1, tmp2; |
| 78 | 78 | ||
| 79 | update_primary_asce(current); | 79 | load_kernel_asce(); |
| 80 | tmp1 = -256UL; | 80 | tmp1 = -256UL; |
| 81 | asm volatile( | 81 | asm volatile( |
| 82 | " sacf 0\n" | 82 | " sacf 0\n" |
| @@ -159,7 +159,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, | |||
| 159 | { | 159 | { |
| 160 | unsigned long tmp1, tmp2; | 160 | unsigned long tmp1, tmp2; |
| 161 | 161 | ||
| 162 | update_primary_asce(current); | 162 | load_kernel_asce(); |
| 163 | tmp1 = -256UL; | 163 | tmp1 = -256UL; |
| 164 | asm volatile( | 164 | asm volatile( |
| 165 | " sacf 0\n" | 165 | " sacf 0\n" |
| @@ -225,7 +225,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user | |||
| 225 | { | 225 | { |
| 226 | unsigned long tmp1; | 226 | unsigned long tmp1; |
| 227 | 227 | ||
| 228 | update_primary_asce(current); | 228 | load_kernel_asce(); |
| 229 | asm volatile( | 229 | asm volatile( |
| 230 | " sacf 256\n" | 230 | " sacf 256\n" |
| 231 | " "AHI" %0,-1\n" | 231 | " "AHI" %0,-1\n" |
| @@ -292,7 +292,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size) | |||
| 292 | { | 292 | { |
| 293 | unsigned long tmp1, tmp2; | 293 | unsigned long tmp1, tmp2; |
| 294 | 294 | ||
| 295 | update_primary_asce(current); | 295 | load_kernel_asce(); |
| 296 | asm volatile( | 296 | asm volatile( |
| 297 | " sacf 256\n" | 297 | " sacf 256\n" |
| 298 | " "AHI" %0,-1\n" | 298 | " "AHI" %0,-1\n" |
| @@ -358,7 +358,7 @@ unsigned long __strnlen_user(const char __user *src, unsigned long size) | |||
| 358 | { | 358 | { |
| 359 | if (unlikely(!size)) | 359 | if (unlikely(!size)) |
| 360 | return 0; | 360 | return 0; |
| 361 | update_primary_asce(current); | 361 | load_kernel_asce(); |
| 362 | return strnlen_user_srst(src, size); | 362 | return strnlen_user_srst(src, size); |
| 363 | } | 363 | } |
| 364 | EXPORT_SYMBOL(__strnlen_user); | 364 | EXPORT_SYMBOL(__strnlen_user); |
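The rename from update_primary_asce() to load_kernel_asce() matches what these copy helpers actually need: the kernel ASCE loaded into control register 1 before sacf switches address spaces. A hedged sketch of the assumed helper; the exact body lives in asm/mmu_context.h and may differ:

    /* Assumed shape: make sure CR1 holds the kernel ASCE so the
     * sacf-based copy routines run with the expected primary space. */
    static inline void load_kernel_asce(void)
    {
            unsigned long asce;

            __ctl_store(asce, 1, 1);
            if (asce != S390_lowcore.kernel_asce)
                    __ctl_load(S390_lowcore.kernel_asce, 1, 1);
            set_cpu_flag(CIF_ASCE);
    }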
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 2f51a998a67e..3f3b35403d0a 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
| @@ -415,7 +415,7 @@ static inline int do_exception(struct pt_regs *regs, int access) | |||
| 415 | * The instruction that caused the program check has | 415 | * The instruction that caused the program check has |
| 416 | * been nullified. Don't signal single step via SIGTRAP. | 416 | * been nullified. Don't signal single step via SIGTRAP. |
| 417 | */ | 417 | */ |
| 418 | clear_tsk_thread_flag(tsk, TIF_PER_TRAP); | 418 | clear_pt_regs_flag(regs, PIF_PER_TRAP); |
| 419 | 419 | ||
| 420 | if (notify_page_fault(regs)) | 420 | if (notify_page_fault(regs)) |
| 421 | return 0; | 421 | return 0; |
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c index cca388253a39..5535cfe0ee11 100644 --- a/arch/s390/mm/mem_detect.c +++ b/arch/s390/mm/mem_detect.c | |||
| @@ -6,130 +6,60 @@ | |||
| 6 | 6 | ||
| 7 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
| 8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 9 | #include <linux/memblock.h> | ||
| 10 | #include <linux/init.h> | ||
| 11 | #include <linux/debugfs.h> | ||
| 12 | #include <linux/seq_file.h> | ||
| 9 | #include <asm/ipl.h> | 13 | #include <asm/ipl.h> |
| 10 | #include <asm/sclp.h> | 14 | #include <asm/sclp.h> |
| 11 | #include <asm/setup.h> | 15 | #include <asm/setup.h> |
| 12 | 16 | ||
| 13 | #define ADDR2G (1ULL << 31) | 17 | #define ADDR2G (1ULL << 31) |
| 14 | 18 | ||
| 15 | static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize) | 19 | #define CHUNK_READ_WRITE 0 |
| 20 | #define CHUNK_READ_ONLY 1 | ||
| 21 | |||
| 22 | static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size) | ||
| 23 | { | ||
| 24 | memblock_add_range(&memblock.memory, start, size, 0, 0); | ||
| 25 | memblock_add_range(&memblock.physmem, start, size, 0, 0); | ||
| 26 | } | ||
| 27 | |||
| 28 | void __init detect_memory_memblock(void) | ||
| 16 | { | 29 | { |
| 17 | unsigned long long memsize, rnmax, rzm; | 30 | unsigned long long memsize, rnmax, rzm; |
| 18 | unsigned long addr = 0, size; | 31 | unsigned long addr, size; |
| 19 | int i = 0, type; | 32 | int type; |
| 20 | 33 | ||
| 21 | rzm = sclp_get_rzm(); | 34 | rzm = sclp_get_rzm(); |
| 22 | rnmax = sclp_get_rnmax(); | 35 | rnmax = sclp_get_rnmax(); |
| 23 | memsize = rzm * rnmax; | 36 | memsize = rzm * rnmax; |
| 24 | if (!rzm) | 37 | if (!rzm) |
| 25 | rzm = 1ULL << 17; | 38 | rzm = 1ULL << 17; |
| 26 | if (sizeof(long) == 4) { | 39 | if (IS_ENABLED(CONFIG_32BIT)) { |
| 27 | rzm = min(ADDR2G, rzm); | 40 | rzm = min(ADDR2G, rzm); |
| 28 | memsize = memsize ? min(ADDR2G, memsize) : ADDR2G; | 41 | memsize = min(ADDR2G, memsize); |
| 29 | } | 42 | } |
| 30 | if (maxsize) | 43 | max_physmem_end = memsize; |
| 31 | memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize; | 44 | addr = 0; |
| 45 | /* keep memblock lists close to the kernel */ | ||
| 46 | memblock_set_bottom_up(true); | ||
| 32 | do { | 47 | do { |
| 33 | size = 0; | 48 | size = 0; |
| 34 | type = tprot(addr); | 49 | type = tprot(addr); |
| 35 | do { | 50 | do { |
| 36 | size += rzm; | 51 | size += rzm; |
| 37 | if (memsize && addr + size >= memsize) | 52 | if (max_physmem_end && addr + size >= max_physmem_end) |
| 38 | break; | 53 | break; |
| 39 | } while (type == tprot(addr + size)); | 54 | } while (type == tprot(addr + size)); |
| 40 | if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { | 55 | if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { |
| 41 | if (memsize && (addr + size > memsize)) | 56 | if (max_physmem_end && (addr + size > max_physmem_end)) |
| 42 | size = memsize - addr; | 57 | size = max_physmem_end - addr; |
| 43 | chunk[i].addr = addr; | 58 | memblock_physmem_add(addr, size); |
| 44 | chunk[i].size = size; | ||
| 45 | chunk[i].type = type; | ||
| 46 | i++; | ||
| 47 | } | 59 | } |
| 48 | addr += size; | 60 | addr += size; |
| 49 | } while (addr < memsize && i < MEMORY_CHUNKS); | 61 | } while (addr < max_physmem_end); |
| 50 | } | 62 | memblock_set_bottom_up(false); |
| 51 | 63 | if (!max_physmem_end) | |
| 52 | /** | 64 | max_physmem_end = memblock_end_of_DRAM(); |
| 53 | * detect_memory_layout - fill mem_chunk array with memory layout data | ||
| 54 | * @chunk: mem_chunk array to be filled | ||
| 55 | * @maxsize: maximum address where memory detection should stop | ||
| 56 | * | ||
| 57 | * Fills the passed in memory chunk array with the memory layout of the | ||
| 58 | * machine. The array must have a size of at least MEMORY_CHUNKS and will | ||
| 59 | * be fully initialized afterwards. | ||
| 60 | * If the maxsize parameter has a value > 0, memory detection will stop at | ||
| 61 | * that address. It is guaranteed that all chunks have an ending address | ||
| 62 | * that is smaller than maxsize. | ||
| 63 | * If maxsize is 0 all memory will be detected. | ||
| 64 | */ | ||
| 65 | void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize) | ||
| 66 | { | ||
| 67 | unsigned long flags, flags_dat, cr0; | ||
| 68 | |||
| 69 | memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk)); | ||
| 70 | /* | ||
| 71 | * Disable IRQs, DAT and low address protection so tprot does the | ||
| 72 | * right thing and we don't get scheduled away with low address | ||
| 73 | * protection disabled. | ||
| 74 | */ | ||
| 75 | local_irq_save(flags); | ||
| 76 | flags_dat = __arch_local_irq_stnsm(0xfb); | ||
| 77 | /* | ||
| 78 | * In case DAT was enabled, make sure chunk doesn't reside in vmalloc | ||
| 79 | * space. We have disabled DAT and any access to vmalloc area will | ||
| 80 | * cause an exception. | ||
| 81 | * If DAT was disabled we are called from early ipl code. | ||
| 82 | */ | ||
| 83 | if (test_bit(5, &flags_dat)) { | ||
| 84 | if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk))) | ||
| 85 | goto out; | ||
| 86 | } | ||
| 87 | __ctl_store(cr0, 0, 0); | ||
| 88 | __ctl_clear_bit(0, 28); | ||
| 89 | find_memory_chunks(chunk, maxsize); | ||
| 90 | __ctl_load(cr0, 0, 0); | ||
| 91 | out: | ||
| 92 | __arch_local_irq_ssm(flags_dat); | ||
| 93 | local_irq_restore(flags); | ||
| 94 | } | ||
| 95 | EXPORT_SYMBOL(detect_memory_layout); | ||
| 96 | |||
| 97 | /* | ||
| 98 | * Create memory hole with given address and size. | ||
| 99 | */ | ||
| 100 | void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, | ||
| 101 | unsigned long size) | ||
| 102 | { | ||
| 103 | int i; | ||
| 104 | |||
| 105 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
| 106 | struct mem_chunk *chunk = &mem_chunk[i]; | ||
| 107 | |||
| 108 | if (chunk->size == 0) | ||
| 109 | continue; | ||
| 110 | if (addr > chunk->addr + chunk->size) | ||
| 111 | continue; | ||
| 112 | if (addr + size <= chunk->addr) | ||
| 113 | continue; | ||
| 114 | /* Split */ | ||
| 115 | if ((addr > chunk->addr) && | ||
| 116 | (addr + size < chunk->addr + chunk->size)) { | ||
| 117 | struct mem_chunk *new = chunk + 1; | ||
| 118 | |||
| 119 | memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new)); | ||
| 120 | new->addr = addr + size; | ||
| 121 | new->size = chunk->addr + chunk->size - new->addr; | ||
| 122 | chunk->size = addr - chunk->addr; | ||
| 123 | continue; | ||
| 124 | } else if ((addr <= chunk->addr) && | ||
| 125 | (addr + size >= chunk->addr + chunk->size)) { | ||
| 126 | memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk)); | ||
| 127 | memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk)); | ||
| 128 | } else if (addr + size < chunk->addr + chunk->size) { | ||
| 129 | chunk->size = chunk->addr + chunk->size - addr - size; | ||
| 130 | chunk->addr = addr + size; | ||
| 131 | } else if (addr > chunk->addr) { | ||
| 132 | chunk->size = addr - chunk->addr; | ||
| 133 | } | ||
| 134 | } | ||
| 135 | } | 65 | } |
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index 27c50f4d90cb..a90d45e9dfb0 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c | |||
| @@ -12,8 +12,6 @@ | |||
| 12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
| 13 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <asm/setup.h> | ||
| 16 | #include <asm/ipl.h> | ||
| 17 | 15 | ||
| 18 | #define ESSA_SET_STABLE 1 | 16 | #define ESSA_SET_STABLE 1 |
| 19 | #define ESSA_SET_UNUSED 2 | 17 | #define ESSA_SET_UNUSED 2 |
| @@ -43,14 +41,6 @@ void __init cmma_init(void) | |||
| 43 | 41 | ||
| 44 | if (!cmma_flag) | 42 | if (!cmma_flag) |
| 45 | return; | 43 | return; |
| 46 | /* | ||
| 47 | * Disable CMM for dump, otherwise the tprot based memory | ||
| 48 | * detection can fail because of unstable pages. | ||
| 49 | */ | ||
| 50 | if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) { | ||
| 51 | cmma_flag = 0; | ||
| 52 | return; | ||
| 53 | } | ||
| 54 | asm volatile( | 44 | asm volatile( |
| 55 | " .insn rrf,0xb9ab0000,%1,%1,0,0\n" | 45 | " .insn rrf,0xb9ab0000,%1,%1,0,0\n" |
| 56 | "0: la %0,0\n" | 46 | "0: la %0,0\n" |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index d7cfd57815fb..7881d4eb8b6b 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
| @@ -53,8 +53,10 @@ static void __crst_table_upgrade(void *arg) | |||
| 53 | { | 53 | { |
| 54 | struct mm_struct *mm = arg; | 54 | struct mm_struct *mm = arg; |
| 55 | 55 | ||
| 56 | if (current->active_mm == mm) | 56 | if (current->active_mm == mm) { |
| 57 | update_user_asce(mm, 1); | 57 | clear_user_asce(); |
| 58 | set_user_asce(mm); | ||
| 59 | } | ||
| 58 | __tlb_flush_local(); | 60 | __tlb_flush_local(); |
| 59 | } | 61 | } |
| 60 | 62 | ||
| @@ -108,7 +110,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | |||
| 108 | pgd_t *pgd; | 110 | pgd_t *pgd; |
| 109 | 111 | ||
| 110 | if (current->active_mm == mm) { | 112 | if (current->active_mm == mm) { |
| 111 | clear_user_asce(mm, 1); | 113 | clear_user_asce(); |
| 112 | __tlb_flush_mm(mm); | 114 | __tlb_flush_mm(mm); |
| 113 | } | 115 | } |
| 114 | while (mm->context.asce_limit > limit) { | 116 | while (mm->context.asce_limit > limit) { |
| @@ -134,7 +136,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | |||
| 134 | crst_table_free(mm, (unsigned long *) pgd); | 136 | crst_table_free(mm, (unsigned long *) pgd); |
| 135 | } | 137 | } |
| 136 | if (current->active_mm == mm) | 138 | if (current->active_mm == mm) |
| 137 | update_user_asce(mm, 1); | 139 | set_user_asce(mm); |
| 138 | } | 140 | } |
| 139 | #endif | 141 | #endif |
| 140 | 142 | ||
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 72b04de18283..fe9012a49aa5 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/list.h> | 10 | #include <linux/list.h> |
| 11 | #include <linux/hugetlb.h> | 11 | #include <linux/hugetlb.h> |
| 12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
| 13 | #include <linux/memblock.h> | ||
| 13 | #include <asm/pgalloc.h> | 14 | #include <asm/pgalloc.h> |
| 14 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
| 15 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
| @@ -66,7 +67,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address) | |||
| 66 | if (slab_is_available()) | 67 | if (slab_is_available()) |
| 67 | pte = (pte_t *) page_table_alloc(&init_mm, address); | 68 | pte = (pte_t *) page_table_alloc(&init_mm, address); |
| 68 | else | 69 | else |
| 69 | pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); | 70 | pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t), |
| 71 | PTRS_PER_PTE * sizeof(pte_t)); | ||
| 70 | if (!pte) | 72 | if (!pte) |
| 71 | return NULL; | 73 | return NULL; |
| 72 | clear_table((unsigned long *) pte, _PAGE_INVALID, | 74 | clear_table((unsigned long *) pte, _PAGE_INVALID, |
| @@ -371,16 +373,14 @@ out: | |||
| 371 | void __init vmem_map_init(void) | 373 | void __init vmem_map_init(void) |
| 372 | { | 374 | { |
| 373 | unsigned long ro_start, ro_end; | 375 | unsigned long ro_start, ro_end; |
| 374 | unsigned long start, end; | 376 | struct memblock_region *reg; |
| 375 | int i; | 377 | phys_addr_t start, end; |
| 376 | 378 | ||
| 377 | ro_start = PFN_ALIGN((unsigned long)&_stext); | 379 | ro_start = PFN_ALIGN((unsigned long)&_stext); |
| 378 | ro_end = (unsigned long)&_eshared & PAGE_MASK; | 380 | ro_end = (unsigned long)&_eshared & PAGE_MASK; |
| 379 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 381 | for_each_memblock(memory, reg) { |
| 380 | if (!memory_chunk[i].size) | 382 | start = reg->base; |
| 381 | continue; | 383 | end = reg->base + reg->size - 1; |
| 382 | start = memory_chunk[i].addr; | ||
| 383 | end = memory_chunk[i].addr + memory_chunk[i].size; | ||
| 384 | if (start >= ro_end || end <= ro_start) | 384 | if (start >= ro_end || end <= ro_start) |
| 385 | vmem_add_mem(start, end - start, 0); | 385 | vmem_add_mem(start, end - start, 0); |
| 386 | else if (start >= ro_start && end <= ro_end) | 386 | else if (start >= ro_start && end <= ro_end) |
| @@ -400,23 +400,21 @@ void __init vmem_map_init(void) | |||
| 400 | } | 400 | } |
| 401 | 401 | ||
| 402 | /* | 402 | /* |
| 403 | * Convert memory chunk array to a memory segment list so there is a single | 403 | * Convert memblock.memory to a memory segment list so there is a single |
| 404 | * list that contains both r/w memory and shared memory segments. | 404 | * list that contains all memory segments. |
| 405 | */ | 405 | */ |
| 406 | static int __init vmem_convert_memory_chunk(void) | 406 | static int __init vmem_convert_memory_chunk(void) |
| 407 | { | 407 | { |
| 408 | struct memblock_region *reg; | ||
| 408 | struct memory_segment *seg; | 409 | struct memory_segment *seg; |
| 409 | int i; | ||
| 410 | 410 | ||
| 411 | mutex_lock(&vmem_mutex); | 411 | mutex_lock(&vmem_mutex); |
| 412 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 412 | for_each_memblock(memory, reg) { |
| 413 | if (!memory_chunk[i].size) | ||
| 414 | continue; | ||
| 415 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); | 413 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); |
| 416 | if (!seg) | 414 | if (!seg) |
| 417 | panic("Out of memory...\n"); | 415 | panic("Out of memory...\n"); |
| 418 | seg->start = memory_chunk[i].addr; | 416 | seg->start = reg->base; |
| 419 | seg->size = memory_chunk[i].size; | 417 | seg->size = reg->size; |
| 420 | insert_memory_segment(seg); | 418 | insert_memory_segment(seg); |
| 421 | } | 419 | } |
| 422 | mutex_unlock(&vmem_mutex); | 420 | mutex_unlock(&vmem_mutex); |
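The vmem_map_init() hunk above walks memblock.memory and maps each region according to how it overlaps the kernel's read-only range [ro_start, ro_end): fully outside, fully inside, or straddling it (in which case it is mapped in up to three pieces). A minimal user-space sketch of just that classification follows; the range values are invented, and this illustrates only the overlap logic, not kernel API:

    #include <stdio.h>

    struct range { unsigned long start, end; };	/* illustrative type */

    /* Mirror of the if/else chain in vmem_map_init(): a region is either
     * entirely outside the read-only window, entirely inside it, or
     * straddles it (and is then mapped in up to three pieces). */
    static void classify(struct range reg, struct range ro)
    {
            if (reg.start >= ro.end || reg.end <= ro.start)
                    printf("[%#lx-%#lx): all read-write\n", reg.start, reg.end);
            else if (reg.start >= ro.start && reg.end <= ro.end)
                    printf("[%#lx-%#lx): all read-only\n", reg.start, reg.end);
            else
                    printf("[%#lx-%#lx): split around [%#lx-%#lx)\n",
                           reg.start, reg.end, ro.start, ro.end);
    }

    int main(void)
    {
            struct range ro = { 0x100000, 0x400000 };	/* assumed RO window */
            struct range regs[] = {
                    { 0x000000, 0x100000 },	/* below the window */
                    { 0x200000, 0x300000 },	/* inside the window */
                    { 0x300000, 0x800000 },	/* straddles the window end */
            };
            unsigned int i;

            for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
                    classify(regs[i], ro);
            return 0;
    }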
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 276f2e26c761..e53c6f268807 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
| @@ -209,13 +209,11 @@ static void init_all_cpu_buffers(void) | |||
| 209 | } | 209 | } |
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | static int prepare_cpu_buffers(void) | 212 | static void prepare_cpu_buffers(void) |
| 213 | { | 213 | { |
| 214 | int cpu; | ||
| 215 | int rc; | ||
| 216 | struct hws_cpu_buffer *cb; | 214 | struct hws_cpu_buffer *cb; |
| 215 | int cpu; | ||
| 217 | 216 | ||
| 218 | rc = 0; | ||
| 219 | for_each_online_cpu(cpu) { | 217 | for_each_online_cpu(cpu) { |
| 220 | cb = &per_cpu(sampler_cpu_buffer, cpu); | 218 | cb = &per_cpu(sampler_cpu_buffer, cpu); |
| 221 | atomic_set(&cb->ext_params, 0); | 219 | atomic_set(&cb->ext_params, 0); |
| @@ -230,8 +228,6 @@ static int prepare_cpu_buffers(void) | |||
| 230 | cb->oom = 0; | 228 | cb->oom = 0; |
| 231 | cb->stop_mode = 0; | 229 | cb->stop_mode = 0; |
| 232 | } | 230 | } |
| 233 | |||
| 234 | return rc; | ||
| 235 | } | 231 | } |
| 236 | 232 | ||
| 237 | /* | 233 | /* |
| @@ -1107,9 +1103,7 @@ int hwsampler_start_all(unsigned long rate) | |||
| 1107 | if (rc) | 1103 | if (rc) |
| 1108 | goto start_all_exit; | 1104 | goto start_all_exit; |
| 1109 | 1105 | ||
| 1110 | rc = prepare_cpu_buffers(); | 1106 | prepare_cpu_buffers(); |
| 1111 | if (rc) | ||
| 1112 | goto start_all_exit; | ||
| 1113 | 1107 | ||
| 1114 | for_each_online_cpu(cpu) { | 1108 | for_each_online_cpu(cpu) { |
| 1115 | rc = start_sampling(cpu); | 1109 | rc = start_sampling(cpu); |
| @@ -1156,7 +1150,7 @@ int hwsampler_stop_all(void) | |||
| 1156 | rc = 0; | 1150 | rc = 0; |
| 1157 | if (hws_state == HWS_INIT) { | 1151 | if (hws_state == HWS_INIT) { |
| 1158 | mutex_unlock(&hws_sem); | 1152 | mutex_unlock(&hws_sem); |
| 1159 | return rc; | 1153 | return 0; |
| 1160 | } | 1154 | } |
| 1161 | hws_state = HWS_STOPPING; | 1155 | hws_state = HWS_STOPPING; |
| 1162 | mutex_unlock(&hws_sem); | 1156 | mutex_unlock(&hws_sem); |
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index c747394029ee..96545d7659fd 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c | |||
| @@ -114,6 +114,16 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev, | |||
| 114 | zdev->end_dma = response->edma; | 114 | zdev->end_dma = response->edma; |
| 115 | zdev->pchid = response->pchid; | 115 | zdev->pchid = response->pchid; |
| 116 | zdev->pfgid = response->pfgid; | 116 | zdev->pfgid = response->pfgid; |
| 117 | zdev->pft = response->pft; | ||
| 118 | zdev->vfn = response->vfn; | ||
| 119 | zdev->uid = response->uid; | ||
| 120 | |||
| 121 | memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip)); | ||
| 122 | if (response->util_str_avail) { | ||
| 123 | memcpy(zdev->util_str, response->util_str, | ||
| 124 | sizeof(zdev->util_str)); | ||
| 125 | } | ||
| 126 | |||
| 117 | return 0; | 127 | return 0; |
| 118 | } | 128 | } |
| 119 | 129 | ||
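In the clp_store_query_pci_fn() hunk above, the utility string is copied with memcpy(..., sizeof(zdev->util_str)), sizing the copy by the destination array rather than the source. A small stand-alone sketch of that idiom; the struct layouts and the 64-byte length are assumptions for illustration:

    #include <stdio.h>
    #include <string.h>

    #define UTIL_STR_LEN 64	/* assumed length, for illustration only */

    struct fw_response { char util_str[UTIL_STR_LEN]; };	/* "firmware" side */
    struct dev_state   { char util_str[UTIL_STR_LEN]; };	/* driver's copy */

    int main(void)
    {
            struct fw_response resp = { "utility string from firmware" };
            struct dev_state dev;

            /* Sizing by the destination means the copy can never overrun
             * dev.util_str, even if the source declaration changes later. */
            memcpy(dev.util_str, resp.util_str, sizeof(dev.util_str));
            printf("%.*s\n", (int)sizeof(dev.util_str), dev.util_str);
            return 0;
    }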
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 01e251b1da0c..6d7f5a3016ca 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c | |||
| @@ -76,7 +76,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) | |||
| 76 | 76 | ||
| 77 | switch (ccdf->pec) { | 77 | switch (ccdf->pec) { |
| 78 | case 0x0301: /* Standby -> Configured */ | 78 | case 0x0301: /* Standby -> Configured */ |
| 79 | if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) | 79 | if (!zdev || zdev->state != ZPCI_FN_STATE_STANDBY) |
| 80 | break; | 80 | break; |
| 81 | zdev->state = ZPCI_FN_STATE_CONFIGURED; | 81 | zdev->state = ZPCI_FN_STATE_CONFIGURED; |
| 82 | zdev->fh = ccdf->fh; | 82 | zdev->fh = ccdf->fh; |
| @@ -86,7 +86,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) | |||
| 86 | pci_rescan_bus(zdev->bus); | 86 | pci_rescan_bus(zdev->bus); |
| 87 | break; | 87 | break; |
| 88 | case 0x0302: /* Reserved -> Standby */ | 88 | case 0x0302: /* Reserved -> Standby */ |
| 89 | clp_add_pci_device(ccdf->fid, ccdf->fh, 0); | 89 | if (!zdev) |
| 90 | clp_add_pci_device(ccdf->fid, ccdf->fh, 0); | ||
| 90 | break; | 91 | break; |
| 91 | case 0x0303: /* Deconfiguration requested */ | 92 | case 0x0303: /* Deconfiguration requested */ |
| 92 | if (pdev) | 93 | if (pdev) |
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index b56a3958f1a7..9190214b8702 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c | |||
| @@ -12,43 +12,29 @@ | |||
| 12 | #include <linux/stat.h> | 12 | #include <linux/stat.h> |
| 13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
| 14 | 14 | ||
| 15 | static ssize_t show_fid(struct device *dev, struct device_attribute *attr, | 15 | #define zpci_attr(name, fmt, member) \ |
| 16 | char *buf) | 16 | static ssize_t name##_show(struct device *dev, \ |
| 17 | { | 17 | struct device_attribute *attr, char *buf) \ |
| 18 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 18 | { \ |
| 19 | 19 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); \ | |
| 20 | return sprintf(buf, "0x%08x\n", zdev->fid); | 20 | \ |
| 21 | } | 21 | return sprintf(buf, fmt, zdev->member); \ |
| 22 | static DEVICE_ATTR(function_id, S_IRUGO, show_fid, NULL); | 22 | } \ |
| 23 | 23 | static DEVICE_ATTR_RO(name) | |
| 24 | static ssize_t show_fh(struct device *dev, struct device_attribute *attr, | 24 | |
| 25 | char *buf) | 25 | zpci_attr(function_id, "0x%08x\n", fid); |
| 26 | { | 26 | zpci_attr(function_handle, "0x%08x\n", fh); |
| 27 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 27 | zpci_attr(pchid, "0x%04x\n", pchid); |
| 28 | 28 | zpci_attr(pfgid, "0x%02x\n", pfgid); | |
| 29 | return sprintf(buf, "0x%08x\n", zdev->fh); | 29 | zpci_attr(vfn, "0x%04x\n", vfn); |
| 30 | } | 30 | zpci_attr(pft, "0x%02x\n", pft); |
| 31 | static DEVICE_ATTR(function_handle, S_IRUGO, show_fh, NULL); | 31 | zpci_attr(uid, "0x%x\n", uid); |
| 32 | 32 | zpci_attr(segment0, "0x%02x\n", pfip[0]); | |
| 33 | static ssize_t show_pchid(struct device *dev, struct device_attribute *attr, | 33 | zpci_attr(segment1, "0x%02x\n", pfip[1]); |
| 34 | char *buf) | 34 | zpci_attr(segment2, "0x%02x\n", pfip[2]); |
| 35 | { | 35 | zpci_attr(segment3, "0x%02x\n", pfip[3]); |
| 36 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 36 | |
| 37 | 37 | static ssize_t recover_store(struct device *dev, struct device_attribute *attr, | |
| 38 | return sprintf(buf, "0x%04x\n", zdev->pchid); | ||
| 39 | } | ||
| 40 | static DEVICE_ATTR(pchid, S_IRUGO, show_pchid, NULL); | ||
| 41 | |||
| 42 | static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr, | ||
| 43 | char *buf) | ||
| 44 | { | ||
| 45 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | ||
| 46 | |||
| 47 | return sprintf(buf, "0x%02x\n", zdev->pfgid); | ||
| 48 | } | ||
| 49 | static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL); | ||
| 50 | |||
| 51 | static ssize_t store_recover(struct device *dev, struct device_attribute *attr, | ||
| 52 | const char *buf, size_t count) | 38 | const char *buf, size_t count) |
| 53 | { | 39 | { |
| 54 | struct pci_dev *pdev = to_pci_dev(dev); | 40 | struct pci_dev *pdev = to_pci_dev(dev); |
| @@ -70,20 +56,55 @@ static ssize_t store_recover(struct device *dev, struct device_attribute *attr, | |||
| 70 | pci_rescan_bus(zdev->bus); | 56 | pci_rescan_bus(zdev->bus); |
| 71 | return count; | 57 | return count; |
| 72 | } | 58 | } |
| 73 | static DEVICE_ATTR(recover, S_IWUSR, NULL, store_recover); | 59 | static DEVICE_ATTR_WO(recover); |
| 60 | |||
| 61 | static ssize_t util_string_read(struct file *filp, struct kobject *kobj, | ||
| 62 | struct bin_attribute *attr, char *buf, | ||
| 63 | loff_t off, size_t count) | ||
| 64 | { | ||
| 65 | struct device *dev = kobj_to_dev(kobj); | ||
| 66 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 67 | struct zpci_dev *zdev = get_zdev(pdev); | ||
| 68 | |||
| 69 | return memory_read_from_buffer(buf, count, &off, zdev->util_str, | ||
| 70 | sizeof(zdev->util_str)); | ||
| 71 | } | ||
| 72 | static BIN_ATTR_RO(util_string, CLP_UTIL_STR_LEN); | ||
| 73 | static struct bin_attribute *zpci_bin_attrs[] = { | ||
| 74 | &bin_attr_util_string, | ||
| 75 | NULL, | ||
| 76 | }; | ||
| 74 | 77 | ||
| 75 | static struct attribute *zpci_dev_attrs[] = { | 78 | static struct attribute *zpci_dev_attrs[] = { |
| 76 | &dev_attr_function_id.attr, | 79 | &dev_attr_function_id.attr, |
| 77 | &dev_attr_function_handle.attr, | 80 | &dev_attr_function_handle.attr, |
| 78 | &dev_attr_pchid.attr, | 81 | &dev_attr_pchid.attr, |
| 79 | &dev_attr_pfgid.attr, | 82 | &dev_attr_pfgid.attr, |
| 83 | &dev_attr_pft.attr, | ||
| 84 | &dev_attr_vfn.attr, | ||
| 85 | &dev_attr_uid.attr, | ||
| 80 | &dev_attr_recover.attr, | 86 | &dev_attr_recover.attr, |
| 81 | NULL, | 87 | NULL, |
| 82 | }; | 88 | }; |
| 83 | static struct attribute_group zpci_attr_group = { | 89 | static struct attribute_group zpci_attr_group = { |
| 84 | .attrs = zpci_dev_attrs, | 90 | .attrs = zpci_dev_attrs, |
| 91 | .bin_attrs = zpci_bin_attrs, | ||
| 85 | }; | 92 | }; |
| 93 | |||
| 94 | static struct attribute *pfip_attrs[] = { | ||
| 95 | &dev_attr_segment0.attr, | ||
| 96 | &dev_attr_segment1.attr, | ||
| 97 | &dev_attr_segment2.attr, | ||
| 98 | &dev_attr_segment3.attr, | ||
| 99 | NULL, | ||
| 100 | }; | ||
| 101 | static struct attribute_group pfip_attr_group = { | ||
| 102 | .name = "pfip", | ||
| 103 | .attrs = pfip_attrs, | ||
| 104 | }; | ||
| 105 | |||
| 86 | const struct attribute_group *zpci_attr_groups[] = { | 106 | const struct attribute_group *zpci_attr_groups[] = { |
| 87 | &zpci_attr_group, | 107 | &zpci_attr_group, |
| 108 | &pfip_attr_group, | ||
| 88 | NULL, | 109 | NULL, |
| 89 | }; | 110 | }; |
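The zpci_attr() macro above collapses four nearly identical show functions into one generator, with DEVICE_ATTR_RO()/DEVICE_ATTR_WO() deriving the sysfs attribute from the function name. The same stamp-out-a-getter technique in self-contained user-space C; the struct, field values, and formats are invented for the sketch:

    #include <stdio.h>

    struct zdev { unsigned int fid, fh, pchid; };

    /* One macro stamps out one formatted getter per field, the same way
     * zpci_attr() generates one sysfs show() callback per attribute. */
    #define zdev_attr(name, fmt, member)                                    \
    static int name##_show(const struct zdev *z, char *buf, size_t len)     \
    {                                                                       \
            return snprintf(buf, len, fmt, z->member);                      \
    }

    zdev_attr(function_id, "0x%08x\n", fid)
    zdev_attr(function_handle, "0x%08x\n", fh)
    zdev_attr(pchid, "0x%04x\n", pchid)

    int main(void)
    {
            struct zdev z = { 0x12345678, 0x9abcdef0, 0x42 };
            char buf[32];

            function_id_show(&z, buf, sizeof(buf));
            fputs(buf, stdout);
            function_handle_show(&z, buf, sizeof(buf));
            fputs(buf, stdout);
            pchid_show(&z, buf, sizeof(buf));
            fputs(buf, stdout);
            return 0;
    }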
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index b69ab17f13fa..629fcc275e92 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile | |||
| @@ -33,4 +33,4 @@ obj-$(CONFIG_MONWRITER) += monwriter.o | |||
| 33 | obj-$(CONFIG_S390_VMUR) += vmur.o | 33 | obj-$(CONFIG_S390_VMUR) += vmur.o |
| 34 | 34 | ||
| 35 | zcore_mod-objs := sclp_sdias.o zcore.o | 35 | zcore_mod-objs := sclp_sdias.o zcore.o |
| 36 | obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o | 36 | obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 3d8e4d63f514..1884653e4472 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | #include <linux/miscdevice.h> | 17 | #include <linux/miscdevice.h> |
| 18 | #include <linux/debugfs.h> | 18 | #include <linux/debugfs.h> |
| 19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 20 | #include <linux/memblock.h> | ||
| 21 | |||
| 20 | #include <asm/asm-offsets.h> | 22 | #include <asm/asm-offsets.h> |
| 21 | #include <asm/ipl.h> | 23 | #include <asm/ipl.h> |
| 22 | #include <asm/sclp.h> | 24 | #include <asm/sclp.h> |
| @@ -411,33 +413,24 @@ static ssize_t zcore_memmap_read(struct file *filp, char __user *buf, | |||
| 411 | size_t count, loff_t *ppos) | 413 | size_t count, loff_t *ppos) |
| 412 | { | 414 | { |
| 413 | return simple_read_from_buffer(buf, count, ppos, filp->private_data, | 415 | return simple_read_from_buffer(buf, count, ppos, filp->private_data, |
| 414 | MEMORY_CHUNKS * CHUNK_INFO_SIZE); | 416 | memblock.memory.cnt * CHUNK_INFO_SIZE); |
| 415 | } | 417 | } |
| 416 | 418 | ||
| 417 | static int zcore_memmap_open(struct inode *inode, struct file *filp) | 419 | static int zcore_memmap_open(struct inode *inode, struct file *filp) |
| 418 | { | 420 | { |
| 419 | int i; | 421 | struct memblock_region *reg; |
| 420 | char *buf; | 422 | char *buf; |
| 421 | struct mem_chunk *chunk_array; | 423 | int i = 0; |
| 422 | 424 | ||
| 423 | chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), | 425 | buf = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL); |
| 424 | GFP_KERNEL); | ||
| 425 | if (!chunk_array) | ||
| 426 | return -ENOMEM; | ||
| 427 | detect_memory_layout(chunk_array, 0); | ||
| 428 | buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL); | ||
| 429 | if (!buf) { | 426 | if (!buf) { |
| 430 | kfree(chunk_array); | ||
| 431 | return -ENOMEM; | 427 | return -ENOMEM; |
| 432 | } | 428 | } |
| 433 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 429 | for_each_memblock(memory, reg) { |
| 434 | sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ", | 430 | sprintf(buf + (i++ * CHUNK_INFO_SIZE), "%016llx %016llx ", |
| 435 | (unsigned long long) chunk_array[i].addr, | 431 | (unsigned long long) reg->base, |
| 436 | (unsigned long long) chunk_array[i].size); | 432 | (unsigned long long) reg->size); |
| 437 | if (chunk_array[i].size == 0) | ||
| 438 | break; | ||
| 439 | } | 433 | } |
| 440 | kfree(chunk_array); | ||
| 441 | filp->private_data = buf; | 434 | filp->private_data = buf; |
| 442 | return nonseekable_open(inode, filp); | 435 | return nonseekable_open(inode, filp); |
| 443 | } | 436 | } |
| @@ -593,21 +586,12 @@ static int __init check_sdias(void) | |||
| 593 | 586 | ||
| 594 | static int __init get_mem_info(unsigned long *mem, unsigned long *end) | 587 | static int __init get_mem_info(unsigned long *mem, unsigned long *end) |
| 595 | { | 588 | { |
| 596 | int i; | 589 | struct memblock_region *reg; |
| 597 | struct mem_chunk *chunk_array; | ||
| 598 | 590 | ||
| 599 | chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), | 591 | for_each_memblock(memory, reg) { |
| 600 | GFP_KERNEL); | 592 | *mem += reg->size; |
| 601 | if (!chunk_array) | 593 | *end = max_t(unsigned long, *end, reg->base + reg->size); |
| 602 | return -ENOMEM; | ||
| 603 | detect_memory_layout(chunk_array, 0); | ||
| 604 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
| 605 | if (chunk_array[i].size == 0) | ||
| 606 | break; | ||
| 607 | *mem += chunk_array[i].size; | ||
| 608 | *end = max(*end, chunk_array[i].addr + chunk_array[i].size); | ||
| 609 | } | 594 | } |
| 610 | kfree(chunk_array); | ||
| 611 | return 0; | 595 | return 0; |
| 612 | } | 596 | } |
| 613 | 597 | ||
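zcore_memmap_open() above emits one fixed-width record per memblock region, so a reader can seek straight to record i at offset i * CHUNK_INFO_SIZE. A sketch of that layout, assuming CHUNK_INFO_SIZE is 34 bytes (16 hex digits, a space, 16 hex digits, a space, as the format string implies):

    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK_INFO_SIZE 34	/* 16 + 1 + 16 + 1, implied by the format */

    struct region { unsigned long long base, size; };

    int main(void)
    {
            struct region regs[] = {
                    { 0x0000000000000000ULL, 0x10000000ULL },
                    { 0x0000000020000000ULL, 0x10000000ULL },
            };
            int cnt = sizeof(regs) / sizeof(regs[0]);
            /* one extra byte: sprintf() terminates the last record with NUL */
            char *buf = malloc(cnt * CHUNK_INFO_SIZE + 1);
            int i;

            if (!buf)
                    return 1;
            for (i = 0; i < cnt; i++)	/* record i at a fixed offset */
                    sprintf(buf + i * CHUNK_INFO_SIZE, "%016llx %016llx ",
                            regs[i].base, regs[i].size);
            fwrite(buf, CHUNK_INFO_SIZE, cnt, stdout);
            putchar('\n');
            free(buf);
            return 0;
    }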
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 5156264d0c74..07676c22d514 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c | |||
| @@ -46,7 +46,7 @@ static u16 ccwreq_next_path(struct ccw_device *cdev) | |||
| 46 | goto out; | 46 | goto out; |
| 47 | } | 47 | } |
| 48 | req->retries = req->maxretries; | 48 | req->retries = req->maxretries; |
| 49 | req->mask = lpm_adjust(req->mask >>= 1, req->lpm); | 49 | req->mask = lpm_adjust(req->mask >> 1, req->lpm); |
| 50 | out: | 50 | out: |
| 51 | return req->mask; | 51 | return req->mask; |
| 52 | } | 52 | } |
| @@ -252,7 +252,7 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status) | |||
| 252 | */ | 252 | */ |
| 253 | void ccw_request_handler(struct ccw_device *cdev) | 253 | void ccw_request_handler(struct ccw_device *cdev) |
| 254 | { | 254 | { |
| 255 | struct irb *irb = (struct irb *)&S390_lowcore.irb; | 255 | struct irb *irb = &__get_cpu_var(cio_irb); |
| 256 | struct ccw_request *req = &cdev->private->req; | 256 | struct ccw_request *req = &cdev->private->req; |
| 257 | enum io_status status; | 257 | enum io_status status; |
| 258 | int rc = -EOPNOTSUPP; | 258 | int rc = -EOPNOTSUPP; |
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 6c440d4349d4..d497aa05a72f 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
| @@ -509,7 +509,7 @@ out: | |||
| 509 | * On success return a newly allocated copy of the channel-path description | 509 | * On success return a newly allocated copy of the channel-path description |
| 510 | * data associated with the given channel-path ID. Return %NULL on error. | 510 | * data associated with the given channel-path ID. Return %NULL on error. |
| 511 | */ | 511 | */ |
| 512 | void *chp_get_chp_desc(struct chp_id chpid) | 512 | struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid) |
| 513 | { | 513 | { |
| 514 | struct channel_path *chp; | 514 | struct channel_path *chp; |
| 515 | struct channel_path_desc *desc; | 515 | struct channel_path_desc *desc; |
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 9284b785a06f..4efd5b867cc3 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h | |||
| @@ -60,7 +60,7 @@ static inline struct channel_path *chpid_to_chp(struct chp_id chpid) | |||
| 60 | int chp_get_status(struct chp_id chpid); | 60 | int chp_get_status(struct chp_id chpid); |
| 61 | u8 chp_get_sch_opm(struct subchannel *sch); | 61 | u8 chp_get_sch_opm(struct subchannel *sch); |
| 62 | int chp_is_registered(struct chp_id chpid); | 62 | int chp_is_registered(struct chp_id chpid); |
| 63 | void *chp_get_chp_desc(struct chp_id chpid); | 63 | struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid); |
| 64 | void chp_remove_cmg_attr(struct channel_path *chp); | 64 | void chp_remove_cmg_attr(struct channel_path *chp); |
| 65 | int chp_add_cmg_attr(struct channel_path *chp); | 65 | int chp_add_cmg_attr(struct channel_path *chp); |
| 66 | int chp_update_desc(struct channel_path *chp); | 66 | int chp_update_desc(struct channel_path *chp); |
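Changing chp_get_chp_desc() from void * to struct channel_path_desc * moves misuse from run time to compile time: callers can no longer silently assign the result to an unrelated pointer type. A toy illustration of the difference; the types are invented for the sketch:

    #include <stdlib.h>

    struct chp_desc { unsigned char flags, lsn, desc, chpid; };
    struct other { int x; };

    /* Typed factory: assigning the result to the wrong pointer type is
     * now a compile-time diagnostic; with a void * return it was silent. */
    static struct chp_desc *get_desc(void)
    {
            return calloc(1, sizeof(struct chp_desc));
    }

    int main(void)
    {
            struct chp_desc *d = get_desc();	/* checked */
            /* struct other *o = get_desc();  <- rejected by the compiler */

            free(d);
            return 0;
    }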
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 7e53a9c8b0b9..76c9b50700b2 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
| @@ -21,17 +21,6 @@ struct cmg_entry { | |||
| 21 | u32 values[NR_MEASUREMENT_ENTRIES]; | 21 | u32 values[NR_MEASUREMENT_ENTRIES]; |
| 22 | } __attribute__ ((packed)); | 22 | } __attribute__ ((packed)); |
| 23 | 23 | ||
| 24 | struct channel_path_desc { | ||
| 25 | u8 flags; | ||
| 26 | u8 lsn; | ||
| 27 | u8 desc; | ||
| 28 | u8 chpid; | ||
| 29 | u8 swla; | ||
| 30 | u8 zeroes; | ||
| 31 | u8 chla; | ||
| 32 | u8 chpp; | ||
| 33 | } __attribute__ ((packed)); | ||
| 34 | |||
| 35 | struct channel_path_desc_fmt1 { | 24 | struct channel_path_desc_fmt1 { |
| 36 | u8 flags; | 25 | u8 flags; |
| 37 | u8 lsn; | 26 | u8 lsn; |
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 1d3661af7bd8..3d22d2a4ce14 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
| @@ -58,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) | |||
| 58 | { | 58 | { |
| 59 | struct chsc_private *private = dev_get_drvdata(&sch->dev); | 59 | struct chsc_private *private = dev_get_drvdata(&sch->dev); |
| 60 | struct chsc_request *request = private->request; | 60 | struct chsc_request *request = private->request; |
| 61 | struct irb *irb = (struct irb *)&S390_lowcore.irb; | 61 | struct irb *irb = &__get_cpu_var(cio_irb); |
| 62 | 62 | ||
| 63 | CHSC_LOG(4, "irb"); | 63 | CHSC_LOG(4, "irb"); |
| 64 | CHSC_LOG_HEX(4, irb, sizeof(*irb)); | 64 | CHSC_LOG_HEX(4, irb, sizeof(*irb)); |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9e058c4657a3..77f9c92df4b9 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
| @@ -46,6 +46,9 @@ debug_info_t *cio_debug_msg_id; | |||
| 46 | debug_info_t *cio_debug_trace_id; | 46 | debug_info_t *cio_debug_trace_id; |
| 47 | debug_info_t *cio_debug_crw_id; | 47 | debug_info_t *cio_debug_crw_id; |
| 48 | 48 | ||
| 49 | DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb); | ||
| 50 | EXPORT_PER_CPU_SYMBOL(cio_irb); | ||
| 51 | |||
| 49 | /* | 52 | /* |
| 50 | * Function: cio_debug_init | 53 | * Function: cio_debug_init |
| 51 | * Initializes three debug logs for common I/O: | 54 | * Initializes three debug logs for common I/O: |
| @@ -560,7 +563,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) | |||
| 560 | 563 | ||
| 561 | __this_cpu_write(s390_idle.nohz_delay, 1); | 564 | __this_cpu_write(s390_idle.nohz_delay, 1); |
| 562 | tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; | 565 | tpi_info = (struct tpi_info *) &get_irq_regs()->int_code; |
| 563 | irb = (struct irb *) &S390_lowcore.irb; | 566 | irb = &__get_cpu_var(cio_irb); |
| 564 | sch = (struct subchannel *)(unsigned long) tpi_info->intparm; | 567 | sch = (struct subchannel *)(unsigned long) tpi_info->intparm; |
| 565 | if (!sch) { | 568 | if (!sch) { |
| 566 | /* Clear pending interrupt condition. */ | 569 | /* Clear pending interrupt condition. */ |
| @@ -609,7 +612,7 @@ void cio_tsch(struct subchannel *sch) | |||
| 609 | struct irb *irb; | 612 | struct irb *irb; |
| 610 | int irq_context; | 613 | int irq_context; |
| 611 | 614 | ||
| 612 | irb = (struct irb *)&S390_lowcore.irb; | 615 | irb = &__get_cpu_var(cio_irb); |
| 613 | /* Store interrupt response block to lowcore. */ | 616 | /* Store interrupt response block to lowcore. */ |
| 614 | if (tsch(sch->schid, irb) != 0) | 617 | if (tsch(sch->schid, irb) != 0) |
| 615 | /* Not status pending or not operational. */ | 618 | /* Not status pending or not operational. */ |
| @@ -746,7 +749,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid) | |||
| 746 | struct tpi_info ti; | 749 | struct tpi_info ti; |
| 747 | 750 | ||
| 748 | if (tpi(&ti)) { | 751 | if (tpi(&ti)) { |
| 749 | tsch(ti.schid, (struct irb *)&S390_lowcore.irb); | 752 | tsch(ti.schid, &__get_cpu_var(cio_irb)); |
| 750 | if (schid_equal(&ti.schid, &schid)) | 753 | if (schid_equal(&ti.schid, &schid)) |
| 751 | return 0; | 754 | return 0; |
| 752 | } | 755 | } |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index d42f67412bd8..a01376ae1749 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
| @@ -102,6 +102,8 @@ struct subchannel { | |||
| 102 | struct schib_config config; | 102 | struct schib_config config; |
| 103 | } __attribute__ ((aligned(8))); | 103 | } __attribute__ ((aligned(8))); |
| 104 | 104 | ||
| 105 | DECLARE_PER_CPU(struct irb, cio_irb); | ||
| 106 | |||
| 105 | #define to_subchannel(n) container_of(n, struct subchannel, dev) | 107 | #define to_subchannel(n) container_of(n, struct subchannel, dev) |
| 106 | 108 | ||
| 107 | extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); | 109 | extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); |
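The cio_irb change moves the interrupt response block out of the lowcore into an ordinary DEFINE_PER_CPU_ALIGNED variable; each CPU still gets a private buffer, just no longer carved out of the lowcore. As a rough user-space analogy only, C11 thread-local storage gives each thread its own instance the way per-CPU data gives each CPU its own; the irb layout below is a placeholder:

    #include <stdio.h>
    #include <stdint.h>
    #include <threads.h>	/* C11 threads; needs glibc 2.28+ or similar */

    struct irb { unsigned int scsw; };	/* placeholder layout */

    /* Analogy only: one instance per thread, the way DEFINE_PER_CPU
     * gives one instance per CPU. */
    static thread_local struct irb cio_irb;

    static int worker(void *arg)
    {
            cio_irb.scsw = (unsigned int)(uintptr_t)arg;	/* no locking */
            printf("this thread's irb.scsw = %u\n", cio_irb.scsw);
            return 0;
    }

    int main(void)
    {
            thrd_t a, b;

            thrd_create(&a, worker, (void *)1);
            thrd_create(&b, worker, (void *)2);
            thrd_join(a, NULL);
            thrd_join(b, NULL);
            return 0;
    }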
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c7638c543250..0bc902b3cd84 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
| @@ -739,7 +739,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 739 | struct irb *irb; | 739 | struct irb *irb; |
| 740 | int is_cmd; | 740 | int is_cmd; |
| 741 | 741 | ||
| 742 | irb = (struct irb *)&S390_lowcore.irb; | 742 | irb = &__get_cpu_var(cio_irb); |
| 743 | is_cmd = !scsw_is_tm(&irb->scsw); | 743 | is_cmd = !scsw_is_tm(&irb->scsw); |
| 744 | /* Check for unsolicited interrupt. */ | 744 | /* Check for unsolicited interrupt. */ |
| 745 | if (!scsw_is_solicited(&irb->scsw)) { | 745 | if (!scsw_is_solicited(&irb->scsw)) { |
| @@ -805,7 +805,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 805 | { | 805 | { |
| 806 | struct irb *irb; | 806 | struct irb *irb; |
| 807 | 807 | ||
| 808 | irb = (struct irb *)&S390_lowcore.irb; | 808 | irb = &__get_cpu_var(cio_irb); |
| 809 | /* Check for unsolicited interrupt. */ | 809 | /* Check for unsolicited interrupt. */ |
| 810 | if (scsw_stctl(&irb->scsw) == | 810 | if (scsw_stctl(&irb->scsw) == |
| 811 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 811 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 4845d64f2842..f3c417943dad 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
| @@ -563,14 +563,23 @@ out_unlock: | |||
| 563 | return rc; | 563 | return rc; |
| 564 | } | 564 | } |
| 565 | 565 | ||
| 566 | void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) | 566 | /** |
| 567 | * ccw_device_get_chp_desc - return newly allocated channel-path descriptor | ||
| 568 | * @cdev: device to obtain the descriptor for | ||
| 569 | * @chp_idx: index of the channel path | ||
| 570 | * | ||
| 571 | * On success return a newly allocated copy of the channel-path description | ||
| 572 | * data associated with the given channel path. Return %NULL on error. | ||
| 573 | */ | ||
| 574 | struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev, | ||
| 575 | int chp_idx) | ||
| 567 | { | 576 | { |
| 568 | struct subchannel *sch; | 577 | struct subchannel *sch; |
| 569 | struct chp_id chpid; | 578 | struct chp_id chpid; |
| 570 | 579 | ||
| 571 | sch = to_subchannel(cdev->dev.parent); | 580 | sch = to_subchannel(cdev->dev.parent); |
| 572 | chp_id_init(&chpid); | 581 | chp_id_init(&chpid); |
| 573 | chpid.id = sch->schib.pmcw.chpid[chp_no]; | 582 | chpid.id = sch->schib.pmcw.chpid[chp_idx]; |
| 574 | return chp_get_chp_desc(chpid); | 583 | return chp_get_chp_desc(chpid); |
| 575 | } | 584 | } |
| 576 | 585 | ||
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index 3a2ee4a740b4..c4f7bf3e24c2 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c | |||
| @@ -134,7 +134,7 @@ static void eadm_subchannel_irq(struct subchannel *sch) | |||
| 134 | { | 134 | { |
| 135 | struct eadm_private *private = get_eadm_private(sch); | 135 | struct eadm_private *private = get_eadm_private(sch); |
| 136 | struct eadm_scsw *scsw = &sch->schib.scsw.eadm; | 136 | struct eadm_scsw *scsw = &sch->schib.scsw.eadm; |
| 137 | struct irb *irb = (struct irb *)&S390_lowcore.irb; | 137 | struct irb *irb = &__get_cpu_var(cio_irb); |
| 138 | int error = 0; | 138 | int error = 0; |
| 139 | 139 | ||
| 140 | EADM_LOG(6, "irq"); | 140 | EADM_LOG(6, "irq"); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 22470a3b182f..e89f38c31176 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <net/iucv/af_iucv.h> | 22 | #include <net/iucv/af_iucv.h> |
| 23 | 23 | ||
| 24 | #include <asm/ebcdic.h> | 24 | #include <asm/ebcdic.h> |
| 25 | #include <asm/chpid.h> | ||
| 25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
| 26 | #include <asm/sysinfo.h> | 27 | #include <asm/sysinfo.h> |
| 27 | #include <asm/compat.h> | 28 | #include <asm/compat.h> |
| @@ -1344,16 +1345,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card) | |||
| 1344 | static void qeth_update_from_chp_desc(struct qeth_card *card) | 1345 | static void qeth_update_from_chp_desc(struct qeth_card *card) |
| 1345 | { | 1346 | { |
| 1346 | struct ccw_device *ccwdev; | 1347 | struct ccw_device *ccwdev; |
| 1347 | struct channelPath_dsc { | 1348 | struct channel_path_desc *chp_dsc; |
| 1348 | u8 flags; | ||
| 1349 | u8 lsn; | ||
| 1350 | u8 desc; | ||
| 1351 | u8 chpid; | ||
| 1352 | u8 swla; | ||
| 1353 | u8 zeroes; | ||
| 1354 | u8 chla; | ||
| 1355 | u8 chpp; | ||
| 1356 | } *chp_dsc; | ||
| 1357 | 1349 | ||
| 1358 | QETH_DBF_TEXT(SETUP, 2, "chp_desc"); | 1350 | QETH_DBF_TEXT(SETUP, 2, "chp_desc"); |
| 1359 | 1351 | ||
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 8a20a51ed42d..73dc382e72d8 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
| 19 | 19 | ||
| 20 | #define INIT_MEMBLOCK_REGIONS 128 | 20 | #define INIT_MEMBLOCK_REGIONS 128 |
| 21 | #define INIT_PHYSMEM_REGIONS 4 | ||
| 21 | 22 | ||
| 22 | /* Definition of memblock flags. */ | 23 | /* Definition of memblock flags. */ |
| 23 | #define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */ | 24 | #define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */ |
| @@ -43,6 +44,9 @@ struct memblock { | |||
| 43 | phys_addr_t current_limit; | 44 | phys_addr_t current_limit; |
| 44 | struct memblock_type memory; | 45 | struct memblock_type memory; |
| 45 | struct memblock_type reserved; | 46 | struct memblock_type reserved; |
| 47 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | ||
| 48 | struct memblock_type physmem; | ||
| 49 | #endif | ||
| 46 | }; | 50 | }; |
| 47 | 51 | ||
| 48 | extern struct memblock memblock; | 52 | extern struct memblock memblock; |
| @@ -71,6 +75,63 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size); | |||
| 71 | void memblock_trim_memory(phys_addr_t align); | 75 | void memblock_trim_memory(phys_addr_t align); |
| 72 | int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); | 76 | int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); |
| 73 | int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); | 77 | int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); |
| 78 | |||
| 79 | /* Low level functions */ | ||
| 80 | int memblock_add_range(struct memblock_type *type, | ||
| 81 | phys_addr_t base, phys_addr_t size, | ||
| 82 | int nid, unsigned long flags); | ||
| 83 | |||
| 84 | int memblock_remove_range(struct memblock_type *type, | ||
| 85 | phys_addr_t base, | ||
| 86 | phys_addr_t size); | ||
| 87 | |||
| 88 | void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a, | ||
| 89 | struct memblock_type *type_b, phys_addr_t *out_start, | ||
| 90 | phys_addr_t *out_end, int *out_nid); | ||
| 91 | |||
| 92 | void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a, | ||
| 93 | struct memblock_type *type_b, phys_addr_t *out_start, | ||
| 94 | phys_addr_t *out_end, int *out_nid); | ||
| 95 | |||
| 96 | /** | ||
| 97 | * for_each_mem_range - iterate through memblock areas from @type_a that | ||
| 98 | * are not included in @type_b; iterates over all of @type_a if @type_b is %NULL. | ||
| 99 | * @i: u64 used as loop variable | ||
| 100 | * @type_a: ptr to memblock_type to iterate | ||
| 101 | * @type_b: ptr to memblock_type which excludes from the iteration | ||
| 102 | * @nid: node selector, %NUMA_NO_NODE for all nodes | ||
| 103 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | ||
| 104 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | ||
| 105 | * @p_nid: ptr to int for nid of the range, can be %NULL | ||
| 106 | */ | ||
| 107 | #define for_each_mem_range(i, type_a, type_b, nid, \ | ||
| 108 | p_start, p_end, p_nid) \ | ||
| 109 | for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \ | ||
| 110 | p_start, p_end, p_nid); \ | ||
| 111 | i != (u64)ULLONG_MAX; \ | ||
| 112 | __next_mem_range(&i, nid, type_a, type_b, \ | ||
| 113 | p_start, p_end, p_nid)) | ||
| 114 | |||
| 115 | /** | ||
| 116 | * for_each_mem_range_rev - iterate in reverse through memblock areas from | ||
| 117 | * @type_a that are not included in @type_b; all of @type_a if @type_b is %NULL. | ||
| 118 | * @i: u64 used as loop variable | ||
| 119 | * @type_a: ptr to memblock_type to iterate | ||
| 120 | * @type_b: ptr to memblock_type which excludes from the iteration | ||
| 121 | * @nid: node selector, %NUMA_NO_NODE for all nodes | ||
| 122 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL | ||
| 123 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL | ||
| 124 | * @p_nid: ptr to int for nid of the range, can be %NULL | ||
| 125 | */ | ||
| 126 | #define for_each_mem_range_rev(i, type_a, type_b, nid, \ | ||
| 127 | p_start, p_end, p_nid) \ | ||
| 128 | for (i = (u64)ULLONG_MAX, \ | ||
| 129 | __next_mem_range_rev(&i, nid, type_a, type_b, \ | ||
| 130 | p_start, p_end, p_nid); \ | ||
| 131 | i != (u64)ULLONG_MAX; \ | ||
| 132 | __next_mem_range_rev(&i, nid, type_a, type_b, \ | ||
| 133 | p_start, p_end, p_nid)) | ||
| 134 | |||
| 74 | #ifdef CONFIG_MOVABLE_NODE | 135 | #ifdef CONFIG_MOVABLE_NODE |
| 75 | static inline bool memblock_is_hotpluggable(struct memblock_region *m) | 136 | static inline bool memblock_is_hotpluggable(struct memblock_region *m) |
| 76 | { | 137 | { |
| @@ -113,9 +174,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, | |||
| 113 | i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) | 174 | i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) |
| 114 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ | 175 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
| 115 | 176 | ||
| 116 | void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, | ||
| 117 | phys_addr_t *out_end, int *out_nid); | ||
| 118 | |||
| 119 | /** | 177 | /** |
| 120 | * for_each_free_mem_range - iterate through free memblock areas | 178 | * for_each_free_mem_range - iterate through free memblock areas |
| 121 | * @i: u64 used as loop variable | 179 | * @i: u64 used as loop variable |
| @@ -128,13 +186,8 @@ void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, | |||
| 128 | * soon as memblock is initialized. | 186 | * soon as memblock is initialized. |
| 129 | */ | 187 | */ |
| 130 | #define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ | 188 | #define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ |
| 131 | for (i = 0, \ | 189 | for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ |
| 132 | __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \ | 190 | nid, p_start, p_end, p_nid) |
| 133 | i != (u64)ULLONG_MAX; \ | ||
| 134 | __next_free_mem_range(&i, nid, p_start, p_end, p_nid)) | ||
| 135 | |||
| 136 | void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, | ||
| 137 | phys_addr_t *out_end, int *out_nid); | ||
| 138 | 191 | ||
| 139 | /** | 192 | /** |
| 140 | * for_each_free_mem_range_reverse - rev-iterate through free memblock areas | 193 | * for_each_free_mem_range_reverse - rev-iterate through free memblock areas |
| @@ -148,10 +201,8 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, | |||
| 148 | * order. Available as soon as memblock is initialized. | 201 | * order. Available as soon as memblock is initialized. |
| 149 | */ | 202 | */ |
| 150 | #define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ | 203 | #define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ |
| 151 | for (i = (u64)ULLONG_MAX, \ | 204 | for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ |
| 152 | __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \ | 205 | nid, p_start, p_end, p_nid) |
| 153 | i != (u64)ULLONG_MAX; \ | ||
| 154 | __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid)) | ||
| 155 | 206 | ||
| 156 | static inline void memblock_set_region_flags(struct memblock_region *r, | 207 | static inline void memblock_set_region_flags(struct memblock_region *r, |
| 157 | unsigned long flags) | 208 | unsigned long flags) |
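With this change for_each_free_mem_range() becomes a thin wrapper over the generic for_each_mem_range(), passing memblock.memory as @type_a and memblock.reserved as @type_b. The underlying for/next-function iterator pattern, reduced to a self-contained sketch; the list type, sentinel, and next function are invented for illustration:

    #include <stdio.h>

    struct range { unsigned long start, end; };

    /* next function: fill *out and advance *idx; -1UL signals the end. */
    static void next_range(unsigned long *idx, const struct range *tbl,
                           unsigned long cnt, struct range *out)
    {
            if (*idx >= cnt) {
                    *idx = (unsigned long)-1;
                    return;
            }
            *out = tbl[(*idx)++];
    }

    /* The macro packages init/test/advance, as for_each_mem_range() does. */
    #define for_each_range(idx, tbl, cnt, out)                              \
            for ((idx) = 0, next_range(&(idx), (tbl), (cnt), &(out));       \
                 (idx) != (unsigned long)-1;                                \
                 next_range(&(idx), (tbl), (cnt), &(out)))

    int main(void)
    {
            struct range tbl[] = { { 0, 16 }, { 32, 48 }, { 128, 130 } };
            struct range r;
            unsigned long i;

            for_each_range(i, tbl, 3, r)
                    printf("[%lu-%lu)\n", r.start, r.end);
            return 0;
    }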
diff --git a/mm/Kconfig b/mm/Kconfig index 1b5a95f0fa01..28cec518f4d4 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -134,6 +134,9 @@ config HAVE_MEMBLOCK | |||
| 134 | config HAVE_MEMBLOCK_NODE_MAP | 134 | config HAVE_MEMBLOCK_NODE_MAP |
| 135 | boolean | 135 | boolean |
| 136 | 136 | ||
| 137 | config HAVE_MEMBLOCK_PHYS_MAP | ||
| 138 | boolean | ||
| 139 | |||
| 137 | config ARCH_DISCARD_MEMBLOCK | 140 | config ARCH_DISCARD_MEMBLOCK |
| 138 | boolean | 141 | boolean |
| 139 | 142 | ||
diff --git a/mm/memblock.c b/mm/memblock.c index e9d6ca9a01a9..a810ba923cdd 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
| @@ -27,6 +27,9 @@ | |||
| 27 | 27 | ||
| 28 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 28 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
| 29 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 29 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
| 30 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | ||
| 31 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; | ||
| 32 | #endif | ||
| 30 | 33 | ||
| 31 | struct memblock memblock __initdata_memblock = { | 34 | struct memblock memblock __initdata_memblock = { |
| 32 | .memory.regions = memblock_memory_init_regions, | 35 | .memory.regions = memblock_memory_init_regions, |
| @@ -37,6 +40,12 @@ struct memblock memblock __initdata_memblock = { | |||
| 37 | .reserved.cnt = 1, /* empty dummy entry */ | 40 | .reserved.cnt = 1, /* empty dummy entry */ |
| 38 | .reserved.max = INIT_MEMBLOCK_REGIONS, | 41 | .reserved.max = INIT_MEMBLOCK_REGIONS, |
| 39 | 42 | ||
| 43 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | ||
| 44 | .physmem.regions = memblock_physmem_init_regions, | ||
| 45 | .physmem.cnt = 1, /* empty dummy entry */ | ||
| 46 | .physmem.max = INIT_PHYSMEM_REGIONS, | ||
| 47 | #endif | ||
| 48 | |||
| 40 | .bottom_up = false, | 49 | .bottom_up = false, |
| 41 | .current_limit = MEMBLOCK_ALLOC_ANYWHERE, | 50 | .current_limit = MEMBLOCK_ALLOC_ANYWHERE, |
| 42 | }; | 51 | }; |
| @@ -472,7 +481,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type, | |||
| 472 | } | 481 | } |
| 473 | 482 | ||
| 474 | /** | 483 | /** |
| 475 | * memblock_add_region - add new memblock region | 484 | * memblock_add_range - add new memblock region |
| 476 | * @type: memblock type to add new region into | 485 | * @type: memblock type to add new region into |
| 477 | * @base: base address of the new region | 486 | * @base: base address of the new region |
| 478 | * @size: size of the new region | 487 | * @size: size of the new region |
| @@ -487,7 +496,7 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type, | |||
| 487 | * RETURNS: | 496 | * RETURNS: |
| 488 | * 0 on success, -errno on failure. | 497 | * 0 on success, -errno on failure. |
| 489 | */ | 498 | */ |
| 490 | static int __init_memblock memblock_add_region(struct memblock_type *type, | 499 | int __init_memblock memblock_add_range(struct memblock_type *type, |
| 491 | phys_addr_t base, phys_addr_t size, | 500 | phys_addr_t base, phys_addr_t size, |
| 492 | int nid, unsigned long flags) | 501 | int nid, unsigned long flags) |
| 493 | { | 502 | { |
| @@ -569,12 +578,12 @@ repeat: | |||
| 569 | int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, | 578 | int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, |
| 570 | int nid) | 579 | int nid) |
| 571 | { | 580 | { |
| 572 | return memblock_add_region(&memblock.memory, base, size, nid, 0); | 581 | return memblock_add_range(&memblock.memory, base, size, nid, 0); |
| 573 | } | 582 | } |
| 574 | 583 | ||
| 575 | int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) | 584 | int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) |
| 576 | { | 585 | { |
| 577 | return memblock_add_region(&memblock.memory, base, size, | 586 | return memblock_add_range(&memblock.memory, base, size, |
| 578 | MAX_NUMNODES, 0); | 587 | MAX_NUMNODES, 0); |
| 579 | } | 588 | } |
| 580 | 589 | ||
| @@ -654,8 +663,8 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type, | |||
| 654 | return 0; | 663 | return 0; |
| 655 | } | 664 | } |
| 656 | 665 | ||
| 657 | static int __init_memblock __memblock_remove(struct memblock_type *type, | 666 | int __init_memblock memblock_remove_range(struct memblock_type *type, |
| 658 | phys_addr_t base, phys_addr_t size) | 667 | phys_addr_t base, phys_addr_t size) |
| 659 | { | 668 | { |
| 660 | int start_rgn, end_rgn; | 669 | int start_rgn, end_rgn; |
| 661 | int i, ret; | 670 | int i, ret; |
| @@ -671,9 +680,10 @@ static int __init_memblock __memblock_remove(struct memblock_type *type, | |||
| 671 | 680 | ||
| 672 | int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) | 681 | int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) |
| 673 | { | 682 | { |
| 674 | return __memblock_remove(&memblock.memory, base, size); | 683 | return memblock_remove_range(&memblock.memory, base, size); |
| 675 | } | 684 | } |
| 676 | 685 | ||
| 686 | |||
| 677 | int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) | 687 | int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) |
| 678 | { | 688 | { |
| 679 | memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n", | 689 | memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n", |
| @@ -681,7 +691,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) | |||
| 681 | (unsigned long long)base + size - 1, | 691 | (unsigned long long)base + size - 1, |
| 682 | (void *)_RET_IP_); | 692 | (void *)_RET_IP_); |
| 683 | 693 | ||
| 684 | return __memblock_remove(&memblock.reserved, base, size); | 694 | return memblock_remove_range(&memblock.reserved, base, size); |
| 685 | } | 695 | } |
| 686 | 696 | ||
| 687 | static int __init_memblock memblock_reserve_region(phys_addr_t base, | 697 | static int __init_memblock memblock_reserve_region(phys_addr_t base, |
| @@ -696,7 +706,7 @@ static int __init_memblock memblock_reserve_region(phys_addr_t base, | |||
| 696 | (unsigned long long)base + size - 1, | 706 | (unsigned long long)base + size - 1, |
| 697 | flags, (void *)_RET_IP_); | 707 | flags, (void *)_RET_IP_); |
| 698 | 708 | ||
| 699 | return memblock_add_region(_rgn, base, size, nid, flags); | 709 | return memblock_add_range(_rgn, base, size, nid, flags); |
| 700 | } | 710 | } |
| 701 | 711 | ||
| 702 | int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) | 712 | int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) |
| @@ -758,17 +768,19 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) | |||
| 758 | } | 768 | } |
| 759 | 769 | ||
| 760 | /** | 770 | /** |
| 761 | * __next_free_mem_range - next function for for_each_free_mem_range() | 771 | * __next_mem_range - next function for for_each_free_mem_range() etc. |
| 762 | * @idx: pointer to u64 loop variable | 772 | * @idx: pointer to u64 loop variable |
| 763 | * @nid: node selector, %NUMA_NO_NODE for all nodes | 773 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
| 774 | * @type_a: pointer to memblock_type from where the range is taken | ||
| 775 | * @type_b: pointer to memblock_type which excludes memory from being taken | ||
| 764 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL | 776 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 765 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL | 777 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 766 | * @out_nid: ptr to int for nid of the range, can be %NULL | 778 | * @out_nid: ptr to int for nid of the range, can be %NULL |
| 767 | * | 779 | * |
| 768 | * Find the first free area from *@idx which matches @nid, fill the out | 780 | * Find the first area from *@idx which matches @nid, fill the out |
| 769 | * parameters, and update *@idx for the next iteration. The lower 32bit of | 781 | * parameters, and update *@idx for the next iteration. The lower 32bit of |
| 770 | * *@idx contains index into memory region and the upper 32bit indexes the | 782 | * *@idx contains index into type_a and the upper 32bit indexes the |
| 771 | * areas before each reserved region. For example, if reserved regions | 783 | * areas before each region in type_b. For example, if type_b regions |
| 772 | * look like the following, | 784 | * look like the following, |
| 773 | * | 785 | * |
| 774 | * 0:[0-16), 1:[32-48), 2:[128-130) | 786 | * 0:[0-16), 1:[32-48), 2:[128-130) |
| @@ -780,53 +792,77 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) | |||
| 780 | * As both region arrays are sorted, the function advances the two indices | 792 | * As both region arrays are sorted, the function advances the two indices |
| 781 | * in lockstep and returns each intersection. | 793 | * in lockstep and returns each intersection. |
| 782 | */ | 794 | */ |
| 783 | void __init_memblock __next_free_mem_range(u64 *idx, int nid, | 795 | void __init_memblock __next_mem_range(u64 *idx, int nid, |
| 784 | phys_addr_t *out_start, | 796 | struct memblock_type *type_a, |
| 785 | phys_addr_t *out_end, int *out_nid) | 797 | struct memblock_type *type_b, |
| 798 | phys_addr_t *out_start, | ||
| 799 | phys_addr_t *out_end, int *out_nid) | ||
| 786 | { | 800 | { |
| 787 | struct memblock_type *mem = &memblock.memory; | 801 | int idx_a = *idx & 0xffffffff; |
| 788 | struct memblock_type *rsv = &memblock.reserved; | 802 | int idx_b = *idx >> 32; |
| 789 | int mi = *idx & 0xffffffff; | ||
| 790 | int ri = *idx >> 32; | ||
| 791 | 803 | ||
| 792 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) | 804 | if (WARN_ONCE(nid == MAX_NUMNODES, |
| 805 | "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) | ||
| 793 | nid = NUMA_NO_NODE; | 806 | nid = NUMA_NO_NODE; |
| 794 | 807 | ||
| 795 | for ( ; mi < mem->cnt; mi++) { | 808 | for (; idx_a < type_a->cnt; idx_a++) { |
| 796 | struct memblock_region *m = &mem->regions[mi]; | 809 | struct memblock_region *m = &type_a->regions[idx_a]; |
| 810 | |||
| 797 | phys_addr_t m_start = m->base; | 811 | phys_addr_t m_start = m->base; |
| 798 | phys_addr_t m_end = m->base + m->size; | 812 | phys_addr_t m_end = m->base + m->size; |
| 813 | int m_nid = memblock_get_region_node(m); | ||
| 799 | 814 | ||
| 800 | /* only memory regions are associated with nodes, check it */ | 815 | /* only memory regions are associated with nodes, check it */ |
| 801 | if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m)) | 816 | if (nid != NUMA_NO_NODE && nid != m_nid) |
| 802 | continue; | 817 | continue; |
| 803 | 818 | ||
| 804 | /* scan areas before each reservation for intersection */ | 819 | if (!type_b) { |
| 805 | for ( ; ri < rsv->cnt + 1; ri++) { | 820 | if (out_start) |
| 806 | struct memblock_region *r = &rsv->regions[ri]; | 821 | *out_start = m_start; |
| 807 | phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; | 822 | if (out_end) |
| 808 | phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX; | 823 | *out_end = m_end; |
| 824 | if (out_nid) | ||
| 825 | *out_nid = m_nid; | ||
| 826 | idx_a++; | ||
| 827 | *idx = (u32)idx_a | (u64)idx_b << 32; | ||
| 828 | return; | ||
| 829 | } | ||
| 809 | 830 | ||
| 810 | /* if ri advanced past mi, break out to advance mi */ | 831 | /* scan areas before each reservation */ |
| 832 | for (; idx_b < type_b->cnt + 1; idx_b++) { | ||
| 833 | struct memblock_region *r; | ||
| 834 | phys_addr_t r_start; | ||
| 835 | phys_addr_t r_end; | ||
| 836 | |||
| 837 | r = &type_b->regions[idx_b]; | ||
| 838 | r_start = idx_b ? r[-1].base + r[-1].size : 0; | ||
| 839 | r_end = idx_b < type_b->cnt ? | ||
| 840 | r->base : ULLONG_MAX; | ||
| 841 | |||
| 842 | /* | ||
| 843 | * if idx_b advanced past idx_a, | ||
| 844 | * break out to advance idx_a | ||
| 845 | */ | ||
| 811 | if (r_start >= m_end) | 846 | if (r_start >= m_end) |
| 812 | break; | 847 | break; |
| 813 | /* if the two regions intersect, we're done */ | 848 | /* if the two regions intersect, we're done */ |
| 814 | if (m_start < r_end) { | 849 | if (m_start < r_end) { |
| 815 | if (out_start) | 850 | if (out_start) |
| 816 | *out_start = max(m_start, r_start); | 851 | *out_start = |
| 852 | max(m_start, r_start); | ||
| 817 | if (out_end) | 853 | if (out_end) |
| 818 | *out_end = min(m_end, r_end); | 854 | *out_end = min(m_end, r_end); |
| 819 | if (out_nid) | 855 | if (out_nid) |
| 820 | *out_nid = memblock_get_region_node(m); | 856 | *out_nid = m_nid; |
| 821 | /* | 857 | /* |
| 822 | * The region which ends first is advanced | 858 | * The region which ends first is |
| 823 | * for the next iteration. | 859 | * advanced for the next iteration. |
| 824 | */ | 860 | */ |
| 825 | if (m_end <= r_end) | 861 | if (m_end <= r_end) |
| 826 | mi++; | 862 | idx_a++; |
| 827 | else | 863 | else |
| 828 | ri++; | 864 | idx_b++; |
| 829 | *idx = (u32)mi | (u64)ri << 32; | 865 | *idx = (u32)idx_a | (u64)idx_b << 32; |
| 830 | return; | 866 | return; |
| 831 | } | 867 | } |
| 832 | } | 868 | } |
| @@ -837,57 +873,80 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid, | |||
| 837 | } | 873 | } |
| 838 | 874 | ||
| 839 | /** | 875 | /** |
| 840 | * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse() | 876 | * __next_mem_range_rev - generic next function for for_each_*_range_rev() |
| 877 | * | ||
| 878 | * Finds the next range from type_a which is not marked as unsuitable | ||
| 879 | * in type_b. | ||
| 880 | * | ||
| 841 | * @idx: pointer to u64 loop variable | 881 | * @idx: pointer to u64 loop variable |
| 842 | * @nid: node selector, %NUMA_NO_NODE for all nodes | 882 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
| 883 | * @type_a: pointer to memblock_type from where the range is taken | ||
| 884 | * @type_b: pointer to memblock_type which excludes memory from being taken | ||
| 843 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL | 885 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 844 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL | 886 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 845 | * @out_nid: ptr to int for nid of the range, can be %NULL | 887 | * @out_nid: ptr to int for nid of the range, can be %NULL |
| 846 | * | 888 | * |
| 847 | * Reverse of __next_free_mem_range(). | 889 | * Reverse of __next_mem_range(). |
| 848 | * | ||
| 849 | * Linux kernel cannot migrate pages used by itself. Memory hotplug users won't | ||
| 850 | * be able to hot-remove hotpluggable memory used by the kernel. So this | ||
| 851 | * function skip hotpluggable regions if needed when allocating memory for the | ||
| 852 | * kernel. | ||
| 853 | */ | 890 | */ |
| 854 | void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, | 891 | void __init_memblock __next_mem_range_rev(u64 *idx, int nid, |
| 855 | phys_addr_t *out_start, | 892 | struct memblock_type *type_a, |
| 856 | phys_addr_t *out_end, int *out_nid) | 893 | struct memblock_type *type_b, |
| 894 | phys_addr_t *out_start, | ||
| 895 | phys_addr_t *out_end, int *out_nid) | ||
| 857 | { | 896 | { |
| 858 | struct memblock_type *mem = &memblock.memory; | 897 | int idx_a = *idx & 0xffffffff; |
| 859 | struct memblock_type *rsv = &memblock.reserved; | 898 | int idx_b = *idx >> 32; |
| 860 | int mi = *idx & 0xffffffff; | ||
| 861 | int ri = *idx >> 32; | ||
| 862 | 899 | ||
| 863 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) | 900 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
| 864 | nid = NUMA_NO_NODE; | 901 | nid = NUMA_NO_NODE; |
| 865 | 902 | ||
| 866 | if (*idx == (u64)ULLONG_MAX) { | 903 | if (*idx == (u64)ULLONG_MAX) { |
| 867 | mi = mem->cnt - 1; | 904 | idx_a = type_a->cnt - 1; |
| 868 | ri = rsv->cnt; | 905 | idx_b = type_b->cnt; |
| 869 | } | 906 | } |
| 870 | 907 | ||
| 871 | for ( ; mi >= 0; mi--) { | 908 | for (; idx_a >= 0; idx_a--) { |
| 872 | struct memblock_region *m = &mem->regions[mi]; | 909 | struct memblock_region *m = &type_a->regions[idx_a]; |
| 910 | |||
| 873 | phys_addr_t m_start = m->base; | 911 | phys_addr_t m_start = m->base; |
| 874 | phys_addr_t m_end = m->base + m->size; | 912 | phys_addr_t m_end = m->base + m->size; |
| 913 | int m_nid = memblock_get_region_node(m); | ||
| 875 | 914 | ||
| 876 | /* only memory regions are associated with nodes, check it */ | 915 | /* only memory regions are associated with nodes, check it */ |
| 877 | if (nid != NUMA_NO_NODE && nid != memblock_get_region_node(m)) | 916 | if (nid != NUMA_NO_NODE && nid != m_nid) |
| 878 | continue; | 917 | continue; |
| 879 | 918 | ||
| 880 | /* skip hotpluggable memory regions if needed */ | 919 | /* skip hotpluggable memory regions if needed */ |
| 881 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) | 920 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) |
| 882 | continue; | 921 | continue; |
| 883 | 922 | ||
| 884 | /* scan areas before each reservation for intersection */ | 923 | if (!type_b) { |
| 885 | for ( ; ri >= 0; ri--) { | 924 | if (out_start) |
| 886 | struct memblock_region *r = &rsv->regions[ri]; | 925 | *out_start = m_start; |
| 887 | phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0; | 926 | if (out_end) |
| 888 | phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX; | 927 | *out_end = m_end; |
| 928 | if (out_nid) | ||
| 929 | *out_nid = m_nid; | ||
| 930 | idx_a--; | ||
| 931 | *idx = (u32)idx_a | (u64)idx_b << 32; | ||
| 932 | return; | ||
| 933 | } | ||
| 934 | |||
| 935 | /* scan areas before each reservation */ | ||
| 936 | for (; idx_b >= 0; idx_b--) { | ||
| 937 | struct memblock_region *r; | ||
| 938 | phys_addr_t r_start; | ||
| 939 | phys_addr_t r_end; | ||
| 940 | |||
| 941 | r = &type_b->regions[idx_b]; | ||
| 942 | r_start = idx_b ? r[-1].base + r[-1].size : 0; | ||
| 943 | r_end = idx_b < type_b->cnt ? | ||
| 944 | r->base : ULLONG_MAX; | ||
| 945 | /* | ||
| 946 | * if idx_b advanced past idx_a, | ||
| 947 | * break out to advance idx_a | ||
| 948 | */ | ||
| 889 | 949 | ||
| 890 | /* if ri advanced past mi, break out to advance mi */ | ||
| 891 | if (r_end <= m_start) | 950 | if (r_end <= m_start) |
| 892 | break; | 951 | break; |
| 893 | /* if the two regions intersect, we're done */ | 952 | /* if the two regions intersect, we're done */ |
| @@ -897,18 +956,17 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid, | |||
| 897 | if (out_end) | 956 | if (out_end) |
| 898 | *out_end = min(m_end, r_end); | 957 | *out_end = min(m_end, r_end); |
| 899 | if (out_nid) | 958 | if (out_nid) |
| 900 | *out_nid = memblock_get_region_node(m); | 959 | *out_nid = m_nid; |
| 901 | |||
| 902 | if (m_start >= r_start) | 960 | if (m_start >= r_start) |
| 903 | mi--; | 961 | idx_a--; |
| 904 | else | 962 | else |
| 905 | ri--; | 963 | idx_b--; |
| 906 | *idx = (u32)mi | (u64)ri << 32; | 964 | *idx = (u32)idx_a | (u64)idx_b << 32; |
| 907 | return; | 965 | return; |
| 908 | } | 966 | } |
| 909 | } | 967 | } |
| 910 | } | 968 | } |
| 911 | 969 | /* signal end of iteration */ | |
| 912 | *idx = ULLONG_MAX; | 970 | *idx = ULLONG_MAX; |
| 913 | } | 971 | } |
| 914 | 972 | ||
| @@ -1201,7 +1259,7 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) | |||
| 1201 | __func__, (u64)base, (u64)base + size - 1, | 1259 | __func__, (u64)base, (u64)base + size - 1, |
| 1202 | (void *)_RET_IP_); | 1260 | (void *)_RET_IP_); |
| 1203 | kmemleak_free_part(__va(base), size); | 1261 | kmemleak_free_part(__va(base), size); |
| 1204 | __memblock_remove(&memblock.reserved, base, size); | 1262 | memblock_remove_range(&memblock.reserved, base, size); |
| 1205 | } | 1263 | } |
| 1206 | 1264 | ||
| 1207 | /* | 1265 | /* |
| @@ -1287,8 +1345,10 @@ void __init memblock_enforce_memory_limit(phys_addr_t limit) | |||
| 1287 | } | 1345 | } |
| 1288 | 1346 | ||
| 1289 | /* truncate both memory and reserved regions */ | 1347 | /* truncate both memory and reserved regions */ |
| 1290 | __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX); | 1348 | memblock_remove_range(&memblock.memory, max_addr, |
| 1291 | __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX); | 1349 | (phys_addr_t)ULLONG_MAX); |
| 1350 | memblock_remove_range(&memblock.reserved, max_addr, | ||
| 1351 | (phys_addr_t)ULLONG_MAX); | ||
| 1292 | } | 1352 | } |
| 1293 | 1353 | ||
| 1294 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) | 1354 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) |
| @@ -1502,6 +1562,9 @@ static int __init memblock_init_debugfs(void) | |||
| 1502 | return -ENXIO; | 1562 | return -ENXIO; |
| 1503 | debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); | 1563 | debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); |
| 1504 | debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); | 1564 | debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); |
| 1565 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | ||
| 1566 | debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops); | ||
| 1567 | #endif | ||
| 1505 | 1568 | ||
| 1506 | return 0; | 1569 | return 0; |
| 1507 | } | 1570 | } |
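__next_mem_range() packs two cursors into one u64 (the low 32 bits index @type_a, the high 32 bits index the gaps between @type_b regions) and advances them in lockstep, yielding each intersection of a @type_a region with the free space around @type_b. The core walk as a stand-alone program over two sorted interval lists; all names and values here are illustrative, not kernel API:

    #include <stdio.h>
    #include <stdint.h>

    struct reg { uint64_t base, size; };

    /* Lockstep walk, as in __next_mem_range(): regions of 'a' intersected
     * with the gaps before, between, and after the regions of 'b'. */
    static void next_isect(uint64_t *idx,
                           const struct reg *a, uint32_t a_cnt,
                           const struct reg *b, uint32_t b_cnt,
                           uint64_t *s, uint64_t *e)
    {
            uint32_t ia = (uint32_t)*idx, ib = (uint32_t)(*idx >> 32);

            for (; ia < a_cnt; ia++) {
                    uint64_t as = a[ia].base, ae = a[ia].base + a[ia].size;

                    for (; ib < b_cnt + 1; ib++) {
                            uint64_t gs = ib ? b[ib - 1].base + b[ib - 1].size : 0;
                            uint64_t ge = ib < b_cnt ? b[ib].base : UINT64_MAX;

                            if (gs >= ae)	/* gap starts past a[ia]: next a */
                                    break;
                            if (as < ge) {	/* the two ranges intersect */
                                    *s = as > gs ? as : gs;
                                    *e = ae < ge ? ae : ge;
                                    /* advance whichever range ends first */
                                    if (ae <= ge)
                                            ia++;
                                    else
                                            ib++;
                                    *idx = (uint64_t)ia | ((uint64_t)ib << 32);
                                    return;
                            }
                    }
            }
            *idx = UINT64_MAX;	/* signal end of iteration */
    }

    int main(void)
    {
            const struct reg mem[] = { { 0, 64 }, { 96, 64 } };	/* "memory" */
            const struct reg rsv[] = { { 16, 16 }, { 112, 8 } };	/* "reserved" */
            uint64_t idx = 0, s, e;

            for (next_isect(&idx, mem, 2, rsv, 2, &s, &e);
                 idx != UINT64_MAX;
                 next_isect(&idx, mem, 2, rsv, 2, &s, &e))
                    printf("free: [%llu-%llu)\n",
                           (unsigned long long)s, (unsigned long long)e);
            return 0;
    }

This prints the four free intervals [0-16), [32-64), [96-112), and [120-160), i.e. memory minus reserved, which is exactly what for_each_free_mem_range() enumerates with the real memblock lists.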
