 33 files changed, 501 insertions(+), 300 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 6d05ae236036..0a2e44ce077a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2450,17 +2450,17 @@ F:	fs/ecryptfs/
 
 EDAC-CORE
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Supported
 F:	Documentation/edac.txt
-F:	drivers/edac/edac_*
+F:	drivers/edac/
 F:	include/linux/edac.h
 
 EDAC-AMD64
 M:	Doug Thompson <dougthompson@xmission.com>
 M:	Borislav Petkov <borislav.petkov@amd.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Supported
 F:	drivers/edac/amd64_edac*
@@ -2468,35 +2468,35 @@ F:	drivers/edac/amd64_edac*
 EDAC-E752X
 M:	Mark Gross <mark.gross@intel.com>
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/e752x_edac.c
 
 EDAC-E7XXX
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/e7xxx_edac.c
 
 EDAC-I82443BXGX
 M:	Tim Small <tim@buttersideup.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i82443bxgx_edac.c
 
 EDAC-I3000
 M:	Jason Uhlenkott <juhlenko@akamai.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i3000_edac.c
 
 EDAC-I5000
 M:	Doug Thompson <dougthompson@xmission.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i5000_edac.c
@@ -2525,21 +2525,21 @@ F:	drivers/edac/i7core_edac.c
 EDAC-I82975X
 M:	Ranganathan Desikan <ravi@jetztechnologies.com>
 M:	"Arvind R." <arvino55@gmail.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/i82975x_edac.c
 
 EDAC-PASEMI
 M:	Egor Martovetsky <egor@pasemi.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/pasemi_edac.c
 
 EDAC-R82600
 M:	Tim Small <tim@buttersideup.com>
-L:	bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:	linux-edac@vger.kernel.org
 W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/r82600_edac.c
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 11270ca22c0a..96033e2d6845 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -12,7 +12,7 @@ config TILE
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
 	select SYS_HYPERVISOR
-	select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386
+	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT
@@ -69,6 +69,9 @@ config ARCH_PHYS_ADDR_T_64BIT
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
 
+config NEED_DMA_MAP_STATE
+	def_bool y
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -118,7 +121,7 @@ config 64BIT
 
 config ARCH_DEFCONFIG
 	string
-	default "arch/tile/configs/tile_defconfig" if !TILEGX
+	default "arch/tile/configs/tilepro_defconfig" if !TILEGX
 	default "arch/tile/configs/tilegx_defconfig" if TILEGX
 
 source "init/Kconfig"
@@ -240,6 +243,7 @@ endchoice
 
 config PAGE_OFFSET
 	hex
+	depends on !64BIT
 	default 0xF0000000 if VMSPLIT_3_75G
 	default 0xE0000000 if VMSPLIT_3_5G
 	default 0xB0000000 if VMSPLIT_2_75G
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 17acce70569b..9520bc5a4b7f 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -30,7 +30,8 @@ ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
 KBUILD_CFLAGS   += $(CONFIG_DEBUG_EXTRA_FLAGS)
 endif
 
-LIBGCC_PATH     := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC_PATH     := \
+  $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
 # Provide the path to use for "make defconfig".
 KBUILD_DEFCONFIG := $(ARCH)_defconfig
@@ -53,8 +54,6 @@ libs-y += $(LIBGCC_PATH)
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y += arch/tile/
 
-core-$(CONFIG_KVM) += arch/tile/kvm/
-
 ifdef TILERA_ROOT
 INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
 endif
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index f548efeb2de3..d6ba449b5363 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -60,8 +60,8 @@
 	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
 #define SPR_IPI_EVENT_RESET_K \
 	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_MASK_SET_K \
-	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_SET_K \
+	_concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)
 #define INT_IPI_K \
 	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)
 
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index bb696da5d7cd..f2461429a4a4 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -17,6 +17,8 @@
 #ifndef _ASM_TILE_ATOMIC_H
 #define _ASM_TILE_ATOMIC_H
 
+#include <asm/cmpxchg.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
@@ -121,54 +123,6 @@ static inline int atomic_read(const atomic_t *v)
  */
 #define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
 
-/* Nonexistent functions intended to cause link errors. */
-extern unsigned long __xchg_called_with_bad_pointer(void);
-extern unsigned long __cmpxchg_called_with_bad_pointer(void);
-
-#define xchg(ptr, x)						\
-({								\
-	typeof(*(ptr)) __x;					\
-	switch (sizeof(*(ptr))) {				\
-	case 4:							\
-		__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
-			(atomic_t *)(ptr),			\
-			(u32)(typeof((x)-(x)))(x));		\
-		break;						\
-	case 8:							\
-		__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
-			(atomic64_t *)(ptr),			\
-			(u64)(typeof((x)-(x)))(x));		\
-		break;						\
-	default:						\
-		__xchg_called_with_bad_pointer();		\
-	}							\
-	__x;							\
-})
-
-#define cmpxchg(ptr, o, n)					\
-({								\
-	typeof(*(ptr)) __x;					\
-	switch (sizeof(*(ptr))) {				\
-	case 4:							\
-		__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
-			(atomic_t *)(ptr),			\
-			(u32)(typeof((o)-(o)))(o),		\
-			(u32)(typeof((n)-(n)))(n));		\
-		break;						\
-	case 8:							\
-		__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
-			(atomic64_t *)(ptr),			\
-			(u64)(typeof((o)-(o)))(o),		\
-			(u64)(typeof((n)-(n)))(n));		\
-		break;						\
-	default:						\
-		__cmpxchg_called_with_bad_pointer();		\
-	}							\
-	__x;							\
-})
-
-#define tas(ptr) (xchg((ptr), 1))
-
 #endif /* __ASSEMBLY__ */
 
 #ifndef __tilegx__
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 466dc4a39a4f..54d1da826f93 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -200,7 +200,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
+ * Returns non-zero if @v was not @u, and zero otherwise.
  */
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
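The corrected comment matters because callers of atomic64_add_unless() treat the result as a boolean, not as the old value. A minimal hedged sketch of the intended usage (the refs counter and helper name are illustrative, not from the patch):

```c
#include <asm/atomic.h>

static atomic64_t refs = ATOMIC64_INIT(1);

/* With the corrected semantics, the return value is a boolean:
 * non-zero means the add happened; zero means @v already held @u
 * (here zero, i.e. the object is dead and must not be revived). */
static int get_ref_if_live(void)
{
	return atomic64_add_unless(&refs, 1, 0) != 0;
}
```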
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index 58d021a9834f..60b87ee54fb8 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -38,10 +38,10 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
-	unsigned long old, mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval;
+	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
-	old = *addr;
+	oldval = *addr;
 	do {
 		guess = oldval;
 		oldval = atomic64_cmpxchg((atomic64_t *)addr,
@@ -85,7 +85,7 @@ static inline int test_and_change_bit(unsigned nr,
 				      volatile unsigned long *addr)
 {
 	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
-	long guess, oldval = *addr;
+	unsigned long guess, oldval;
 	addr += nr / BITS_PER_LONG;
 	oldval = *addr;
 	do {
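Both fixes make the temporaries plain `unsigned long` so the comparison driving the retry loop is done at the right type. A hedged sketch of the loop shape these bitops share (the helper name is illustrative, not from the patch):

```c
#include <asm/atomic.h>

/* Keep re-reading the word until the cmpxchg confirms no other CPU
 * changed it between our read and our update; the returned value is
 * the word as it was before our toggle took effect. */
static unsigned long toggle_mask(volatile unsigned long *word,
				 unsigned long mask)
{
	unsigned long guess, oldval = *word;

	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)word,
					  guess, guess ^ mask);
	} while (guess != oldval);
	return oldval;
}
```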
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..276f067e3640
--- /dev/null
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
+/*
+ * cmpxchg.h -- forked from asm/atomic.h with this copyright:
+ *
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _ASM_TILE_CMPXCHG_H
+#define _ASM_TILE_CMPXCHG_H
+
+#ifndef __ASSEMBLY__
+
+/* Nonexistent functions intended to cause link errors. */
+extern unsigned long __xchg_called_with_bad_pointer(void);
+extern unsigned long __cmpxchg_called_with_bad_pointer(void);
+
+#define xchg(ptr, x)						\
+({								\
+	typeof(*(ptr)) __x;					\
+	switch (sizeof(*(ptr))) {				\
+	case 4:							\
+		__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
+			(atomic_t *)(ptr),			\
+			(u32)(typeof((x)-(x)))(x));		\
+		break;						\
+	case 8:							\
+		__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
+			(atomic64_t *)(ptr),			\
+			(u64)(typeof((x)-(x)))(x));		\
+		break;						\
+	default:						\
+		__xchg_called_with_bad_pointer();		\
+	}							\
+	__x;							\
+})
+
+#define cmpxchg(ptr, o, n)					\
+({								\
+	typeof(*(ptr)) __x;					\
+	switch (sizeof(*(ptr))) {				\
+	case 4:							\
+		__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
+			(atomic_t *)(ptr),			\
+			(u32)(typeof((o)-(o)))(o),		\
+			(u32)(typeof((n)-(n)))(n));		\
+		break;						\
+	case 8:							\
+		__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
+			(atomic64_t *)(ptr),			\
+			(u64)(typeof((o)-(o)))(o),		\
+			(u64)(typeof((n)-(n)))(n));		\
+		break;						\
+	default:						\
+		__cmpxchg_called_with_bad_pointer();		\
+	}							\
+	__x;							\
+})
+
+#define tas(ptr) (xchg((ptr), 1))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_TILE_CMPXCHG_H */
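The macros dispatch on sizeof(*(ptr)), so one spelling serves both 4- and 8-byte objects. A brief hedged sketch of a caller (the flag variable and function are illustrative, not from the patch):

```c
#include <asm/cmpxchg.h>

static unsigned long flag;	/* illustrative */

static void flag_example(void)
{
	/* Atomically store 1 and learn the previous value; the size of
	 * unsigned long selects the 4- or 8-byte case in the macro. */
	unsigned long was_set = xchg(&flag, 1UL);

	/* Clear it again only if it still holds the value we stored. */
	if (!was_set)
		cmpxchg(&flag, 1UL, 0UL);
}
```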
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index f80f8ceabc67..33cff9a3058b 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -21,7 +21,7 @@
 #define NR_IRQS 32
 
 /* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 1
+#define IRQ_RESCHEDULE 0
 
 #define irq_canonicalize(irq)   (irq)
 
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 72be5904e020..5f8b6a095fd8 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -137,7 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__insn_mf();
-	rw->lock = 0;
+	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h
index 4d97a2db932e..0e9d382a2d45 100644
--- a/arch/tile/include/asm/stack.h
+++ b/arch/tile/include/asm/stack.h
@@ -25,7 +25,6 @@
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;	/* task we are backtracing */
-	pte_t *pgtable;		/* page table for user space access */
 	int end;		/* iteration complete. */
 	int new_context;	/* new context is starting */
 	int profile;		/* profiling, so stop on async intrpt */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index 5f20f920f932..e28c3df4176a 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -64,7 +64,11 @@ void do_breakpoint(struct pt_regs *, int fault_num);
 
 
 #ifdef __tilegx__
+/* kernel/single_step.c */
 void gx_singlestep_handle(struct pt_regs *, int fault_num);
+
+/* kernel/intvec_64.S */
+void fill_ra_stack(void);
 #endif
 
-#endif /* _ASM_TILE_SYSCALLS_H */
+#endif /* _ASM_TILE_TRAPS_H */
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 431e9ae60488..ec91568df880 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -85,6 +85,7 @@ STD_ENTRY(cpu_idle_on_new_stack)
 	/* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
 	nap
+	nop       /* avoid provoking the icache prefetch with a jump */
 	j smp_nap /* we are not architecturally guaranteed not to exit nap */
 	jrp lr    /* clue in the backtracer */
 	STD_ENDPROC(smp_nap)
@@ -105,5 +106,6 @@ STD_ENTRY(_cpu_idle)
 	.global _cpu_idle_nap
_cpu_idle_nap:
 	nap
+	nop       /* avoid provoking the icache prefetch with a jump */
 	jrp lr
 	STD_ENDPROC(_cpu_idle)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index aecc8ed5f39b..5d56a1ef5ba5 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -799,6 +799,10 @@ handle_interrupt:
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1237,7 +1241,10 @@ handle_syscall:
 	bzt     r30, 1f
 	jal     do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:      j       .Lresume_userspace   /* jump into middle of interrupt_return */
+1:      {
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Linvalid_syscall:
 	/* Report an invalid syscall back to the user program */
@@ -1246,7 +1253,10 @@ handle_syscall:
 	 movei  r28, -ENOSYS
 	}
 	sw      r29, r28
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 	/* Return the address for oprofile to suppress in backtraces. */
@@ -1262,7 +1272,10 @@ STD_ENTRY(ret_from_fork)
 	jal     sim_notify_fork
 	jal     schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 	/*
@@ -1376,7 +1389,10 @@ handle_ill:
 
 	jal     send_sigtrap    /* issue a SIGTRAP */
 	FEEDBACK_REENTER(handle_ill)
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 
 .Ldispatch_normal_ill:
 	{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 79c93e10ba27..49d9d6621682 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -22,6 +22,7 @@
 #include <asm/irqflags.h>
 #include <asm/asm-offsets.h>
 #include <asm/types.h>
+#include <asm/signal.h>
 #include <hv/hypervisor.h>
 #include <arch/abi.h>
 #include <arch/interrupts.h>
@@ -605,6 +606,10 @@ handle_interrupt:
  * This routine takes a boolean in r30 indicating if this is an NMI.
  * If so, we also expect a boolean in r31 indicating whether to
  * re-enable the oprofile interrupts.
+ *
+ * Note that .Lresume_userspace is jumped to directly in several
+ * places, and we need to make sure r30 is set correctly in those
+ * callers as well.
  */
 STD_ENTRY(interrupt_return)
 	/* If we're resuming to kernel space, don't check thread flags. */
@@ -1039,11 +1044,28 @@ handle_syscall:
 
 	/* Do syscall trace again, if requested. */
 	ld      r30, r31
-	andi    r30, r30, _TIF_SYSCALL_TRACE
-	beqzt   r30, 1f
+	andi    r0, r30, _TIF_SYSCALL_TRACE
+	{
+	 andi    r0, r30, _TIF_SINGLESTEP
+	 beqzt   r0, 1f
+	}
 	jal     do_syscall_trace
 	FEEDBACK_REENTER(handle_syscall)
-1:      j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	andi    r0, r30, _TIF_SINGLESTEP
+
+1:      beqzt   r0, 2f
+
+	/* Single stepping -- notify ptrace. */
+	{
+	 movei   r0, SIGTRAP
+	 jal     ptrace_notify
+	}
+	FEEDBACK_REENTER(handle_syscall)
+
+2:      {
+	 movei   r30, 0              /* not an NMI */
+	 j       .Lresume_userspace  /* jump into middle of interrupt_return */
+	}
 
 .Lcompat_syscall:
 	/*
@@ -1077,7 +1099,10 @@ handle_syscall:
 	 movei  r28, -ENOSYS
 	}
 	st      r29, r28
-	j       .Lresume_userspace   /* jump into middle of interrupt_return */
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(handle_syscall)
 
 	/* Return the address for oprofile to suppress in backtraces. */
@@ -1093,7 +1118,10 @@ STD_ENTRY(ret_from_fork)
 	jal     sim_notify_fork
 	jal     schedule_tail
 	FEEDBACK_REENTER(ret_from_fork)
-	j       .Lresume_userspace
+	{
+	 movei  r30, 0               /* not an NMI */
+	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
+	}
 	STD_ENDPROC(ret_from_fork)
 
 	/* Various stub interrupt handlers and syscall handlers */
@@ -1156,6 +1184,18 @@ int_unalign:
 	push_extra_callee_saves r0
 	j       do_trap
 
+/* Fill the return address stack with nonzero entries. */
+STD_ENTRY(fill_ra_stack)
+	{
+	 move   r0, lr
+	 jal    1f
+	}
+1:	jal     2f
+2:	jal     3f
+3:	jal     4f
+4:	jrp     r0
+	STD_ENDPROC(fill_ra_stack)
+
 	/* Include .intrpt1 array of interrupt vectors */
 	.section ".intrpt1", "ax"
 
@@ -1166,7 +1206,7 @@ int_unalign:
 #define do_hardwall_trap bad_intr
 #endif
 
-	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
+	int_hand     INT_MEM_ERROR, MEM_ERROR, do_trap
 	int_hand     INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
 #if CONFIG_KERNEL_PL == 2
 	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index b90ab9925674..98d476920106 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -67,6 +67,8 @@ void *module_alloc(unsigned long size)
 	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
 	if (!area)
 		goto error;
+	area->nr_pages = npages;
+	area->pages = pages;
 
 	if (map_vm_area(area, prot_rwx, &pages)) {
 		vunmap(area->addr);
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 30caecac94dc..2d5ef617bb39 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -28,6 +28,7 @@
 #include <linux/tracehook.h>
 #include <linux/signal.h>
 #include <asm/stack.h>
+#include <asm/switch_to.h>
 #include <asm/homecache.h>
 #include <asm/syscalls.h>
 #include <asm/traps.h>
@@ -285,7 +286,7 @@ struct task_struct *validate_current(void)
 	static struct task_struct corrupt = { .comm = "<corrupt>" };
 	struct task_struct *tsk = current;
 	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
-		     (void *)tsk > high_memory ||
+		     (high_memory && (void *)tsk > high_memory) ||
 		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
 		pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
 		tsk = &corrupt;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 92a94f4920ad..bff23f476110 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -103,13 +103,11 @@ unsigned long __initdata pci_reserve_end_pfn = -1U;
 
 static int __init setup_maxmem(char *str)
 {
-	long maxmem_mb;
-	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
-	    maxmem_mb == 0)
+	unsigned long long maxmem;
+	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
 		return -EINVAL;
 
-	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
-		(HPAGE_SHIFT - PAGE_SHIFT);
+	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used to no more than %dMB\n",
 		maxmem_pfn >> (20 - PAGE_SHIFT));
 	return 0;
@@ -119,14 +117,15 @@ early_param("maxmem", setup_maxmem);
 static int __init setup_maxnodemem(char *str)
 {
 	char *endp;
-	long maxnodemem_mb, node;
+	unsigned long long maxnodemem;
+	long node;
 
 	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
-	if (node >= MAX_NUMNODES || *endp != ':' ||
-	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
+	if (node >= MAX_NUMNODES || *endp != ':')
 		return -EINVAL;
 
-	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
+	maxnodemem = memparse(endp+1, NULL);
+	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
 		(HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
 		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
@@ -913,6 +912,13 @@ void __cpuinit setup_cpu(int boot)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
+/*
+ * Note that the kernel can potentially support other compression
+ * techniques than gz, though we don't do so by default.  If we ever
+ * decide to do so we can either look for other filename extensions,
+ * or just allow a file with this name to be compressed with an
+ * arbitrary compressor (somewhat counterintuitively).
+ */
 static int __initdata set_initramfs_file;
 static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
 
@@ -928,9 +934,9 @@ static int __init setup_initramfs_file(char *str)
 early_param("initramfs_file", setup_initramfs_file);
 
 /*
- * We look for an additional "initramfs.cpio.gz" file in the hvfs.
+ * We look for an "initramfs.cpio.gz" file in the hvfs.
  * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs after any built-in initramfs_data.
+ * unpacked to the initramfs.
  */
 static void __init load_hv_initrd(void)
 {
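Switching from strict_strtol() to memparse() changes the boot-argument format: maxmem= and maxnodemem= now take byte counts with the usual K/M/G suffixes rather than bare megabyte values. A hedged sketch of the new parsing (the values and helper name are illustrative):

```c
#include <linux/kernel.h>	/* memparse() */

/* Illustrative only: with memparse(), "maxmem=512M" and
 * "maxmem=536870912" are equivalent; the result is rounded down to
 * whole huge pages before being converted to a small-page count. */
static void __init maxmem_example(void)
{
	unsigned long long bytes = memparse("512M", NULL);	/* 512 << 20 */
	unsigned long pfn = (bytes >> HPAGE_SHIFT) <<
		(HPAGE_SHIFT - PAGE_SHIFT);

	pr_info("example: %llu bytes -> %lu pages\n", bytes, pfn);
}
```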
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index bc1eb586e24d..9efbc1391b3c 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -153,6 +153,25 @@ static tile_bundle_bits rewrite_load_store_unaligned(
 	if (((unsigned long)addr % size) == 0)
 		return bundle;
 
+	/*
+	 * Return SIGBUS with the unaligned address, if requested.
+	 * Note that we return SIGBUS even for completely invalid addresses
+	 * as long as they are in fact unaligned; this matches what the
+	 * tilepro hardware would be doing, if it could provide us with the
+	 * actual bad address in an SPR, which it doesn't.
+	 */
+	if (unaligned_fixup == 0) {
+		siginfo_t info = {
+			.si_signo = SIGBUS,
+			.si_code = BUS_ADRALN,
+			.si_addr = addr
+		};
+		trace_unhandled_signal("unaligned trap", regs,
+				       (unsigned long)addr, SIGBUS);
+		force_sig_info(info.si_signo, &info, current);
+		return (tilepro_bundle_bits) 0;
+	}
+
 #ifndef __LITTLE_ENDIAN
 # error We assume little-endian representation with copy_xx_user size 2 here
 #endif
@@ -192,18 +211,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
 		return (tile_bundle_bits) 0;
 	}
 
-	if (unaligned_fixup == 0) {
-		siginfo_t info = {
-			.si_signo = SIGBUS,
-			.si_code = BUS_ADRALN,
-			.si_addr = addr
-		};
-		trace_unhandled_signal("unaligned trap", regs,
-				       (unsigned long)addr, SIGBUS);
-		force_sig_info(info.si_signo, &info, current);
-		return (tile_bundle_bits) 0;
-	}
-
 	if (unaligned_printk || unaligned_fixup_count == 0) {
 		pr_info("Process %d/%s: PC %#lx: Fixup of"
 			" unaligned %s at %#lx.\n",
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index a44e103c5a63..91da0f721958 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -103,7 +103,7 @@ static void smp_stop_cpu_interrupt(void)
 	set_cpu_online(smp_processor_id(), 0);
 	arch_local_irq_disable_all();
 	for (;;)
-		asm("nap");
+		asm("nap; nop");
 }
 
 /* This function calls the 'stop' function on all other CPUs in the system. */
@@ -113,6 +113,12 @@ void smp_send_stop(void)
 	send_IPI_allbutself(MSG_TAG_STOP_CPU);
 }
 
+/* On panic, just wait; we may get an smp_send_stop() later on. */
+void panic_smp_self_stop(void)
+{
+	while (1)
+		asm("nap; nop");
+}
 
 /*
  * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
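The same nap/nop pairing added in entry.S appears here: the trailing nop keeps the icache prefetcher from being provoked by whatever instruction would otherwise follow the nap. A hedged sketch of the idiom from C (the helper name is illustrative, not from the patch):

```c
/* Illustrative helper: park this CPU for good. The nop after nap
 * avoids provoking the icache prefetch with a jump immediately
 * following the nap instruction. */
static void park_cpu_forever(void)
{
	for (;;)
		asm("nap; nop");
}
```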
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index 37ee4d037e0b..b2f44c28dda6 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c | |||
| @@ -21,10 +21,12 @@ | |||
| 21 | #include <linux/stacktrace.h> | 21 | #include <linux/stacktrace.h> |
| 22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
| 23 | #include <linux/mmzone.h> | 23 | #include <linux/mmzone.h> |
| 24 | #include <linux/dcache.h> | ||
| 25 | #include <linux/fs.h> | ||
| 24 | #include <asm/backtrace.h> | 26 | #include <asm/backtrace.h> |
| 25 | #include <asm/page.h> | 27 | #include <asm/page.h> |
| 26 | #include <asm/tlbflush.h> | ||
| 27 | #include <asm/ucontext.h> | 28 | #include <asm/ucontext.h> |
| 29 | #include <asm/switch_to.h> | ||
| 28 | #include <asm/sigframe.h> | 30 | #include <asm/sigframe.h> |
| 29 | #include <asm/stack.h> | 31 | #include <asm/stack.h> |
| 30 | #include <arch/abi.h> | 32 | #include <arch/abi.h> |
| @@ -44,72 +46,23 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp) | |||
| 44 | return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; | 46 | return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; |
| 45 | } | 47 | } |
| 46 | 48 | ||
| 47 | /* Is address valid for reading? */ | ||
| 48 | static int valid_address(struct KBacktraceIterator *kbt, unsigned long address) | ||
| 49 | { | ||
| 50 | HV_PTE *l1_pgtable = kbt->pgtable; | ||
| 51 | HV_PTE *l2_pgtable; | ||
| 52 | unsigned long pfn; | ||
| 53 | HV_PTE pte; | ||
| 54 | struct page *page; | ||
| 55 | |||
| 56 | if (l1_pgtable == NULL) | ||
| 57 | return 0; /* can't read user space in other tasks */ | ||
| 58 | |||
| 59 | #ifdef CONFIG_64BIT | ||
| 60 | /* Find the real l1_pgtable by looking in the l0_pgtable. */ | ||
| 61 | pte = l1_pgtable[HV_L0_INDEX(address)]; | ||
| 62 | if (!hv_pte_get_present(pte)) | ||
| 63 | return 0; | ||
| 64 | pfn = hv_pte_get_pfn(pte); | ||
| 65 | if (pte_huge(pte)) { | ||
| 66 | if (!pfn_valid(pfn)) { | ||
| 67 | pr_err("L0 huge page has bad pfn %#lx\n", pfn); | ||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | return hv_pte_get_present(pte) && hv_pte_get_readable(pte); | ||
| 71 | } | ||
| 72 | page = pfn_to_page(pfn); | ||
| 73 | BUG_ON(PageHighMem(page)); /* No HIGHMEM on 64-bit. */ | ||
| 74 | l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn); | ||
| 75 | #endif | ||
| 76 | pte = l1_pgtable[HV_L1_INDEX(address)]; | ||
| 77 | if (!hv_pte_get_present(pte)) | ||
| 78 | return 0; | ||
| 79 | pfn = hv_pte_get_pfn(pte); | ||
| 80 | if (pte_huge(pte)) { | ||
| 81 | if (!pfn_valid(pfn)) { | ||
| 82 | pr_err("huge page has bad pfn %#lx\n", pfn); | ||
| 83 | return 0; | ||
| 84 | } | ||
| 85 | return hv_pte_get_present(pte) && hv_pte_get_readable(pte); | ||
| 86 | } | ||
| 87 | |||
| 88 | page = pfn_to_page(pfn); | ||
| 89 | if (PageHighMem(page)) { | ||
| 90 | pr_err("L2 page table not in LOWMEM (%#llx)\n", | ||
| 91 | HV_PFN_TO_CPA(pfn)); | ||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn); | ||
| 95 | pte = l2_pgtable[HV_L2_INDEX(address)]; | ||
| 96 | return hv_pte_get_present(pte) && hv_pte_get_readable(pte); | ||
| 97 | } | ||
| 98 | |||
| 99 | /* Callback for backtracer; basically a glorified memcpy */ | 49 | /* Callback for backtracer; basically a glorified memcpy */ |
| 100 | static bool read_memory_func(void *result, unsigned long address, | 50 | static bool read_memory_func(void *result, unsigned long address, |
| 101 | unsigned int size, void *vkbt) | 51 | unsigned int size, void *vkbt) |
| 102 | { | 52 | { |
| 103 | int retval; | 53 | int retval; |
| 104 | struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; | 54 | struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; |
| 55 | |||
| 56 | if (address == 0) | ||
| 57 | return 0; | ||
| 105 | if (__kernel_text_address(address)) { | 58 | if (__kernel_text_address(address)) { |
| 106 | /* OK to read kernel code. */ | 59 | /* OK to read kernel code. */ |
| 107 | } else if (address >= PAGE_OFFSET) { | 60 | } else if (address >= PAGE_OFFSET) { |
| 108 | /* We only tolerate kernel-space reads of this task's stack */ | 61 | /* We only tolerate kernel-space reads of this task's stack */ |
| 109 | if (!in_kernel_stack(kbt, address)) | 62 | if (!in_kernel_stack(kbt, address)) |
| 110 | return 0; | 63 | return 0; |
| 111 | } else if (!valid_address(kbt, address)) { | 64 | } else if (!kbt->is_current) { |
| 112 | return 0; /* invalid user-space address */ | 65 | return 0; /* can't read from other user address spaces */ |
| 113 | } | 66 | } |
| 114 | pagefault_disable(); | 67 | pagefault_disable(); |
| 115 | retval = __copy_from_user_inatomic(result, | 68 | retval = __copy_from_user_inatomic(result, |
| @@ -127,6 +80,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) | |||
| 127 | unsigned long sp = kbt->it.sp; | 80 | unsigned long sp = kbt->it.sp; |
| 128 | struct pt_regs *p; | 81 | struct pt_regs *p; |
| 129 | 82 | ||
| 83 | if (sp % sizeof(long) != 0) | ||
| 84 | return NULL; | ||
| 130 | if (!in_kernel_stack(kbt, sp)) | 85 | if (!in_kernel_stack(kbt, sp)) |
| 131 | return NULL; | 86 | return NULL; |
| 132 | if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) | 87 | if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) |
| @@ -169,27 +124,27 @@ static int is_sigreturn(unsigned long pc) | |||
| 169 | } | 124 | } |
| 170 | 125 | ||
| 171 | /* Return a pt_regs pointer for a valid signal handler frame */ | 126 | /* Return a pt_regs pointer for a valid signal handler frame */ |
| 172 | static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) | 127 | static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt, |
| 128 | struct rt_sigframe* kframe) | ||
| 173 | { | 129 | { |
| 174 | BacktraceIterator *b = &kbt->it; | 130 | BacktraceIterator *b = &kbt->it; |
| 175 | 131 | ||
| 176 | if (b->pc == VDSO_BASE) { | 132 | if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET && |
| 177 | struct rt_sigframe *frame; | 133 | b->sp % sizeof(long) == 0) { |
| 178 | unsigned long sigframe_top = | 134 | int retval; |
| 179 | b->sp + sizeof(struct rt_sigframe) - 1; | 135 | pagefault_disable(); |
| 180 | if (!valid_address(kbt, b->sp) || | 136 | retval = __copy_from_user_inatomic( |
| 181 | !valid_address(kbt, sigframe_top)) { | 137 | kframe, (void __user __force *)b->sp, |
| 182 | if (kbt->verbose) | 138 | sizeof(*kframe)); |
| 183 | pr_err(" (odd signal: sp %#lx?)\n", | 139 | pagefault_enable(); |
| 184 | (unsigned long)(b->sp)); | 140 | if (retval != 0 || |
| 141 | (unsigned int)(kframe->info.si_signo) >= _NSIG) | ||
| 185 | return NULL; | 142 | return NULL; |
| 186 | } | ||
| 187 | frame = (struct rt_sigframe *)b->sp; | ||
| 188 | if (kbt->verbose) { | 143 | if (kbt->verbose) { |
| 189 | pr_err(" <received signal %d>\n", | 144 | pr_err(" <received signal %d>\n", |
| 190 | frame->info.si_signo); | 145 | kframe->info.si_signo); |
| 191 | } | 146 | } |
| 192 | return (struct pt_regs *)&frame->uc.uc_mcontext; | 147 | return (struct pt_regs *)&kframe->uc.uc_mcontext; |
| 193 | } | 148 | } |
| 194 | return NULL; | 149 | return NULL; |
| 195 | } | 150 | } |
| @@ -202,10 +157,11 @@ static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt) | |||
| 202 | static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) | 157 | static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) |
| 203 | { | 158 | { |
| 204 | struct pt_regs *p; | 159 | struct pt_regs *p; |
| 160 | struct rt_sigframe kframe; | ||
| 205 | 161 | ||
| 206 | p = valid_fault_handler(kbt); | 162 | p = valid_fault_handler(kbt); |
| 207 | if (p == NULL) | 163 | if (p == NULL) |
| 208 | p = valid_sigframe(kbt); | 164 | p = valid_sigframe(kbt, &kframe); |
| 209 | if (p == NULL) | 165 | if (p == NULL) |
| 210 | return 0; | 166 | return 0; |
| 211 | backtrace_init(&kbt->it, read_memory_func, kbt, | 167 | backtrace_init(&kbt->it, read_memory_func, kbt, |
| @@ -265,41 +221,19 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt, | |||
| 265 | 221 | ||
| 266 | /* | 222 | /* |
| 267 | * Set up callback information. We grab the kernel stack base | 223 | * Set up callback information. We grab the kernel stack base |
| 268 | * so we will allow reads of that address range, and if we're | 224 | * so we will allow reads of that address range. |
| 269 | * asking about the current process we grab the page table | ||
| 270 | * so we can check user accesses before trying to read them. | ||
| 271 | * We flush the TLB to avoid any weird skew issues. | ||
| 272 | */ | 225 | */ |
| 273 | is_current = (t == NULL); | 226 | is_current = (t == NULL || t == current); |
| 274 | kbt->is_current = is_current; | 227 | kbt->is_current = is_current; |
| 275 | if (is_current) | 228 | if (is_current) |
| 276 | t = validate_current(); | 229 | t = validate_current(); |
| 277 | kbt->task = t; | 230 | kbt->task = t; |
| 278 | kbt->pgtable = NULL; | ||
| 279 | kbt->verbose = 0; /* override in caller if desired */ | 231 | kbt->verbose = 0; /* override in caller if desired */ |
| 280 | kbt->profile = 0; /* override in caller if desired */ | 232 | kbt->profile = 0; /* override in caller if desired */ |
| 281 | kbt->end = KBT_ONGOING; | 233 | kbt->end = KBT_ONGOING; |
| 282 | kbt->new_context = 0; | 234 | kbt->new_context = 1; |
| 283 | if (is_current) { | 235 | if (is_current) |
| 284 | HV_PhysAddr pgdir_pa = hv_inquire_context().page_table; | ||
| 285 | if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) { | ||
| 286 | /* | ||
| 287 | * Not just an optimization: this also allows | ||
| 288 | * this to work at all before va/pa mappings | ||
| 289 | * are set up. | ||
| 290 | */ | ||
| 291 | kbt->pgtable = swapper_pg_dir; | ||
| 292 | } else { | ||
| 293 | struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa)); | ||
| 294 | if (!PageHighMem(page)) | ||
| 295 | kbt->pgtable = __va(pgdir_pa); | ||
| 296 | else | ||
| 297 | pr_err("page table not in LOWMEM" | ||
| 298 | " (%#llx)\n", pgdir_pa); | ||
| 299 | } | ||
| 300 | local_flush_tlb_all(); | ||
| 301 | validate_stack(regs); | 236 | validate_stack(regs); |
| 302 | } | ||
| 303 | 237 | ||
| 304 | if (regs == NULL) { | 238 | if (regs == NULL) { |
| 305 | if (is_current || t->state == TASK_RUNNING) { | 239 | if (is_current || t->state == TASK_RUNNING) { |
| @@ -345,6 +279,78 @@ void KBacktraceIterator_next(struct KBacktraceIterator *kbt) | |||
| 345 | } | 279 | } |
| 346 | EXPORT_SYMBOL(KBacktraceIterator_next); | 280 | EXPORT_SYMBOL(KBacktraceIterator_next); |
| 347 | 281 | ||
| 282 | static void describe_addr(struct KBacktraceIterator *kbt, | ||
| 283 | unsigned long address, | ||
| 284 | int have_mmap_sem, char *buf, size_t bufsize) | ||
| 285 | { | ||
| 286 | struct vm_area_struct *vma; | ||
| 287 | size_t namelen, remaining; | ||
| 288 | unsigned long size, offset, adjust; | ||
| 289 | char *p, *modname; | ||
| 290 | const char *name; | ||
| 291 | int rc; | ||
| 292 | |||
| 293 | /* | ||
| 294 | * Look one byte back for every caller frame (i.e. those that | ||
| 295 | * aren't a new context) so we look up symbol data for the | ||
| 296 | * call itself, not the following instruction, which may be on | ||
| 297 | * a different line (or in a different function). | ||
| 298 | */ | ||
| 299 | adjust = !kbt->new_context; | ||
| 300 | address -= adjust; | ||
| 301 | |||
| 302 | if (address >= PAGE_OFFSET) { | ||
| 303 | /* Handle kernel symbols. */ | ||
| 304 | BUG_ON(bufsize < KSYM_NAME_LEN); | ||
| 305 | name = kallsyms_lookup(address, &size, &offset, | ||
| 306 | &modname, buf); | ||
| 307 | if (name == NULL) { | ||
| 308 | buf[0] = '\0'; | ||
| 309 | return; | ||
| 310 | } | ||
| 311 | namelen = strlen(buf); | ||
| 312 | remaining = (bufsize - 1) - namelen; | ||
| 313 | p = buf + namelen; | ||
| 314 | rc = snprintf(p, remaining, "+%#lx/%#lx ", | ||
| 315 | offset + adjust, size); | ||
| 316 | if (modname && rc < remaining) | ||
| 317 | snprintf(p + rc, remaining - rc, "[%s] ", modname); | ||
| 318 | buf[bufsize-1] = '\0'; | ||
| 319 | return; | ||
| 320 | } | ||
| 321 | |||
| 322 | /* If we don't have the mmap_sem, we can't show any more info. */ | ||
| 323 | buf[0] = '\0'; | ||
| 324 | if (!have_mmap_sem) | ||
| 325 | return; | ||
| 326 | |||
| 327 | /* Find vma info. */ | ||
| 328 | vma = find_vma(kbt->task->mm, address); | ||
| 329 | if (vma == NULL || address < vma->vm_start) { | ||
| 330 | snprintf(buf, bufsize, "[unmapped address] "); | ||
| 331 | return; | ||
| 332 | } | ||
| 333 | |||
| 334 | if (vma->vm_file) { | ||
+		char *s;
+		p = d_path(&vma->vm_file->f_path, buf, bufsize);
+		if (IS_ERR(p))
+			p = "?";
+		s = strrchr(p, '/');
+		if (s)
+			p = s+1;
+	} else {
+		p = "anon";
+	}
+
+	/* Generate a string description of the vma info. */
+	namelen = strlen(p);
+	remaining = (bufsize - 1) - namelen;
+	memmove(buf, p, namelen);
+	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
+		 vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
@@ -353,6 +359,7 @@ EXPORT_SYMBOL(KBacktraceIterator_next);
 void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 {
 	int i;
+	int have_mmap_sem = 0;
 
 	if (headers) {
 		/*
@@ -369,31 +376,16 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
-		char *modname;
-		const char *name;
-		unsigned long address = kbt->it.pc;
-		unsigned long offset, size;
 		char namebuf[KSYM_NAME_LEN+100];
+		unsigned long address = kbt->it.pc;
 
-		if (address >= PAGE_OFFSET)
-			name = kallsyms_lookup(address, &size, &offset,
-					       &modname, namebuf);
-		else
-			name = NULL;
-
-		if (!name)
-			namebuf[0] = '\0';
-		else {
-			size_t namelen = strlen(namebuf);
-			size_t remaining = (sizeof(namebuf) - 1) - namelen;
-			char *p = namebuf + namelen;
-			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
-					  offset, size);
-			if (modname && rc < remaining)
-				snprintf(p + rc, remaining - rc,
-					 "[%s] ", modname);
-			namebuf[sizeof(namebuf)-1] = '\0';
-		}
+		/* Try to acquire the mmap_sem as we pass into userspace. */
+		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+			have_mmap_sem =
+				down_read_trylock(&kbt->task->mm->mmap_sem);
+
+		describe_addr(kbt, address, have_mmap_sem,
+			      namebuf, sizeof(namebuf));
 
 		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
 		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
@@ -408,6 +400,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 		pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
+	if (have_mmap_sem)
+		up_read(&kbt->task->mm->mmap_sem);
 }
 EXPORT_SYMBOL(tile_show_stack);
 
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 2bb6602a1ee7..73cff814ac57 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -200,7 +200,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 {
 	siginfo_t info = { 0 };
 	int signo, code;
-	unsigned long address;
+	unsigned long address = 0;
 	bundle_bits instr;
 
 	/* Re-enable interrupts. */
@@ -223,6 +223,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	}
 
 	switch (fault_num) {
+	case INT_MEM_ERROR:
+		signo = SIGBUS;
+		code = BUS_OBJERR;
+		break;
 	case INT_ILL:
 		if (copy_from_user(&instr, (void __user *)regs->pc,
 				   sizeof(instr))) {
@@ -289,7 +293,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		address = regs->pc;
 		break;
 #ifdef __tilegx__
-	case INT_ILL_TRANS:
+	case INT_ILL_TRANS: {
+		/* Avoid a hardware erratum with the return address stack. */
+		fill_ra_stack();
+
 		signo = SIGSEGV;
 		code = SEGV_MAPERR;
 		if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
@@ -297,6 +304,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		else
 			address = 0;  /* FIXME: GX: single-step for address */
 		break;
+	}
 #endif
 	default:
 		panic("Unexpected do_trap interrupt number %d", fault_num);
@@ -308,7 +316,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	info.si_addr = (void __user *)address;
 	if (signo == SIGILL)
 		info.si_trapno = fault_num;
-	trace_unhandled_signal("trap", regs, address, signo);
+	if (signo != SIGTRAP)
+		trace_unhandled_signal("trap", regs, address, signo);
 	force_sig_info(signo, &info, current);
 }
 
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 0c26086ecbef..985f59858234 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -7,6 +7,7 @@ lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \
 	strchr_$(BITS).o strlen_$(BITS).o
 
 ifeq ($(CONFIG_TILEGX),y)
+CFLAGS_REMOVE_memcpy_user_64.o = -fno-omit-frame-pointer
 lib-y += memcpy_user_64.o
 else
 lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 8928aace7a64..db4fb89e12d8 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -39,7 +39,21 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
 {
 	char *p, *base;
 	size_t step_size, load_count;
+
+	/*
+	 * On TILEPro the striping granularity is a fixed 8KB; on
+	 * TILE-Gx it is configurable, and we rely on the fact that
+	 * the hypervisor always configures maximum striping, so that
+	 * bits 9 and 10 of the PA are part of the stripe function, so
+	 * every 512 bytes we hit a striping boundary.
+	 */
+#ifdef __tilegx__
+	const unsigned long STRIPE_WIDTH = 512;
+#else
 	const unsigned long STRIPE_WIDTH = 8192;
+#endif
+
 #ifdef __tilegx__
 	/*
 	 * On TILE-Gx, we must disable the dstream prefetcher before doing
@@ -74,7 +88,7 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
 	 * memory, that one load would be sufficient, but since we may
 	 * be, we also need to back up to the last load issued to
 	 * another memory controller, which would be the point where
-	 * we crossed an 8KB boundary (the granularity of striping
+	 * we crossed a "striping" boundary (the granularity of striping
 	 * across memory controllers).  Keep backing up and doing this
 	 * until we are before the beginning of the buffer, or have
 	 * hit all the controllers.
@@ -88,12 +102,22 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
 	 * every cache line on a full memory stripe on each
 	 * controller" that we simply do that, to simplify the logic.
 	 *
-	 * FIXME: See bug 9535 for some issues with this code.
+	 * On TILE-Gx the hash-for-home function is much more complex,
+	 * with the upshot being we can't readily guarantee we have
+	 * hit both entries in the 128-entry AMT that were hit by any
+	 * load in the entire range, so we just re-load them all.
+	 * With larger buffers, we may want to consider using a hypervisor
+	 * trap to issue loads directly to each hash-for-home tile for
+	 * each controller (doing it from Linux would trash the TLB).
 	 */
 	if (hfh) {
 		step_size = L2_CACHE_BYTES;
+#ifdef __tilegx__
+		load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
+#else
 		load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
 			      (1 << CHIP_LOG_NUM_MSHIMS());
+#endif
 	} else {
 		step_size = STRIPE_WIDTH;
 		load_count = (1 << CHIP_LOG_NUM_MSHIMS());
@@ -109,7 +133,7 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
 
 	/* Figure out how far back we need to go. */
 	base = p - (step_size * (load_count - 2));
-	if ((long)base < (long)buffer)
+	if ((unsigned long)base < (unsigned long)buffer)
 		base = buffer;
 
 	/*
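The effect of the STRIPE_WIDTH change is easiest to see in isolation: the non-hash-for-home path issues one load per stripe, stepping backwards from the end of the buffer until every memory controller has been touched. A simplified sketch of that loop (constants and names illustrative; the real code also handles hash-for-home and the TILE-Gx prefetcher):

/* Sketch: one load per controller stripe, walking backwards. */
static void touch_each_controller(char *buffer, size_t size,
				  unsigned long stripe_width,
				  int num_controllers)
{
	volatile char *p = buffer + size - 1;
	char *base = (char *)p - stripe_width * (num_controllers - 1);

	if (base < buffer)
		base = buffer;
	for (; (char *)p >= base; p -= stripe_width)
		(void)*p;	/* force a cacheable load from this stripe */
}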
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 4763b3aff1cc..37440caa7370 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -14,7 +14,13 @@
  * Do memcpy(), but trap and return "n" when a load or store faults.
  *
  * Note: this idiom only works when memcpy() compiles to a leaf function.
- * If "sp" is updated during memcpy, the "jrp lr" will be incorrect.
+ * Here "leaf function" not only means it makes no calls, but also
+ * that it performs no stack operations (no sp or frame-pointer
+ * updates) and uses no callee-saved registers; otherwise "jrp lr"
+ * would be incorrect, since stack-frame unwinding is bypassed.
+ * memcpy() is simple enough that these conditions hold here, but we
+ * need to be careful when modifying this file.  This is not a clean
+ * solution, but it is the best one so far.
 *
 * Also note that we are capturing "n" from the containing scope here.
 */
diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h
index c10109809132..6ac37509faca 100644
--- a/arch/tile/lib/spinlock_common.h
+++ b/arch/tile/lib/spinlock_common.h
@@ -60,5 +60,5 @@ static void delay_backoff(int iterations)
 	loops += __insn_crc32_32(stack_pointer, get_cycles_low()) &
 		(loops - 1);
 
-	relax(1 << exponent);
+	relax(loops);
 }
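This one-line fix is easy to miss: `loops` already held the jittered value, but `relax(1 << exponent)` discarded the jitter, so contending cores backed off in lockstep. The intended behavior, as a portable sketch (rand32() stands in for the crc32-of-cycle-counter trick; the cap is illustrative):

/* Sketch: capped exponential backoff with random jitter. */
static void backoff(int iterations, unsigned int (*rand32)(void))
{
	const int max_exponent = 10;	/* illustrative cap */
	int exponent = iterations < max_exponent ? iterations : max_exponent;
	unsigned int loops = 1u << exponent;

	/* Randomize within [loops, 2*loops) so cores desynchronize. */
	loops += rand32() & (loops - 1);
	while (loops--)
		;	/* spin; a real kernel would use cpu_relax() */
}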
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index cba30e9547b4..22e58f51ed23 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -130,7 +130,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 }
 
 /*
- * Handle a fault on the vmalloc or module mapping area
+ * Handle a fault on the vmalloc area.
 */
 static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
 {
@@ -203,9 +203,14 @@ static pgd_t *get_current_pgd(void)
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
+ *
+ * If we find a migrating PTE while we're in an NMI context, and we're
+ * at a PC that has a registered exception handler, we don't wait,
+ * since this thread may (e.g.) have been interrupted while migrating
+ * its own stack, which would then cause us to self-deadlock.
 */
 static int handle_migrating_pte(pgd_t *pgd, int fault_num,
-				unsigned long address,
+				unsigned long address, unsigned long pc,
 				int is_kernel_mode, int write)
 {
 	pud_t *pud;
@@ -227,6 +232,8 @@ static int handle_migrating_pte(pgd_t *pgd, int fault_num,
 		pte_offset_kernel(pmd, address);
 	pteval = *pte;
 	if (pte_migrating(pteval)) {
+		if (in_nmi() && search_exception_tables(pc))
+			return 0;
 		wait_for_migration(pte);
 		return 1;
 	}
@@ -300,7 +307,7 @@ static int handle_page_fault(struct pt_regs *regs,
 	 * rather than trying to patch up the existing PTE.
 	 */
 	pgd = get_current_pgd();
-	if (handle_migrating_pte(pgd, fault_num, address,
+	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
 				 is_kernel_mode, write))
 		return 1;
 
@@ -335,9 +342,12 @@ static int handle_page_fault(struct pt_regs *regs,
 	/*
 	 * If we're trying to touch user-space addresses, we must
 	 * be either at PL0, or else with interrupts enabled in the
-	 * kernel, so either way we can re-enable interrupts here.
+	 * kernel, so either way we can re-enable interrupts here
+	 * unless we are doing atomic access to user space with
+	 * interrupts disabled.
 	 */
-	local_irq_enable();
+	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
+		local_irq_enable();
 
 	mm = tsk->mm;
 
@@ -665,7 +675,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 	 */
 	if (fault_num == INT_DTLB_ACCESS)
 		write = 1;
-	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
+	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
 		return state;
 
 	/* Return zero so that we continue on with normal fault handling. */
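The new pc argument exists only to support the rule spelled out in the comment: in NMI context, never block waiting for a PTE migration if the faulting instruction has an exception-table fixup to fall back on. The decision, isolated as a sketch (may_wait_for_migration is a hypothetical name):

/* Sketch: blocking on wait_for_migration() from NMI context could
 * self-deadlock, e.g. if this thread was interrupted while migrating
 * its own stack; prefer failing through the fixup instead.
 */
static int may_wait_for_migration(unsigned long pc)
{
	if (in_nmi() && search_exception_tables(pc))
		return 0;	/* a fixup exists: take the fault path */
	return 1;
}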
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 1cc6ae477c98..499f73770b05 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -394,6 +394,7 @@ int page_home(struct page *page)
 		return pte_to_home(*virt_to_pte(NULL, kva));
 	}
 }
+EXPORT_SYMBOL(page_home);
 
 void homecache_change_page_home(struct page *page, int order, int home)
 {
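Exporting page_home() makes the page-home query available to modules; a minimal sketch of a module-side caller (the module itself is hypothetical, built against a tile kernel of this era):

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/homecache.h>

/* Sketch: query and log the home cache of a freshly allocated page. */
static int __init homeprobe_init(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	pr_info("homeprobe: page home is %d\n", page_home(page));
	__free_page(page);
	return 0;
}

static void __exit homeprobe_exit(void)
{
}

module_init(homeprobe_init);
module_exit(homeprobe_exit);
MODULE_LICENSE("GPL");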
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 830c4908ea76..6a9d20ddc34f 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -254,11 +254,6 @@ static pgprot_t __init init_pgprot(ulong address)
 		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
 	}
 
-	/* As a performance optimization, keep the boot init stack here. */
-	if (address >= (ulong)&init_thread_union &&
-	    address < (ulong)&init_thread_union + THREAD_SIZE)
-		return construct_pgprot(PAGE_KERNEL, smp_processor_id());
-
 #ifndef __tilegx__
 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
 	/* Force the atomic_locks[] array page to be hash-for-home. */
@@ -557,6 +552,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 
 	address = MEM_SV_INTRPT;
 	pmd = get_pmd(pgtables, address);
+	pfn = 0;  /* code starts at PA 0 */
 	if (ktext_small) {
 		/* Allocate an L2 PTE for the kernel text */
 		int cpu = 0;
@@ -579,10 +575,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		}
 
 		BUG_ON(address != (unsigned long)_stext);
-		pfn = 0;  /* code starts at PA 0 */
-		pte = alloc_pte();
-		for (pte_ofs = 0; address < (unsigned long)_einittext;
-		     pfn++, pte_ofs++, address += PAGE_SIZE) {
+		pte = NULL;
+		for (; address < (unsigned long)_einittext;
+		     pfn++, address += PAGE_SIZE) {
+			pte_ofs = pte_index(address);
+			if (pte_ofs == 0) {
+				if (pte)
+					assign_pte(pmd++, pte);
+				pte = alloc_pte();
+			}
 			if (!ktext_local) {
 				prot = set_remote_cache_cpu(prot, cpu);
 				cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +592,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 			}
 			pte[pte_ofs] = pfn_pte(pfn, prot);
 		}
-		assign_pte(pmd, pte);
+		if (pte)
+			assign_pte(pmd, pte);
 	} else {
 		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
 		pteval = pte_mkhuge(pteval);
@@ -614,7 +616,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		else
 			pteval = hv_pte_set_mode(pteval,
 						 HV_PTE_MODE_CACHE_NO_L3);
-		*(pte_t *)pmd = pteval;
+		for (; address < (unsigned long)_einittext;
+		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
+			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
 	}
 
 	/* Set swapper_pgprot here so it is flushed to memory right away. */
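The rewritten text-mapping loop above stops assuming the mapping starts on an L2 page-table boundary; instead it allocates a fresh L2 table whenever pte_index() wraps to zero and publishes the previous one. Consolidated, with the boundary handling commented (a sketch reusing the helpers from init.c):

/* Sketch: map [address, end) page by page, allocating a new L2 table
 * at each PMD boundary rather than assuming alignment.
 */
pte = NULL;
for (; address < end; pfn++, address += PAGE_SIZE) {
	pte_ofs = pte_index(address);
	if (pte_ofs == 0) {		/* first slot of a new L2 table */
		if (pte)
			assign_pte(pmd++, pte);	/* publish the full one */
		pte = alloc_pte();
	}
	pte[pte_ofs] = pfn_pte(pfn, prot);
}
if (pte)				/* publish the final, partial table */
	assign_pte(pmd, pte);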
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 87303693a072..2410aa899b3e 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
 	if (!pmd_huge_page(*pmd))
 		return;
 
-	/*
-	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
-	 * and since we need some kind of lock here to avoid races.
-	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock_irqsave(&init_mm.page_table_lock, flags);
 	if (!pmd_huge_page(*pmd)) {
 		/* Lost the race to convert the huge page. */
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 		return;
 	}
 
@@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
 
 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
+	spin_lock(&pgd_lock);
 	list_for_each(pos, &pgd_list) {
 		pmd_t *copy_pmd;
 		pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
 		copy_pmd = pmd_offset(pud, addr);
 		__set_pmd(copy_pmd, *pmd);
 	}
+	spin_unlock(&pgd_lock);
 #endif
 
 	/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
 			     cpu_possible_mask, NULL, 0);
 
 	/* Hold the lock until the TLB flush is finished to avoid races. */
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 }
 
 /*
@@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
 */
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
@@ -469,10 +471,18 @@ void __set_pte(pte_t *ptep, pte_t pte)
 
 void set_pte(pte_t *ptep, pte_t pte)
 {
-	struct page *page = pfn_to_page(pte_pfn(pte));
-
-	/* Update the home of a PTE if necessary */
-	pte = pte_set_home(pte, page_home(page));
+	if (pte_present(pte) &&
+	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
+		/* The PTE actually references physical memory. */
+		unsigned long pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			/* Update the home of the PTE from the struct page. */
+			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
+		} else if (hv_pte_get_mode(pte) == 0) {
+			/* remap_pfn_range(), etc, must supply PTE mode. */
+			panic("set_pte(): out-of-range PFN and mode 0\n");
+		}
+	}
 
 	__set_pte(ptep, pte);
 }
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 4dc019408fac..3b22a606f79d 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -194,17 +194,17 @@ static ssize_t srom_read(struct file *filp, char __user *buf,
 
 		hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
 				       *f_pos, bytes_this_pass);
-		if (hv_retval > 0) {
-			if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
-				retval = -EFAULT;
-				break;
-			}
-		} else if (hv_retval <= 0) {
+		if (hv_retval <= 0) {
 			if (retval == 0)
 				retval = hv_retval;
 			break;
 		}
 
+		if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
+			retval = -EFAULT;
+			break;
+		}
+
 		retval += hv_retval;
 		*f_pos += hv_retval;
 		buf += hv_retval;
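Beyond tidying, the srom_read() change converts a nested success path into error-first flow, removing the redundant `else if` companion. The resulting loop shape, as a generic sketch of a chunked device read (read_chunk() is a hypothetical stand-in for _srom_read()):

/* Sketch: chunked read into user space, handling errors first. */
while (count) {
	int n = read_chunk(kernbuf, *f_pos, min(count, chunk));

	if (n <= 0) {
		if (retval == 0)	/* report error only if no data yet */
			retval = n;
		break;
	}
	if (copy_to_user(buf, kernbuf, n)) {
		retval = -EFAULT;
		break;
	}
	retval += n;
	*f_pos += n;
	buf += n;
	count -= n;
}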
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 1d5cf06f6c6b..e99d00976189 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -145,7 +145,11 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
 	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
 
 	mci->mod_name = DRV_NAME;
+#ifdef __tilegx__
+	mci->ctl_name = "TILEGx_Memory_Controller";
+#else
 	mci->ctl_name = "TILEPro_Memory_Controller";
+#endif
 	mci->dev_name = dev_name(&pdev->dev);
 	mci->edac_check = tile_edac_check;
 
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 261356c2dc99..3d501ec7fad7 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -342,6 +342,21 @@ inline int __netio_fastio1(u32 fastio_index, u32 arg0)
 }
 
 
+static void tile_net_return_credit(struct tile_net_cpu *info)
+{
+	struct tile_netio_queue *queue = &info->queue;
+	netio_queue_user_impl_t *qup = &queue->__user_part;
+
+	/* Return four credits after every fourth packet. */
+	if (--qup->__receive_credit_remaining == 0) {
+		u32 interval = qup->__receive_credit_interval;
+		qup->__receive_credit_remaining = interval;
+		__netio_fastio_return_credits(qup->__fastio_index, interval);
+	}
+}
+
+
+
 /*
 * Provide a linux buffer to LIPP.
 */
@@ -433,7 +448,7 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
 	struct sk_buff **skb_ptr;
 
 	/* Request 96 extra bytes for alignment purposes. */
-	skb = netdev_alloc_skb(info->napi->dev, len + padding);
+	skb = netdev_alloc_skb(info->napi.dev, len + padding);
 	if (skb == NULL)
 		return false;
 
@@ -864,19 +879,11 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
 
 		stats->rx_packets++;
 		stats->rx_bytes += len;
-
-		if (small)
-			info->num_needed_small_buffers++;
-		else
-			info->num_needed_large_buffers++;
 	}
 
-	/* Return four credits after every fourth packet. */
-	if (--qup->__receive_credit_remaining == 0) {
-		u32 interval = qup->__receive_credit_interval;
-		qup->__receive_credit_remaining = interval;
-		__netio_fastio_return_credits(qup->__fastio_index, interval);
-	}
+	/* ISSUE: It would be nice to defer this until the packet has */
+	/* actually been processed. */
+	tile_net_return_credit(info);
 
 	/* Consume this packet. */
 	qup->__packet_receive_read = index2;
@@ -1543,7 +1550,7 @@ static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
 
 	/* Drain all the LIPP buffers. */
 	while (true) {
-		int buffer;
+		unsigned int buffer;
 
 		/* NOTE: This should never fail. */
 		if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
@@ -1707,7 +1714,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
 		if (!hash_default) {
 			void *va = pfn_to_kaddr(pfn) + f->page_offset;
 			BUG_ON(PageHighMem(skb_frag_page(f)));
-			finv_buffer_remote(va, f->size, 0);
+			finv_buffer_remote(va, skb_frag_size(f), 0);
 		}
 
 		cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1735,8 +1742,8 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
 * Sometimes, if "sendfile()" requires copying, we will be called with
 * "data" containing the header and payload, with "frags" being empty.
 *
- * In theory, "sh->nr_frags" could be 3, but in practice, it seems
- * that this will never actually happen.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments, which must be handled carefully in LEPP.
 *
 * See "emulate_large_send_offload()" for some reference code, which
 * does not handle checksumming.
@@ -1844,10 +1851,8 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&priv->eq_lock, irqflags);
 
-	/*
-	 * Handle completions if needed to make room.
-	 * HACK: Spin until there is sufficient room.
-	 */
+	/* Handle completions if needed to make room. */
+	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
 	if (lepp_num_free_comp_slots(eq) == 0) {
 		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
 		if (nolds == 0) {
@@ -1861,6 +1866,7 @@ busy:
 	cmd_tail = eq->cmd_tail;
 
 	/* Prepare to advance, detecting full queue. */
+	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
 	cmd_next = cmd_tail + cmd_size;
 	if (cmd_tail < cmd_head && cmd_next >= cmd_head)
 		goto busy;
@@ -2023,10 +2029,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&priv->eq_lock, irqflags);
 
-	/*
-	 * Handle completions if needed to make room.
-	 * HACK: Spin until there is sufficient room.
-	 */
+	/* Handle completions if needed to make room. */
+	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
 	if (lepp_num_free_comp_slots(eq) == 0) {
 		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
 		if (nolds == 0) {
@@ -2040,6 +2044,7 @@ busy:
 	cmd_tail = eq->cmd_tail;
 
 	/* Copy the commands, or fail. */
+	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
 	for (i = 0; i < num_frags; i++) {
 
 		/* Prepare to advance, detecting full queue. */
@@ -2261,6 +2266,23 @@ static int tile_net_get_mac(struct net_device *dev)
 	return 0;
 }
 
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts.  It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	disable_percpu_irq(priv->intr_id);
+	tile_net_handle_ingress_interrupt(priv->intr_id, dev);
+	enable_percpu_irq(priv->intr_id, 0);
+}
+#endif
+
+
 static const struct net_device_ops tile_net_ops = {
 	.ndo_open = tile_net_open,
 	.ndo_stop = tile_net_stop,
@@ -2269,7 +2291,10 @@ static const struct net_device_ops tile_net_ops = {
 	.ndo_get_stats = tile_net_get_stats,
 	.ndo_change_mtu = tile_net_change_mtu,
 	.ndo_tx_timeout = tile_net_tx_timeout,
-	.ndo_set_mac_address = tile_net_set_mac_address
+	.ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = tile_net_netpoll,
+#endif
 };
 
 
@@ -2409,7 +2434,7 @@ static void tile_net_cleanup(void)
 */
 static int tile_net_init_module(void)
 {
-	pr_info("Tilera IPP Net Driver\n");
+	pr_info("Tilera Network Driver\n");
 
 	tile_net_devs[0] = tile_net_dev_init("xgbe0");
 	tile_net_devs[1] = tile_net_dev_init("xgbe1");
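Finally, the factored-out tile_net_return_credit() is worth generalizing: the receiver returns flow-control credits to the hardware in batches of N packets instead of one call per packet, trading a little buffer slack for a shorter fast path. The pattern in isolation (field names mirror the __receive_credit_* members; give_back() is a hypothetical callback):

/* Sketch: batched flow-control credit return. */
struct credit_state {
	unsigned int remaining;		/* packets until the next return */
	unsigned int interval;		/* batch size, e.g. 4 */
};

static void return_credit(struct credit_state *cs,
			  void (*give_back)(unsigned int n))
{
	if (--cs->remaining == 0) {
		cs->remaining = cs->interval;
		give_back(cs->interval);	/* one call per N packets */
	}
}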
