author:    Chris Metcalf <cmetcalf@tilera.com>  2013-08-07 11:36:54 -0400
committer: Chris Metcalf <cmetcalf@tilera.com>  2013-08-13 16:26:01 -0400
commit:    bc1a298f4e04833db4c430df59b90039f0170515
tree:      802da739309efeab62317f62ec4f1989f3f7d8dd /arch/tile
parent:    1182b69cb24c4f7d7ee8c8afe41b5ab2bc05a15b
tile: support CONFIG_PREEMPT
This change adds support for CONFIG_PREEMPT (full kernel preemption).
In addition to the core support, this change includes a number of places
where we fix up uses of smp_processor_id() and per-cpu variables.

I also eliminate the PAGE_HOME_HERE and PAGE_HOME_UNKNOWN values for
page homing, as it turns out they weren't being used.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
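With CONFIG_PREEMPT, a bare smp_processor_id() in preemptible code can go
stale the moment it returns, so each call site below either pins the task
with get_cpu()/put_cpu() or switches to raw_smp_processor_id() where a stale
answer is harmless. A minimal sketch of the two patterns (illustrative only;
example_percpu_access() and flush_my_cache() are hypothetical names, not
code from this patch):

    /* Illustrative only: the fixup patterns applied throughout this change. */
    static void example_percpu_access(void)
    {
            int cpu;

            /* Pattern 1: pin to the current cpu across the use. */
            cpu = get_cpu();                /* disables preemption */
            flush_my_cache(cpu);            /* hypothetical per-cpu operation */
            put_cpu();                      /* re-enables preemption */

            /* Pattern 2: a stale answer is acceptable (diagnostics etc.). */
            pr_debug("somewhere near cpu %d\n", raw_smp_processor_id());
    }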
Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/Kconfig                  |  2
-rw-r--r--  arch/tile/include/asm/homecache.h  |  8
-rw-r--r--  arch/tile/include/asm/irqflags.h   | 21
-rw-r--r--  arch/tile/kernel/asm-offsets.c     |  2
-rw-r--r--  arch/tile/kernel/hardwall.c        | 18
-rw-r--r--  arch/tile/kernel/intvec_32.S       | 27
-rw-r--r--  arch/tile/kernel/intvec_64.S       | 30
-rw-r--r--  arch/tile/kernel/irq.c             |  1
-rw-r--r--  arch/tile/kernel/smp.c             |  2
-rw-r--r--  arch/tile/kernel/smpboot.c         |  8
-rw-r--r--  arch/tile/kernel/stack.c           |  4
-rw-r--r--  arch/tile/kernel/sys.c             |  4
-rw-r--r--  arch/tile/lib/memcpy_tile64.c      | 12
-rw-r--r--  arch/tile/mm/homecache.c           |  4
14 files changed, 98 insertions(+), 45 deletions(-)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 0576e1d8c4f9..1126b9d2f4cc 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -301,6 +301,8 @@ config PAGE_OFFSET
 
 source "mm/Kconfig"
 
+source "kernel/Kconfig.preempt"
+
 config CMDLINE_BOOL
 	bool "Built-in kernel command line"
 	default n
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h
index 7b7771328642..49d19dfc0630 100644
--- a/arch/tile/include/asm/homecache.h
+++ b/arch/tile/include/asm/homecache.h
@@ -44,16 +44,8 @@ struct zone;
  */
 #define PAGE_HOME_INCOHERENT -3
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 /* Home for the page is distributed via hash-for-home. */
 #define PAGE_HOME_HASH -4
-#endif
-
-/* Homing is unknown or unspecified. Not valid for page_home(). */
-#define PAGE_HOME_UNKNOWN -5
-
-/* Home on the current cpu. Not valid for page_home(). */
-#define PAGE_HOME_HERE -6
 
 /* Support wrapper to use instead of explicit hv_flush_remote(). */
 extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index c96f9bbb760d..71af5747874d 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -124,6 +124,12 @@
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
 
+#ifdef CONFIG_DEBUG_PREEMPT
+/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
+extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
+#endif
+
 /* Disable interrupts. */
 #define arch_local_irq_disable() \
 	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
@@ -132,9 +138,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #define arch_local_irq_disable_all() \
 	interrupt_mask_set_mask(-1ULL)
 
+/*
+ * Read the set of maskable interrupts.
+ * We avoid the preemption warning here via __this_cpu_ptr since even
+ * if irqs are already enabled, it's harmless to read the wrong cpu's
+ * enabled mask.
+ */
+#define arch_local_irqs_enabled() \
+	(*__this_cpu_ptr(&interrupts_enabled_mask))
+
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
-	interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
+	interrupt_mask_reset_mask(arch_local_irqs_enabled())
 
 /* Disable or enable interrupts based on flag argument. */
 #define arch_local_irq_restore(disabled) do { \
@@ -161,7 +176,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
 #define arch_local_irq_mask(interrupt) \
-	(__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
+	this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))
 
 /* Prevent the given interrupt from being enabled immediately. */
 #define arch_local_irq_mask_now(interrupt) do { \
@@ -171,7 +186,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Allow the given interrupt to be enabled next time we enable irqs. */
 #define arch_local_irq_unmask(interrupt) \
-	(__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
+	this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
 #define arch_local_irq_unmask_now(interrupt) do { \
diff --git a/arch/tile/kernel/asm-offsets.c b/arch/tile/kernel/asm-offsets.c
index 8652b0be4685..97ea6ac0a47b 100644
--- a/arch/tile/kernel/asm-offsets.c
+++ b/arch/tile/kernel/asm-offsets.c
@@ -58,6 +58,8 @@ void foo(void)
 	       offsetof(struct thread_info, status));
 	DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
 	       offsetof(struct thread_info, homecache_cpu));
+	DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
+	       offsetof(struct thread_info, preempt_count));
 	DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
 	       offsetof(struct thread_info, step_state));
 #ifdef __tilegx__
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 7db8893d4fc5..df27a1fd94a3 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -272,9 +272,9 @@ static void hardwall_setup_func(void *info)
 	struct hardwall_info *r = info;
 	struct hardwall_type *hwt = r->type;
 
-	int cpu = smp_processor_id();
-	int x = cpu % smp_width;
-	int y = cpu / smp_width;
+	int cpu = smp_processor_id();  /* on_each_cpu disables preemption */
+	int x = cpu_x(cpu);
+	int y = cpu_y(cpu);
 	int bits = 0;
 	if (x == r->ulhc_x)
 		bits |= W_PROTECT;
@@ -317,6 +317,7 @@ static void hardwall_protect_rectangle(struct hardwall_info *r)
 	on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
 }
 
+/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
 void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 {
 	struct hardwall_info *rect;
@@ -325,7 +326,6 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 	struct siginfo info;
 	int cpu = smp_processor_id();
 	int found_processes;
-	unsigned long flags;
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	irq_enter();
@@ -346,7 +346,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 	BUG_ON(hwt->disabled);
 
 	/* This tile trapped a network access; find the rectangle. */
-	spin_lock_irqsave(&hwt->lock, flags);
+	spin_lock(&hwt->lock);
 	list_for_each_entry(rect, &hwt->list, list) {
 		if (cpumask_test_cpu(cpu, &rect->cpumask))
 			break;
@@ -401,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 		pr_notice("hardwall: no associated processes!\n");
 
  done:
-	spin_unlock_irqrestore(&hwt->lock, flags);
+	spin_unlock(&hwt->lock);
 
 	/*
 	 * We have to disable firewall interrupts now, or else when we
@@ -661,7 +661,7 @@ static int hardwall_deactivate(struct hardwall_type *hwt,
 		return -EINVAL;
 
 	printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
-	       task->pid, task->comm, hwt->name, smp_processor_id());
+	       task->pid, task->comm, hwt->name, raw_smp_processor_id());
 	return 0;
 }
 
@@ -803,8 +803,8 @@ static void reset_xdn_network_state(struct hardwall_type *hwt)
 	/* Reset UDN coordinates to their standard value */
 	{
 		unsigned int cpu = smp_processor_id();
-		unsigned int x = cpu % smp_width;
-		unsigned int y = cpu / smp_width;
+		unsigned int x = cpu_x(cpu);
+		unsigned int y = cpu_y(cpu);
 		__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
 	}
 
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 388061319c4c..10767655689e 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -28,10 +28,6 @@
 #include <arch/interrupts.h>
 #include <arch/spr_def.h>
 
-#ifdef CONFIG_PREEMPT
-# error "No support for kernel preemption currently"
-#endif
-
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
 
 #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
@@ -812,17 +808,34 @@ STD_ENTRY(interrupt_return)
 	}
 	lw      r29, r29
 	andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
+	bzt     r29, .Lresume_userspace
+
+#ifdef CONFIG_PREEMPT
+	/* Returning to kernel space. Check if we need preemption. */
+	GET_THREAD_INFO(r29)
+	addli   r28, r29, THREAD_INFO_FLAGS_OFFSET
 	{
-	 bzt    r29, .Lresume_userspace
-	 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+	 lw     r28, r28
+	 addli  r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
+	}
+	{
+	 andi   r28, r28, _TIF_NEED_RESCHED
+	 lw     r29, r29
 	}
+	bzt     r28, 1f
+	bnz     r29, 1f
+	jal     preempt_schedule_irq
+	FEEDBACK_REENTER(interrupt_return)
+1:
+#endif
 
 	/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
 	{
-	 lw     r28, r29
+	 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
 	 moveli r27, lo16(_cpu_idle_nap)
 	}
 	{
+	 lw     r28, r29
 	 auli   r27, r27, ha16(_cpu_idle_nap)
 	}
 	{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 884af9ea5bed..38a60f27707c 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -30,10 +30,6 @@
 #include <arch/interrupts.h>
 #include <arch/spr_def.h>
 
-#ifdef CONFIG_PREEMPT
-# error "No support for kernel preemption currently"
-#endif
-
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
 
 #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
@@ -820,11 +816,33 @@ STD_ENTRY(interrupt_return)
 	andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 	{
 	 beqzt  r29, .Lresume_userspace
-	 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+	 move   r29, sp
+	}
+
+#ifdef CONFIG_PREEMPT
+	/* Returning to kernel space. Check if we need preemption. */
+	EXTRACT_THREAD_INFO(r29)
+	addli   r28, r29, THREAD_INFO_FLAGS_OFFSET
+	{
+	 ld     r28, r28
+	 addli  r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
 	}
+	{
+	 andi   r28, r28, _TIF_NEED_RESCHED
+	 ld4s   r29, r29
+	}
+	beqzt   r28, 1f
+	bnez    r29, 1f
+	jal     preempt_schedule_irq
+	FEEDBACK_REENTER(interrupt_return)
+1:
+#endif
 
 	/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
-	moveli  r27, hw2_last(_cpu_idle_nap)
+	{
+	 moveli r27, hw2_last(_cpu_idle_nap)
+	 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+	}
 	{
 	 ld     r28, r29
 	 shl16insli r27, r27, hw1(_cpu_idle_nap)
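In C terms, the new code on the return-to-kernel path in both intvec files
implements roughly the standard preemption check (a sketch of the logic only;
returning_to_kernel() is a hypothetical stand-in for the PL test above):

    if (returning_to_kernel(regs)) {
            struct thread_info *ti = current_thread_info();

            /* Preempt only if rescheduling is requested and allowed. */
            if ((ti->flags & _TIF_NEED_RESCHED) && ti->preempt_count == 0)
                    preempt_schedule_irq();   /* called with irqs disabled */
    }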
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 02e628065012..c90de6c3cb7f 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -74,6 +74,7 @@ static DEFINE_SPINLOCK(available_irqs_lock);
 /*
  * The interrupt handling path, implemented in terms of HV interrupt
  * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
+ * Entered with interrupts disabled.
  */
 void tile_dev_intr(struct pt_regs *regs, int intnum)
 {
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index cbc73a8b8fe1..6cc520d71d2b 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -100,8 +100,8 @@ static void smp_start_cpu_interrupt(void)
 /* Handler to stop the current cpu. */
 static void smp_stop_cpu_interrupt(void)
 {
-	set_cpu_online(smp_processor_id(), 0);
 	arch_local_irq_disable_all();
+	set_cpu_online(smp_processor_id(), 0);
 	for (;;)
 		asm("nap; nop");
 }
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 44bab29bf2f3..dee7f13c5854 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -142,13 +142,15 @@ static struct cpumask cpu_started __cpuinitdata;
  */
 static void __cpuinit start_secondary(void)
 {
-	int cpuid = smp_processor_id();
+	int cpuid;
+
+	preempt_disable();
+
+	cpuid = smp_processor_id();
 
 	/* Set our thread pointer appropriately. */
 	set_my_cpu_offset(__per_cpu_offset[cpuid]);
 
-	preempt_disable();
-
 	/*
 	 * In large machines even this will slow us down, since we
 	 * will be contending for for the printk spinlock.
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index c972689231ef..176ffe48eee9 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -194,7 +194,7 @@ static int KBacktraceIterator_next_item_inclusive(
  */
 static void validate_stack(struct pt_regs *regs)
 {
-	int cpu = smp_processor_id();
+	int cpu = raw_smp_processor_id();
 	unsigned long ksp0 = get_current_ksp0();
 	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
 	unsigned long sp = stack_pointer;
@@ -392,7 +392,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 		pr_err("Starting stack dump of tid %d, pid %d (%s)"
 		       " on cpu %d at cycle %lld\n",
 		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
-		       smp_processor_id(), get_cycles());
+		       raw_smp_processor_id(), get_cycles());
 	}
 	kbt->verbose = 1;
 	i = 0;
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index b881a7be24bd..38debe706061 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -38,8 +38,10 @@
 SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
 		unsigned long, flags)
 {
+	/* DCACHE is not particularly effective if not bound to one cpu. */
 	if (flags & DCACHE)
-		homecache_evict(cpumask_of(smp_processor_id()));
+		homecache_evict(cpumask_of(raw_smp_processor_id()));
+
 	if (flags & ICACHE)
 		flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
 			     0, 0, 0, NULL, NULL, 0);
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index 3bc4b4e40d93..0290c222847b 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -65,7 +65,7 @@ static void memcpy_multicache(void *dest, const void *source,
 	pmd_t *pmdp;
 	pte_t *ptep;
 	int type0, type1;
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 
 	/*
 	 * Disable interrupts so that we don't recurse into memcpy()
@@ -126,7 +126,6 @@ static void memcpy_multicache(void *dest, const void *source,
 	kmap_atomic_idx_pop();
 	sim_allow_multiple_caching(0);
 	local_irq_restore(flags);
-	put_cpu();
 }
 
 /*
@@ -137,6 +136,9 @@ static void memcpy_multicache(void *dest, const void *source,
 static unsigned long fast_copy(void *dest, const void *source, int len,
 			       memcpy_t func)
 {
+	int cpu = get_cpu();
+	unsigned long retval;
+
 	/*
 	 * Check if it's big enough to bother with. We may end up doing a
 	 * small copy via TLB manipulation if we're near a page boundary,
@@ -158,7 +160,7 @@ retry_source:
 		    !hv_pte_get_readable(src_pte) ||
 		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
 			break;
-		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
+		if (get_remote_cache_cpu(src_pte) == cpu)
 			break;
 		src_page = pfn_to_page(pte_pfn(src_pte));
 		get_page(src_page);
@@ -235,7 +237,9 @@ retry_dest:
 		len -= copy_size;
 	}
 
-	return func(dest, source, len);
+	retval = func(dest, source, len);
+	put_cpu();
+	return retval;
 }
 
 void *memcpy(void *to, const void *from, __kernel_size_t n)
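The net effect in fast_copy() is to widen the preemption-disabled region so
that the cpu number sampled at entry stays valid for the remote-cache tests
and for the nested memcpy_multicache() calls. Schematically (a simplified
outline of the patched function, not its full body):

    static unsigned long fast_copy(void *dest, const void *source, int len,
                                   memcpy_t func)
    {
            int cpu = get_cpu();    /* pinned: 'cpu' stays accurate below */
            unsigned long retval;

            /* ... per-page loop using 'cpu' and memcpy_multicache() ... */

            retval = func(dest, source, len);
            put_cpu();              /* unpin only after the final tail copy */
            return retval;
    }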
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 1ae911939a18..df46a2d5bdf0 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -172,7 +172,8 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
 
 static void homecache_finv_page_va(void* va, int home)
 {
-	if (home == smp_processor_id()) {
+	int cpu = get_cpu();
+	if (home == cpu) {
 		finv_buffer_local(va, PAGE_SIZE);
 	} else if (home == PAGE_HOME_HASH) {
 		finv_buffer_remote(va, PAGE_SIZE, 1);
@@ -180,6 +181,7 @@ static void homecache_finv_page_va(void* va, int home)
 		BUG_ON(home < 0 || home >= NR_CPUS);
 		finv_buffer_remote(va, PAGE_SIZE, 0);
 	}
+	put_cpu();
 }
 
 void homecache_finv_map_page(struct page *page, int home)