Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/highmem.h     |  1
-rw-r--r--  arch/tile/include/asm/kmap_types.h  | 34
-rw-r--r--  arch/tile/include/asm/pgtable.h     |  6
-rw-r--r--  arch/tile/include/asm/stat.h        |  3
-rw-r--r--  arch/tile/include/asm/unistd.h      |  1
-rw-r--r--  arch/tile/kernel/compat.c           | 10
-rw-r--r--  arch/tile/kernel/early_printk.c     |  2
-rw-r--r--  arch/tile/kernel/hardwall.c         |  6
-rw-r--r--  arch/tile/kernel/irq.c              |  4
-rw-r--r--  arch/tile/kernel/machine_kexec.c    |  6
-rw-r--r--  arch/tile/kernel/messaging.c        |  2
-rw-r--r--  arch/tile/kernel/ptrace.c           | 39
-rw-r--r--  arch/tile/kernel/reboot.c           |  6
-rw-r--r--  arch/tile/kernel/setup.c            |  8
-rw-r--r--  arch/tile/kernel/signal.c           |  9
-rw-r--r--  arch/tile/kernel/smp.c              |  2
-rw-r--r--  arch/tile/kernel/time.c             |  8
-rw-r--r--  arch/tile/lib/memcpy_tile64.c       | 11
-rw-r--r--  arch/tile/mm/highmem.c              |  2
-rw-r--r--  arch/tile/mm/init.c                 |  8
-rw-r--r--  arch/tile/mm/pgtable.c              |  4
21 files changed, 103 insertions, 69 deletions
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index e0f7ee186721..b2a6c5de79ab 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 1480106d1c05..3d0f20246260 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
 #define _ASM_TILE_KMAP_TYPES_H
 
 /*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly. In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space. For now we leave KM_TYPE_NR
+ * set to depth 8.
  */
 enum km_type {
+	KM_TYPE_NR = 8
+};
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
 	KM_BOUNCE_READ,
 	KM_SKB_SUNRPC_DATA,
 	KM_SKB_DATA_SOFTIRQ,
 	KM_USER0,
 	KM_USER1,
 	KM_BIO_SRC_IRQ,
+	KM_BIO_DST_IRQ,
+	KM_PTE0,
+	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
-	KM_MEMCPY0,
-	KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
-	KM_PTE0,
-	KM_PTE1,
-#endif
-	KM_TYPE_NR
+	KM_SYNC_ICACHE,
+	KM_SYNC_DCACHE,
+	KM_UML_USERCOPY,
+	KM_IRQ_PTE,
+	KM_NMI,
+	KM_NMI_PTE,
+	KM_KDB
 };
 
 #endif /* _ASM_TILE_KMAP_TYPES_H */
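
For reference, the arithmetic in the new comment works out as follows; the defines below are an illustrative sketch only (the EXAMPLE_* names are not part of the patch):

/*
 * Illustrative sketch: why one extra KM_TYPE_NR slot costs 4MB on a
 * 64-tile, 64KB-page configuration.  Each atomic-kmap slot needs one
 * fixmap page per CPU.
 */
#define EXAMPLE_NR_CPUS		64
#define EXAMPLE_PAGE_SIZE	(64UL * 1024)		/* 64KB pages */

/* 64 CPUs * 64KB = 4MB of virtual address space per slot. */
#define EXAMPLE_SPACE_PER_SLOT	(EXAMPLE_NR_CPUS * EXAMPLE_PAGE_SIZE)

/* With KM_TYPE_NR == 8, the whole atomic-kmap area is 8 * 4MB = 32MB. */
#define EXAMPLE_KMAP_AREA	(8 * EXAMPLE_SPACE_PER_SLOT)

The old comment's "each set of four uses another 16MB chunk" is the same figure grouped four slots at a time (4 * 4MB = 16MB).
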
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index dc4ccdd855bc..a6604e9485da 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -344,10 +344,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 #if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
-	_pte_offset_map(dir, address, KM_PTE0)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
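
For reference, a hedged sketch of how a HIGHPTE caller uses the type-free definitions above; the helper below is hypothetical and not part of the patch:

/*
 * Hypothetical caller, for illustration only: with the stack-based
 * kmap_atomic() API there is no km_type argument, and pte_unmap()
 * reduces to kunmap_atomic() on the mapped pointer.
 */
static inline pte_t example_read_pte(pmd_t *pmd, unsigned long address)
{
	pte_t *ptep = pte_offset_map(pmd, address);	/* kmap_atomic under HIGHPTE */
	pte_t pte = *ptep;
	pte_unmap(ptep);				/* kunmap_atomic under HIGHPTE */
	return pte;
}
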
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index 3dc90fa92c70..b16e5db8f0e7 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1 +1,4 @@
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_STAT64	/* Used for compat_sys_stat64() etc. */
+#endif
 #include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f2e3ff485333..b35c2db71199 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -41,6 +41,7 @@ __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
 #ifdef CONFIG_COMPAT
 #define __ARCH_WANT_SYS_LLSEEK
 #endif
+#define __ARCH_WANT_SYS_NEWFSTATAT
 #endif
 
 #endif /* _ASM_TILE_UNISTD_H */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 77739cdd9462..67617a05e602 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -148,11 +148,11 @@ long tile_compat_sys_msgrcv(int msqid,
 #define compat_sys_readahead sys32_readahead
 #define compat_sys_sync_file_range compat_sys_sync_file_range2
 
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
 
 /* The native sys_ptrace dynamically handles compat binaries. */
 #define compat_sys_ptrace sys_ptrace
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd43a8a0..493a0e66d916 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
 void early_panic(const char *fmt, ...)
 {
 	va_list ap;
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	va_start(ap, fmt);
 	early_printk("Kernel panic - not syncing: ");
 	early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 1e54a7843410..e910530436e6 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@ enum direction_protect {
 
 static void enable_firewall_interrupts(void)
 {
-	raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
 }
 
 static void disable_firewall_interrupts(void)
 {
-	raw_local_irq_mask_now(INT_UDN_FIREWALL);
+	arch_local_irq_mask_now(INT_UDN_FIREWALL);
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@ static int hardwall_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dev_hardwall_fops = {
+	.open           = nonseekable_open,
 	.unlocked_ioctl = hardwall_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = hardwall_compat_ioctl,
 #endif
 	.flush          = hardwall_flush,
 	.release        = hardwall_release,
-	.llseek         = noop_llseek,
 };
 
 static struct cdev hardwall_dev;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index e63917687e99..128805ef8f2c 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
 #define IS_HW_CLEARED 1
 
 /*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
  * This is initialized to have just a single interrupt that the kernel
  * doesn't actually use as a sentinel.  During kernel init,
  * interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_K);
+	arch_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265d6179..0d8b9e933487 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 
 		if ((entry & IND_SOURCE)) {
 			void *va =
-				kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+				kmap_atomic_pfn(entry >> PAGE_SHIFT);
 			r = kexec_bn2cl(va);
 			if (r) {
 				command_line = r;
 				break;
 			}
-			kunmap_atomic(va, KM_USER0);
+			kunmap_atomic(va);
 		}
 	}
 
@@ -198,7 +198,7 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 
 		hverr = hv_set_command_line(
 			(HV_VirtAddr) command_line, strlen(command_line));
-		kunmap_atomic(command_line, KM_USER0);
+		kunmap_atomic(command_line);
 	} else {
 		pr_info("%s: no command line found; making empty\n",
 			__func__);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 997e3933f726..0858ee6b520f 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_K);
+	arch_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 9cd29884c09f..e92e40527d6d 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -50,10 +50,10 @@ long arch_ptrace(struct task_struct *child, long request,
 {
 	unsigned long __user *datap = (long __user __force *)data;
 	unsigned long tmp;
-	int i;
 	long ret = -EIO;
-	unsigned long *childregs;
 	char *childreg;
+	struct pt_regs copyregs;
+	int ex1_offset;
 
 	switch (request) {
 
@@ -80,6 +80,16 @@ long arch_ptrace(struct task_struct *child, long request,
 		if (addr >= PTREGS_SIZE)
 			break;
 		childreg = (char *)task_pt_regs(child) + addr;
+
+		/* Guard against overwrites of the privilege level. */
+		ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+		if (is_compat_task())   /* point at low word */
+			ex1_offset += sizeof(compat_long_t);
+#endif
+		if (addr == ex1_offset)
+			data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
 #ifdef CONFIG_COMPAT
 		if (is_compat_task()) {
 			if (addr & (sizeof(compat_long_t)-1))
@@ -96,26 +106,19 @@ long arch_ptrace(struct task_struct *child, long request,
 		break;
 
 	case PTRACE_GETREGS:  /* Get all registers from the child. */
-		if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
-			break;
-		childregs = (long *)task_pt_regs(child);
-		for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-		     ++i) {
-			ret = __put_user(childregs[i], &datap[i]);
-			if (ret != 0)
-				break;
+		if (copy_to_user(datap, task_pt_regs(child),
+				 sizeof(struct pt_regs)) == 0) {
+			ret = 0;
 		}
 		break;
 
 	case PTRACE_SETREGS:  /* Set all registers in the child. */
-		if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
-			break;
-		childregs = (long *)task_pt_regs(child);
-		for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-		     ++i) {
-			ret = __get_user(childregs[i], &datap[i]);
-			if (ret != 0)
-				break;
+		if (copy_from_user(&copyregs, datap,
+				   sizeof(struct pt_regs)) == 0) {
+			copyregs.ex1 =
+				PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+			*task_pt_regs(child) = copyregs;
+			ret = 0;
 		}
 		break;
 
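
Both the PTRACE_POKEUSR guard and the PTRACE_SETREGS path above rely on the same sanitizing idiom; a minimal sketch using the PL_ICS_EX1()/EX1_ICS() macros referenced in the patch (the helper name is hypothetical, not part of the patch):

/*
 * Illustrative helper: keep only the ICS bit of a user-supplied EX1
 * value and force the protection level back to USER_PL, so ptrace
 * cannot hand the child a kernel privilege level.
 */
static unsigned long example_sanitize_user_ex1(unsigned long ex1)
{
	return PL_ICS_EX1(USER_PL, EX1_ICS(ex1));
}
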
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d20beba..baa3d905fee2 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
 void machine_halt(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_halt();
 }
@@ -35,14 +35,14 @@ void machine_halt(void)
 void machine_power_off(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_power_off();
 }
 
 void machine_restart(char *cmd)
 {
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
 }
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index ae51cad12da0..fb0b3cbeae14 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -868,14 +868,14 @@ void __cpuinit setup_cpu(int boot)
 
 	/* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
-	raw_local_irq_unmask(INT_DMATLB_MISS);
-	raw_local_irq_unmask(INT_DMATLB_ACCESS);
+	arch_local_irq_unmask(INT_DMATLB_MISS);
+	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
 #if CHIP_HAS_SN_PROC()
-	raw_local_irq_unmask(INT_SNITLB_MISS);
+	arch_local_irq_unmask(INT_SNITLB_MISS);
 #endif
 #ifdef __tilegx__
-	raw_local_irq_unmask(INT_SINGLE_STEP_K);
+	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
 
 	/*
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index fb28e85ae3ae..687719d4abd1 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -71,6 +71,9 @@ int restore_sigcontext(struct pt_regs *regs,
 	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
 		err |= __get_user(regs->regs[i], &sc->gregs[i]);
 
+	/* Ensure that the PL is always set to USER_PL. */
+	regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
+
 	regs->faultnum = INT_SWINT_1_SIGRETURN;
 
 	err |= __get_user(*pr0, &sc->gregs[0]);
@@ -330,7 +333,7 @@ void do_signal(struct pt_regs *regs)
 			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		}
 
-		return;
+		goto done;
 	}
 
 	/* Did we come from a system call? */
@@ -358,4 +361,8 @@ void do_signal(struct pt_regs *regs)
 		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
+
+done:
+	/* Avoid double syscall restart if there are nested signals. */
+	regs->faultnum = INT_SWINT_1_SIGRETURN;
 }
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 75255d90aff3..9575b37a8b75 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
 static void smp_stop_cpu_interrupt(void)
 {
 	set_cpu_online(smp_processor_id(), 0);
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	for (;;)
 		asm("nap");
 }
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 6bed820e1421..f2e156e44692 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
 {
 	BUG_ON(ticks > MAX_TICK);
 	__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
-	raw_local_irq_unmask_now(INT_TILE_TIMER);
+	arch_local_irq_unmask_now(INT_TILE_TIMER);
 	return 0;
 }
 
@@ -143,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
 static void tile_timer_set_mode(enum clock_event_mode mode,
 				struct clock_event_device *evt)
 {
-	raw_local_irq_mask_now(INT_TILE_TIMER);
+	arch_local_irq_mask_now(INT_TILE_TIMER);
 }
 
 /*
@@ -172,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
 	evt->cpumask = cpumask_of(smp_processor_id());
 
 	/* Start out with timer not firing. */
-	raw_local_irq_mask_now(INT_TILE_TIMER);
+	arch_local_irq_mask_now(INT_TILE_TIMER);
 
 	/* Register tile timer. */
 	clockevents_register_device(evt);
@@ -188,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 	 * Mask the timer interrupt here, since we are a oneshot timer
 	 * and there are now by definition no events pending.
 	 */
-	raw_local_irq_mask(INT_TILE_TIMER);
+	arch_local_irq_mask(INT_TILE_TIMER);
 
 	/* Track time spent here in an interrupt context */
 	irq_enter();
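
The mask/unmask pairing above implements the usual oneshot pattern; a simplified sketch using only calls that appear in this patch (the function names are hypothetical):

/*
 * Illustration only: arming the timer unmasks the interrupt so it can
 * fire once; taking the interrupt masks it again until the clockevents
 * core arms the next event.
 */
static void example_arm_oneshot(unsigned long ticks)
{
	__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);	/* program the countdown */
	arch_local_irq_unmask_now(INT_TILE_TIMER);	/* allow one expiry */
}

static void example_oneshot_fired(void)
{
	arch_local_irq_mask(INT_TILE_TIMER);	/* no events pending until re-armed */
}
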
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index dfedea7b266b..f7d4a6ad61e8 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -54,7 +54,7 @@ typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
  * we must run with interrupts disabled to avoid the risk of some
  * other code seeing the incoherent data in our cache.  (Recall that
  * our cache is indexed by PA, so even if the other code doesn't use
- * our KM_MEMCPY virtual addresses, they'll still hit in cache using
+ * our kmap_atomic virtual addresses, they'll still hit in cache using
  * the normal VAs that aren't supposed to hit in cache.)
  */
 static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@ static void memcpy_multicache(void *dest, const void *source,
 	unsigned long flags, newsrc, newdst;
 	pmd_t *pmdp;
 	pte_t *ptep;
+	int type0, type1;
 	int cpu = get_cpu();
 
 	/*
@@ -77,7 +78,8 @@ static void memcpy_multicache(void *dest, const void *source,
 	sim_allow_multiple_caching(1);
 
 	/* Set up the new dest mapping */
-	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
+	type0 = kmap_atomic_idx_push();
+	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
 	newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
 	pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
 	ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@ static void memcpy_multicache(void *dest, const void *source,
 	}
 
 	/* Set up the new source mapping */
-	idx += (KM_MEMCPY0 - KM_MEMCPY1);
+	type1 = kmap_atomic_idx_push();
+	idx += (type0 - type1);
 	src_pte = hv_pte_set_nc(src_pte);
 	src_pte = hv_pte_clear_writable(src_pte);  /* be paranoid */
 	newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@ static void memcpy_multicache(void *dest, const void *source,
 	 * We're done: notify the simulator that all is back to normal,
 	 * and re-enable interrupts and pre-emption.
 	 */
+	kmap_atomic_idx_pop();
+	kmap_atomic_idx_pop();
 	sim_allow_multiple_caching(0);
 	local_irq_restore(flags);
 	put_cpu();
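
The conversion above follows the stack discipline of kmap_atomic_idx_push()/kmap_atomic_idx_pop(): one index per temporary mapping, popped in LIFO order before interrupts are re-enabled. A simplified sketch of that pattern (hypothetical function, PTE installation elided):

/*
 * Illustration only: one temporary per-cpu fixmap mapping managed with
 * the stack-based atomic-kmap index API.
 */
static void example_with_temporary_mapping(int cpu)
{
	int type = kmap_atomic_idx_push();
	unsigned long idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type;
	unsigned long vaddr = __fix_to_virt(idx);

	/* ... install a PTE at vaddr and use the mapping here ... */

	kmap_atomic_idx_pop();		/* must pair with the push, LIFO order */
}
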
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index abb57331cf6e..31dbbd9afe47 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -227,7 +227,7 @@ EXPORT_SYMBOL(kmap_atomic_prot);
 void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
-	return kmap_atomic_prot(page, type, PAGE_NONE);
+	return kmap_atomic_prot(page, PAGE_NONE);
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 78e1982cb6c9..0b9ce69b0ee5 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -988,8 +988,12 @@ static long __write_once initfree = 1;
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
 {
-	strict_strtol(str, 0, &initfree);
-	pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+	long val;
+	if (strict_strtol(str, 0, &val)) {
+		initfree = val;
+		pr_info("initfree: %s free init pages\n",
+			initfree ? "will" : "won't");
+	}
 	return 1;
 }
 __setup("initfree=", set_initfree);
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 335c24621c41..1f5430c53d0d 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -134,9 +134,9 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 }
 
 #if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
+pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
 {
-	pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
+	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
 		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
 	return &pte[pte_index(address)];
 }