author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-09-14 20:07:33 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-09-14 20:07:33 -0400
commit     2605a103cadb29053d6bc7e81c7de802ec75ce6c (patch)
tree       e2a83845f59217d66252c7a9265ff99a3b69f525
parent     53a3f3087be361dacfc02e7a85b6d6142a41ce8a (diff)
parent     ebb2a97b2e7422176d52f4f33e3ee400653875b4 (diff)
Merge git://git.linux-xtensa.org/kernel/xtensa-feed
* git://git.linux-xtensa.org/kernel/xtensa-feed:
  [patch 1/2] Xtensa: enable arbitary tty speed setting ioctls
  [patch 2/2] xtensa console.c: remove duplicate #include
  [XTENSA] Add support for cache-aliasing
  [XTENSA] Add kernel module support
  [XTENSA] Add support for executable/non-executable feature in the mmu
  [XTENSA] Use the generic version of get_order
  [XTENSA] Initialize semaphore_wake_lock
  [XTENSA] Add typecast macro for constants
  [XTENSA] Fix timer instabilities.
  [XTENSA] Fix fadvise64_64
  [XTENSA] Remove extraneous include statement
  [XTENSA] Move string-io functions to io.c from pci.c
  [XTENSA] Move pre-initialized structures to init_task.c
  [XTENSA] Add freestanding option to CFLAGS
  [XTENSA] Add getpgrp system-call to unistd.h
  [XTENSA] add missing system calls
  [XTENSA] fix wrong usage of __init and __initdata in traps.c
-rw-r--r--  arch/xtensa/Makefile                 |   7
-rw-r--r--  arch/xtensa/kernel/Makefile          |   2
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c     |  13
-rw-r--r--  arch/xtensa/kernel/entry.S           | 115
-rw-r--r--  arch/xtensa/kernel/init_task.c       |  38
-rw-r--r--  arch/xtensa/kernel/io.c              |  75
-rw-r--r--  arch/xtensa/kernel/module.c          | 195
-rw-r--r--  arch/xtensa/kernel/pci.c             |  69
-rw-r--r--  arch/xtensa/kernel/process.c         |  14
-rw-r--r--  arch/xtensa/kernel/semaphore.c       |   2
-rw-r--r--  arch/xtensa/kernel/syscall.c         |   5
-rw-r--r--  arch/xtensa/kernel/time.c            |  46
-rw-r--r--  arch/xtensa/kernel/traps.c           |   4
-rw-r--r--  arch/xtensa/mm/Makefile              |   6
-rw-r--r--  arch/xtensa/mm/cache.c               | 256
-rw-r--r--  arch/xtensa/mm/fault.c               |   6
-rw-r--r--  arch/xtensa/mm/init.c                | 252
-rw-r--r--  arch/xtensa/mm/misc.S                | 306
-rw-r--r--  arch/xtensa/platform-iss/console.c   |   1
-rw-r--r--  include/asm-xtensa/bugs.h            |   6
-rw-r--r--  include/asm-xtensa/cache.h           |   9
-rw-r--r--  include/asm-xtensa/cacheflush.h      |  81
-rw-r--r--  include/asm-xtensa/elf.h             |  50
-rw-r--r--  include/asm-xtensa/io.h              |   1
-rw-r--r--  include/asm-xtensa/ioctls.h          |   4
-rw-r--r--  include/asm-xtensa/page.h            | 106
-rw-r--r--  include/asm-xtensa/pgalloc.h         | 107
-rw-r--r--  include/asm-xtensa/pgtable.h         | 235
-rw-r--r--  include/asm-xtensa/processor.h       |   2
-rw-r--r--  include/asm-xtensa/syscall.h         |  24
-rw-r--r--  include/asm-xtensa/termbits.h        |   5
-rw-r--r--  include/asm-xtensa/termios.h         |   6
-rw-r--r--  include/asm-xtensa/timex.h           |   4
-rw-r--r--  include/asm-xtensa/tlb.h             |  30
-rw-r--r--  include/asm-xtensa/types.h           |   9
-rw-r--r--  include/asm-xtensa/unistd.h          | 128
36 files changed, 1515 insertions(+), 704 deletions(-)
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 95f836db38fa..acf05be24929 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -27,7 +27,12 @@ platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
 PLATFORM = $(platform-y)
 export PLATFORM
 
-CFLAGS += -pipe -mlongcalls
+# temporarily until string.h is fixed
+cflags-y += -ffreestanding
+
+cflags-y += -pipe -mlongcalls
+
+CFLAGS += $(cflags-y)
 
 KBUILD_DEFCONFIG := iss_defconfig
 
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 71f733c4f66d..f582d6a24ec2 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o vmlinux.lds
 
 obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
 	 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
-	 pci-dma.o
+	 pci-dma.o init_task.o io.o
 
 ## windowspill.o
 
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index d0323cd6a2ea..d5ffe7b6443e 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -18,12 +18,13 @@
 #include <linux/stddef.h>
 #include <linux/thread_info.h>
 #include <linux/ptrace.h>
+#include <linux/mm.h>
+
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 
 #define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-#define BLANK() asm volatile("\n->" : : )
 
 int main(void)
 {
@@ -63,7 +64,6 @@ int main(void)
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
 	DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
 	DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
-	BLANK();
 
 	/* struct task_struct */
 	DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
@@ -73,27 +73,26 @@ int main(void)
 	DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
 	DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
 	DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
-	BLANK();
 
 	/* struct thread_info (offset from start_struct) */
 	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
 	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
 	DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
 	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
-	BLANK();
 
 	/* struct mm_struct */
 	DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
 	DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
 	DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
-	BLANK();
-	DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
+
+	/* struct page */
+	DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
 
 	/* constants */
 	DEFINE(_CLONE_VM, CLONE_VM);
 	DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
+	DEFINE(PG_ARCH_1, PG_arch_1);
 
 	return 0;
 }
 
-
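A note on the DEFINE() macro seen in the hunk above: it is the standard asm-offsets technique. The compiler is forced to print each constant into its assembly output, and the Kbuild machinery scrapes the "->SYM value" markers back out (via a sed script over the generated .s file) into a header of #defines that assembly sources such as entry.S can include. A minimal self-contained sketch of the idea, with an illustrative struct and symbol name rather than the kernel's own:

	#include <stddef.h>

	struct task_example {
		long state;
		void *stack;
	};

	/* Emits a marker line such as "->TASK_STACK 4 ..." into the generated
	 * .s file; a build script then rewrites it as "#define TASK_STACK 4".
	 */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	int main(void)
	{
		DEFINE(TASK_STACK, offsetof(struct task_example, stack));
		return 0;
	}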
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 8dc7a2c26ff9..91a689eca43d 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -7,7 +7,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2005 by Tensilica Inc.
+ * Copyright (C) 2004-2007 by Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  *
@@ -169,7 +169,7 @@ _user_exception:
  * We have to save all registers up to the first '1' from
  * the right, except the current frame (bit 0).
  * Assume a2 is:  001001000110001
- * All regiser frames starting from the top fiel to the marked '1'
+ * All register frames starting from the top field to the marked '1'
  * must be saved.
  */
 
@@ -1572,10 +1572,12 @@ ENTRY(fast_second_level_miss)
 	l32i	a0, a1, TASK_MM		# tsk->mm
 	beqz	a0, 9f
 
-8:	rsr	a1, EXCVADDR		# fault address
-	_PGD_OFFSET(a0, a1, a1)
+
+	/* We deliberately destroy a3 that holds the exception table. */
+
+8:	rsr	a3, EXCVADDR		# fault address
+	_PGD_OFFSET(a0, a3, a1)
 	l32i	a0, a0, 0		# read pmdval
-	//beqi	a0, _PAGE_USER, 2f
 	beqz	a0, 2f
 
 	/* Read ptevaddr and convert to top of page-table page.
@@ -1588,7 +1590,7 @@ ENTRY(fast_second_level_miss)
 	 * The messy computation for 'pteval' above really simplifies
 	 * into the following:
 	 *
-	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
+	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
 	 */
 
 	movi	a1, -PAGE_OFFSET
@@ -1596,20 +1598,34 @@ ENTRY(fast_second_level_miss)
 	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
 	xor	a0, a0, a1
 
-
-	movi	a1, PAGE_DIRECTORY
+	movi	a1, _PAGE_DIRECTORY
 	or	a0, a0, a1		# ... | PAGE_DIRECTORY
 
+	/*
+	 * We utilize all three wired ways (7-9) to hold pmd translations.
+	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
+	 * This allows mapping the three most common regions to three different
+	 * DTLBs:
+	 *   0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
+	 *   2   -> way 8	shared libraries (2000.0000)
+	 *   3   -> way 9	stack (3000.0000)
+	 */
+
+	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
 	rsr	a1, PTEVADDR
+	addx2	a3, a3, a3		# ->			0,3,6,9
 	srli	a1, a1, PAGE_SHIFT
+	extui	a3, a3, 2, 2		# ->			0,0,1,2
 	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
-	addi	a1, a1, DTLB_WAY_PGD	# ... + way_number
+	addi	a3, a3, DTLB_WAY_PGD
+	add	a1, a1, a3		# ... + way_number
 
-	wdtlb	a0, a1
+3:	wdtlb	a0, a1
 	dsync
 
 	/* Exit critical section. */
 
+4:	movi	a3, exc_table		# restore a3
 	movi	a0, 0
 	s32i	a0, a3, EXC_TABLE_FIXUP
 
@@ -1636,8 +1652,76 @@ ENTRY(fast_second_level_miss)
 9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
 	j	8b
 
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+
+2:	/* Special case for cache aliasing.
+	 * We (should) only get here if a clear_user_page, copy_user_page
+	 * or the aliased cache flush functions got preemptively interrupted
+	 * by another task. Re-establish temporary mapping to the
+	 * TLBTEMP_BASE areas.
+	 */
+
+	/* We shouldn't be in a double exception */
+
+	l32i	a0, a2, PT_DEPC
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+
+	/* Make sure the exception originated in the special functions */
+
+	movi	a0, __tlbtemp_mapping_start
+	rsr	a3, EPC_1
+	bltu	a3, a0, 2f
+	movi	a0, __tlbtemp_mapping_end
+	bgeu	a3, a0, 2f
+
+	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
+
+	movi	a3, TLBTEMP_BASE_1
+	rsr	a0, EXCVADDR
+	bltu	a0, a3, 2f
+
+	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
+	bgeu	a1, a3, 2f
+
+	/* Check if we have to restore an ITLB mapping. */
+
+	movi	a1, __tlbtemp_mapping_itlb
+	rsr	a3, EPC_1
+	sub	a3, a3, a1
+
+	/* Calculate VPN */
+
+	movi	a1, PAGE_MASK
+	and	a1, a1, a0
+
+	/* Jump for ITLB entry */
+
+	bgez	a3, 1f
+
+	/* We can use up to two TLBTEMP areas, one for src and one for dst. */
+
+	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
+	add	a1, a3, a1
+
+	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
+
+	mov	a0, a6
+	movnez	a0, a7, a3
+	j	3b
+
+	/* ITLB entry. We only use dst in a6. */
+
+1:	witlb	a6, a1
+	isync
+	j	4b
+
+
+#endif	// DCACHE_WAY_SIZE > PAGE_SIZE
+
+
 2:	/* Invalid PGD, default exception handling */
 
+	movi	a3, exc_table
 	rsr	a1, DEPC
 	xsr	a3, EXCSAVE_1
 	s32i	a1, a2, PT_AREG2
@@ -1682,15 +1766,15 @@ ENTRY(fast_store_prohibited)
 8:	rsr	a1, EXCVADDR		# fault address
 	_PGD_OFFSET(a0, a1, a4)
 	l32i	a0, a0, 0
-	//beqi	a0, _PAGE_USER, 2f	# FIXME use _PAGE_INVALID
 	beqz	a0, 2f
 
+	/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/
+
 	_PTE_OFFSET(a0, a1, a4)
 	l32i	a4, a0, 0		# read pteval
-	movi	a1, _PAGE_VALID | _PAGE_RW
-	bnall	a4, a1, 2f
+	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f
 
-	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
+	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
 	or	a4, a4, a1
 	rsr	a1, EXCVADDR
 	s32i	a4, a0, 0
@@ -1700,10 +1784,7 @@ ENTRY(fast_store_prohibited)
 	dhwb	a0, 0
 #endif
 	pdtlb	a0, a1
-	beqz	a0, 1f
-	idtlb	a0	// FIXME do we need this?
 	wdtlb	a4, a0
-1:
 
 	/* Exit critical section. */
 
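The extui/addx2/extui sequence added to fast_second_level_miss above packs the way-selection table from the comment (regions 0,1 -> way 7, 2 -> way 8, 3 -> way 9) into three instructions. A C model of the same arithmetic, assuming DTLB_WAY_PGD is 7 as the comment's "wired ways (7-9)" implies:

	unsigned int pgd_dtlb_way(unsigned long vaddr)
	{
		unsigned int region = (vaddr >> 28) & 3;  /* extui a3, a3, 28, 2 */
		unsigned int idx = (3 * region) >> 2;     /* addx2 + extui: 0,0,1,2 */
		return 7 + idx;                           /* addi a3, a3, DTLB_WAY_PGD */
	}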
diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c
new file mode 100644
index 000000000000..021b4f46ff94
--- /dev/null
+++ b/arch/xtensa/kernel/init_task.c
@@ -0,0 +1,38 @@
+/*
+ * arch/xtensa/kernel/init_task.c
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ */
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/module.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+union thread_union init_thread_union
+	__attribute__((__section__(".data.init_task"))) =
+{ INIT_THREAD_INFO(init_task) };
+
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
diff --git a/arch/xtensa/kernel/io.c b/arch/xtensa/kernel/io.c
new file mode 100644
index 000000000000..5b65269b1d2f
--- /dev/null
+++ b/arch/xtensa/kernel/io.c
@@ -0,0 +1,75 @@
+/*
+ * arch/xtensa/io.c
+ *
+ * IO primitives
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Copied from sparc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ *
+ */
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+void outsb(unsigned long addr, const void *src, unsigned long count) {
+	while (count) {
+		count -= 1;
+		writeb(*(const char *)src, addr);
+		src += 1;
+		addr += 1;
+	}
+}
+
+void outsw(unsigned long addr, const void *src, unsigned long count) {
+	while (count) {
+		count -= 2;
+		writew(*(const short *)src, addr);
+		src += 2;
+		addr += 2;
+	}
+}
+
+void outsl(unsigned long addr, const void *src, unsigned long count) {
+	while (count) {
+		count -= 4;
+		writel(*(const long *)src, addr);
+		src += 4;
+		addr += 4;
+	}
+}
+
+void insb(unsigned long addr, void *dst, unsigned long count) {
+	while (count) {
+		count -= 1;
+		*(unsigned char *)dst = readb(addr);
+		dst += 1;
+		addr += 1;
+	}
+}
+
+void insw(unsigned long addr, void *dst, unsigned long count) {
+	while (count) {
+		count -= 2;
+		*(unsigned short *)dst = readw(addr);
+		dst += 2;
+		addr += 2;
+	}
+}
+
+void insl(unsigned long addr, void *dst, unsigned long count) {
+	while (count) {
+		count -= 4;
+		/*
+		 * XXX I am sure we are in for an unaligned trap here.
+		 */
+		*(unsigned long *)dst = readl(addr);
+		dst += 4;
+		addr += 4;
+	}
+}
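One detail worth noting about the string-I/O loops above: 'count' is a byte count, not an element count; each routine decrements it by its transfer width. A usage sketch against a hypothetical 16-bit data port (the port and device are illustrative, only the byte-count convention is the point):

	/* Drain 'words' 16-bit values from a device FIFO into 'buf'. */
	void read_fifo(unsigned long port, unsigned short *buf, unsigned long words)
	{
		insw(port, buf, words * 2);	/* insw advances by 2 per transfer */
	}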
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index 2ea1755a0858..ddf14dcf2ad9 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -7,7 +7,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2006 Tensilica Inc.
  *
  * Chris Zankel <chris@zankel.net>
  *
@@ -22,57 +22,216 @@
 #include <linux/kernel.h>
 #include <linux/cache.h>
 
-LIST_HEAD(module_buf_list);
+#undef DEBUG_RELOCATE
 
 void *module_alloc(unsigned long size)
 {
-	panic("module_alloc not implemented");
+	if (size == 0)
+		return NULL;
+	return vmalloc(size);
 }
 
 void module_free(struct module *mod, void *module_region)
 {
-	panic("module_free not implemented");
+	vfree(module_region);
+	/* FIXME: If module_region == mod->init_region, trim exception
+	   table entries. */
 }
 
 int module_frob_arch_sections(Elf32_Ehdr *hdr,
 			      Elf32_Shdr *sechdrs,
 			      char *secstrings,
-			      struct module *me)
+			      struct module *mod)
 {
-	panic("module_frob_arch_sections not implemented");
+	return 0;
+}
+
+static int
+decode_calln_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+	return (location[0] & 0xf0) == 0x50;
+#endif
+#ifdef __XTENSA_EL__
+	return (location[0] & 0xf) == 0x5;
+#endif
+}
+
+static int
+decode_l32r_opcode (unsigned char *location)
+{
+#ifdef __XTENSA_EB__
+	return (location[0] & 0xf0) == 0x10;
+#endif
+#ifdef __XTENSA_EL__
+	return (location[0] & 0xf) == 0x1;
+#endif
 }
 
 int apply_relocate(Elf32_Shdr *sechdrs,
 		   const char *strtab,
 		   unsigned int symindex,
 		   unsigned int relsec,
-		   struct module *module)
+		   struct module *mod)
 {
-	panic ("apply_relocate not implemented");
+	printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
+	       mod->name);
+	return -ENOEXEC;
+
 }
 
 int apply_relocate_add(Elf32_Shdr *sechdrs,
 		       const char *strtab,
 		       unsigned int symindex,
 		       unsigned int relsec,
-		       struct module *module)
+		       struct module *mod)
 {
-	panic("apply_relocate_add not implemented");
+	unsigned int i;
+	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	unsigned char *location;
+	uint32_t value;
+
+#ifdef DEBUG_RELOCATE
+	printk("Applying relocate section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+#endif
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+		location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rela[i].r_offset;
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rela[i].r_info);
+		value = sym->st_value + rela[i].r_addend;
+
+		switch (ELF32_R_TYPE(rela[i].r_info)) {
+		case R_XTENSA_NONE:
+		case R_XTENSA_DIFF8:
+		case R_XTENSA_DIFF16:
+		case R_XTENSA_DIFF32:
+		case R_XTENSA_ASM_EXPAND:
+			break;
+
+		case R_XTENSA_32:
+		case R_XTENSA_PLT:
+			*(uint32_t *)location += value;
+			break;
+
+		case R_XTENSA_SLOT0_OP:
+			if (decode_calln_opcode(location)) {
+				value -= ((unsigned long)location & -4) + 4;
+				if ((value & 3) != 0 ||
+				    ((value + (1 << 19)) >> 20) != 0) {
+					printk("%s: relocation out of range, "
+					       "section %d reloc %d "
+					       "sym '%s'\n",
+					       mod->name, relsec, i,
+					       strtab + sym->st_name);
+					return -ENOEXEC;
+				}
+				value = (signed int)value >> 2;
+#ifdef __XTENSA_EB__
+				location[0] = ((location[0] & ~0x3) |
+					       ((value >> 16) & 0x3));
+				location[1] = (value >> 8) & 0xff;
+				location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+				location[0] = ((location[0] & ~0xc0) |
+					       ((value << 6) & 0xc0));
+				location[1] = (value >> 2) & 0xff;
+				location[2] = (value >> 10) & 0xff;
+#endif
+			} else if (decode_l32r_opcode(location)) {
+				value -= (((unsigned long)location + 3) & -4);
+				if ((value & 3) != 0 ||
+				    (signed int)value >> 18 != -1) {
+					printk("%s: relocation out of range, "
+					       "section %d reloc %d "
+					       "sym '%s'\n",
+					       mod->name, relsec, i,
+					       strtab + sym->st_name);
+					return -ENOEXEC;
+				}
+				value = (signed int)value >> 2;
+
+#ifdef __XTENSA_EB__
+				location[1] = (value >> 8) & 0xff;
+				location[2] = value & 0xff;
+#endif
+#ifdef __XTENSA_EL__
+				location[1] = value & 0xff;
+				location[2] = (value >> 8) & 0xff;
+#endif
+			}
+			/* FIXME: Ignore any other opcodes.  The Xtensa
+			   assembler currently assumes that the linker will
+			   always do relaxation and so all PC-relative
+			   operands need relocations.  (The assembler also
+			   writes out the tentative PC-relative values,
+			   assuming no link-time relaxation, so it is usually
+			   safe to ignore the relocations.)  If the
+			   assembler's "--no-link-relax" flag can be made to
+			   work, and if all kernel modules can be assembled
+			   with that flag, then unexpected relocations could
+			   be detected here. */
+			break;
+
+		case R_XTENSA_SLOT1_OP:
+		case R_XTENSA_SLOT2_OP:
+		case R_XTENSA_SLOT3_OP:
+		case R_XTENSA_SLOT4_OP:
+		case R_XTENSA_SLOT5_OP:
+		case R_XTENSA_SLOT6_OP:
+		case R_XTENSA_SLOT7_OP:
+		case R_XTENSA_SLOT8_OP:
+		case R_XTENSA_SLOT9_OP:
+		case R_XTENSA_SLOT10_OP:
+		case R_XTENSA_SLOT11_OP:
+		case R_XTENSA_SLOT12_OP:
+		case R_XTENSA_SLOT13_OP:
+		case R_XTENSA_SLOT14_OP:
+			printk("%s: unexpected FLIX relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+
+		case R_XTENSA_SLOT0_ALT:
+		case R_XTENSA_SLOT1_ALT:
+		case R_XTENSA_SLOT2_ALT:
+		case R_XTENSA_SLOT3_ALT:
+		case R_XTENSA_SLOT4_ALT:
+		case R_XTENSA_SLOT5_ALT:
+		case R_XTENSA_SLOT6_ALT:
+		case R_XTENSA_SLOT7_ALT:
+		case R_XTENSA_SLOT8_ALT:
+		case R_XTENSA_SLOT9_ALT:
+		case R_XTENSA_SLOT10_ALT:
+		case R_XTENSA_SLOT11_ALT:
+		case R_XTENSA_SLOT12_ALT:
+		case R_XTENSA_SLOT13_ALT:
+		case R_XTENSA_SLOT14_ALT:
+			printk("%s: unexpected ALT relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+
+		default:
+			printk("%s: unexpected relocation: %u\n",
+			       mod->name,
+			       ELF32_R_TYPE(rela[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
 }
 
 int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
-		    struct module *me)
+		    struct module *mod)
 {
-	panic ("module_finalize not implemented");
+	return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
-	panic("module_arch_cleanup not implemented");
-}
-
-struct bug_entry *module_find_bug(unsigned long bugaddr)
-{
-	panic("module_find_bug not implemented");
 }
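The l32r branch above encodes a PC-relative literal load: the displacement is measured from the next word-aligned address after the instruction, must itself be word aligned, and must be negative with a magnitude under 256 KB (hence the "(signed int)value >> 18 != -1" rejection). A standalone C restatement of that range check, for illustration only:

	#include <stdint.h>

	static int l32r_in_range(unsigned long location, uint32_t value)
	{
		/* value -= (((unsigned long)location + 3) & -4), as above */
		value -= (location + 3) & ~3UL;

		/* Accept only word-aligned, negative 18-bit displacements. */
		return (value & 3) == 0 && ((int32_t)value >> 18) == -1;
	}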
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 77deae5290f0..b7c073484e01 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -394,72 +394,3 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 	return ret;
 }
-
-/*
- * This probably belongs here rather than ioport.c because
- * we do not want this crud linked into SBus kernels.
- * Also, think for a moment about likes of floppy.c that
- * include architecture specific parts. They may want to redefine ins/outs.
- *
- * We do not use horrible macros here because we want to
- * advance pointer by sizeof(size).
- */
-void outsb(unsigned long addr, const void *src, unsigned long count) {
-	while (count) {
-		count -= 1;
-		writeb(*(const char *)src, addr);
-		src += 1;
-		addr += 1;
-	}
-}
-
-void outsw(unsigned long addr, const void *src, unsigned long count) {
-	while (count) {
-		count -= 2;
-		writew(*(const short *)src, addr);
-		src += 2;
-		addr += 2;
-	}
-}
-
-void outsl(unsigned long addr, const void *src, unsigned long count) {
-	while (count) {
-		count -= 4;
-		writel(*(const long *)src, addr);
-		src += 4;
-		addr += 4;
-	}
-}
-
-void insb(unsigned long addr, void *dst, unsigned long count) {
-	while (count) {
-		count -= 1;
-		*(unsigned char *)dst = readb(addr);
-		dst += 1;
-		addr += 1;
-	}
-}
-
-void insw(unsigned long addr, void *dst, unsigned long count) {
-	while (count) {
-		count -= 2;
-		*(unsigned short *)dst = readw(addr);
-		dst += 2;
-		addr += 2;
-	}
-}
-
-void insl(unsigned long addr, void *dst, unsigned long count) {
-	while (count) {
-		count -= 4;
-		/*
-		 * XXX I am sure we are in for an unaligned trap here.
-		 */
-		*(unsigned long *)dst = readl(addr);
-		dst += 4;
-		addr += 4;
-	}
-}
-
-
-
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index dd498f1604e1..f53d7bd9dfb2 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -46,20 +46,6 @@
 
 extern void ret_from_fork(void);
 
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-{ INIT_THREAD_INFO(init_task) };
-
-struct task_struct init_task = INIT_TASK(init_task);
-EXPORT_SYMBOL(init_task);
-
 struct task_struct *current_set[NR_CPUS] = {&init_task, };
 
 void (*pm_power_off)(void) = NULL;
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
index d40f4b1b75ac..995c6410ae10 100644
--- a/arch/xtensa/kernel/semaphore.c
+++ b/arch/xtensa/kernel/semaphore.c
@@ -100,7 +100,7 @@ static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
 	return ret;
 }
 
-spinlock_t semaphore_wake_lock;
+DEFINE_SPINLOCK(semaphore_wake_lock);
 
 /*
  * Semaphores are implemented using a two-way counter:
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index fe3834bc1dbf..f3e16efcd47a 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -93,3 +93,8 @@ asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 	return (long)ret;
 }
 
+asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len)
+{
+	return sys_fadvise64_64(fd, offset, len, advice);
+}
+
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 22949be4a5d8..60d29fe0b1bd 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -32,12 +32,20 @@ EXPORT_SYMBOL(rtc_lock);
 
 #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
 unsigned long ccount_per_jiffy;		/* per 1/HZ */
-unsigned long ccount_nsec;		/* nsec per ccount increment */
+unsigned long nsec_per_ccount;		/* nsec per ccount increment */
 #endif
 
-unsigned int last_ccount_stamp;
 static long last_rtc_update = 0;
 
+/*
+ * Scheduler clock - returns current time in nanoseconds.
+ */
+
+unsigned long long sched_clock(void)
+{
+	return (unsigned long long)jiffies * (1000000000 / HZ);
+}
+
 static irqreturn_t timer_interrupt(int irq, void *dev_id);
 static struct irqaction timer_irqaction = {
 	.handler =	timer_interrupt,
@@ -69,7 +77,6 @@ void __init time_init(void)
 
 	xtime.tv_nsec = 0;
 	last_rtc_update = xtime.tv_sec = sec_n;
-	last_ccount_stamp = get_ccount();
 
 	set_normalized_timespec(&wall_to_monotonic,
 		-xtime.tv_sec, -xtime.tv_nsec);
@@ -85,7 +92,7 @@ int do_settimeofday(struct timespec *tv)
 {
 	time_t wtm_sec, sec = tv->tv_sec;
 	long wtm_nsec, nsec = tv->tv_nsec;
-	unsigned long ccount;
+	unsigned long delta;
 
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
@@ -97,8 +104,10 @@ int do_settimeofday(struct timespec *tv)
 	 * wall time. Discover what correction gettimeofday() would have
 	 * made, and then undo it!
 	 */
-	ccount = get_ccount();
-	nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
+
+	delta = CCOUNT_PER_JIFFY;
+	delta += get_ccount() - get_linux_timer();
+	nsec -= delta * NSEC_PER_CCOUNT;
 
 	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
 	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -117,17 +126,21 @@ EXPORT_SYMBOL(do_settimeofday);
 void do_gettimeofday(struct timeval *tv)
 {
 	unsigned long flags;
-	unsigned long sec, usec, delta, seq;
+	unsigned long volatile sec, usec, delta, seq;
 
 	do {
 		seq = read_seqbegin_irqsave(&xtime_lock, flags);
 
-		delta = get_ccount() - last_ccount_stamp;
 		sec = xtime.tv_sec;
 		usec = (xtime.tv_nsec / NSEC_PER_USEC);
+
+		delta = get_linux_timer() - get_ccount();
+
 	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 
-	usec += (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
+	usec += (((unsigned long) CCOUNT_PER_JIFFY - delta)
+		 * (unsigned long) NSEC_PER_CCOUNT) / NSEC_PER_USEC;
+
 	for (; usec >= 1000000; sec++, usec -= 1000000)
 		;
 
@@ -158,9 +171,12 @@ again:
 
 	write_seqlock(&xtime_lock);
 
-	last_ccount_stamp = next;
+	do_timer(1); /* Linux handler in kernel/timer.c */
+
+	/* Note that writing CCOMPARE clears the interrupt. */
+
 	next += CCOUNT_PER_JIFFY;
-	do_timer (1); /* Linux handler in kernel/timer.c */
+	set_linux_timer(next);
 
 	if (ntp_synced() &&
 	    xtime.tv_sec - last_rtc_update >= 659 &&
@@ -175,19 +191,15 @@ again:
 		write_sequnlock(&xtime_lock);
 	}
 
-	/* NOTE: writing CCOMPAREn clears the interrupt. */
+	/* Allow platform to do something useful (Wdog). */
 
-	set_linux_timer (next);
+	platform_heartbeat();
 
 	/* Make sure we didn't miss any tick... */
 
 	if ((signed long)(get_ccount() - next) > 0)
 		goto again;
 
-	/* Allow platform to do something useful (Wdog). */
-
-	platform_heartbeat();
-
 	return IRQ_HANDLED;
 }
 
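The interpolation in the do_gettimeofday() hunk above works because get_linux_timer() returns the CCOMPARE value programmed for the next tick: the cycles already elapsed in the current jiffy are CCOUNT_PER_JIFFY minus the cycles still to go. A sketch of the arithmetic, using the names the patch itself introduces:

	unsigned long nsec_since_tick(unsigned long ccount, unsigned long next_tick)
	{
		unsigned long to_go = next_tick - ccount;	/* 'delta' above */
		return (CCOUNT_PER_JIFFY - to_go) * NSEC_PER_CCOUNT;
	}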
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index c5e62f9d9f50..8be99c777d9d 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -83,7 +83,7 @@ typedef struct {
83 void* handler; 83 void* handler;
84} dispatch_init_table_t; 84} dispatch_init_table_t;
85 85
86dispatch_init_table_t __init dispatch_init_table[] = { 86static dispatch_init_table_t __initdata dispatch_init_table[] = {
87 87
88{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction}, 88{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
89{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel }, 89{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
@@ -305,7 +305,7 @@ do_debug(struct pt_regs *regs)
 
 #define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
 
-void trap_init(void)
+void __init trap_init(void)
 {
 	int i;
 
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index a5aed5932d7b..10aec22a8f98 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -5,9 +5,5 @@
 # removes any old dependencies. DON'T put your own dependencies here
 # unless it's something special (ie not a .c file).
 #
-# Note 2! The CFLAGS definition is now in the main makefile...
 
-obj-y	 := init.o fault.o tlb.o misc.o
-obj-m	 :=
-obj-n	 :=
-obj-	 :=
+obj-y	 := init.o fault.o tlb.o misc.o cache.o
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
new file mode 100644
index 000000000000..9a1fa9478ae7
--- /dev/null
+++ b/arch/xtensa/mm/cache.c
@@ -0,0 +1,256 @@
+/*
+ * arch/xtensa/mm/cache.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2006 Tensilica Inc.
+ *
+ * Chris Zankel	<chris@zankel.net>
+ * Joe Taylor
+ * Marc Gauthier
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/bootmem.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgtable.h>
+#include <asm/bootparam.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+//#define printd(x...) printk(x)
+#define printd(x...) do { } while(0)
+
+/*
+ * Note:
+ * The kernel provides one architecture bit PG_arch_1 in the page flags that
+ * can be used for cache coherency.
+ *
+ * I$-D$ coherency.
+ *
+ * The Xtensa architecture doesn't keep the instruction cache coherent with
+ * the data cache. We use the architecture bit to indicate if the caches
+ * are coherent. The kernel clears this bit whenever a page is added to the
+ * page cache. At that time, the caches might not be in sync. We, therefore,
+ * define this flag as 'clean' if set.
+ *
+ * D-cache aliasing.
+ *
+ * With cache aliasing, we always have to flush the cache when pages are
+ * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
+ * page.
+ *
+ *
+ *
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+/*
+ * Any time the kernel writes to a user page cache page, or it is about to
+ * read from a page cache page, this routine is called.
+ *
+ */
+
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+
+	/*
+	 * If we have a mapping but the page is not mapped to user-space
+	 * yet, we simply mark this page dirty and defer flushing the
+	 * caches until update_mmu_cache().
+	 */
+
+	if (mapping && !mapping_mapped(mapping)) {
+		if (!test_bit(PG_arch_1, &page->flags))
+			set_bit(PG_arch_1, &page->flags);
+		return;
+
+	} else {
+
+		unsigned long phys = page_to_phys(page);
+		unsigned long temp = page->index << PAGE_SHIFT;
+		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
+		unsigned long virt;
+
+		/*
+		 * Flush the page in kernel space and user space.
+		 * Note that we can omit that step if aliasing is not
+		 * an issue, but we do have to synchronize I$ and D$
+		 * if we have a mapping.
+		 */
+
+		if (!alias && !mapping)
+			return;
+
+		__flush_invalidate_dcache_page((long)page_address(page));
+
+		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
+
+		if (alias)
+			__flush_invalidate_dcache_page_alias(virt, phys);
+
+		if (mapping)
+			__invalidate_icache_page_alias(virt, phys);
+	}
+
+	/* There shouldn't be an entry in the cache for this page anymore. */
+}
+
+
+/*
+ * For now, flush the whole cache. FIXME??
+ */
+
+void flush_cache_range(struct vm_area_struct* vma,
+		       unsigned long start, unsigned long end)
+{
+	__flush_invalidate_dcache_all();
+	__invalidate_icache_all();
+}
+
+/*
+ * Remove any entry in the cache for this page.
+ *
+ * Note that this function is only called for user pages, so use the
+ * alias versions of the cache flush functions.
+ */
+
+void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
+		      unsigned long pfn)
+{
+	/* Note that we have to use the 'alias' address to avoid multi-hit */
+
+	unsigned long phys = page_to_phys(pfn_to_page(pfn));
+	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+
+	__flush_invalidate_dcache_page_alias(virt, phys);
+	__invalidate_icache_page_alias(virt, phys);
+}
+
+#endif
+
+void
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
+	struct page *page;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+
+	/* Invalidate old entry in TLBs */
+
+	invalidate_itlb_mapping(addr);
+	invalidate_dtlb_mapping(addr);
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
+
+		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+		unsigned long paddr = (unsigned long) page_address(page);
+		unsigned long phys = page_to_phys(page);
+
+		__flush_invalidate_dcache_page(paddr);
+
+		__flush_invalidate_dcache_page_alias(vaddr, phys);
+		__invalidate_icache_page_alias(vaddr, phys);
+
+		clear_bit(PG_arch_1, &page->flags);
+	}
+#else
+	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
+	    && (vma->vm_flags & VM_EXEC) != 0) {
+		unsigned long vaddr = addr & PAGE_MASK;
+		__flush_dcache_page(vaddr);
+		__invalidate_icache_page(vaddr);
+		set_bit(PG_arch_1, &page->flags);
+	}
+#endif
+}
+
+/*
+ * access_process_vm() has called get_user_pages(), which has done a
+ * flush_dcache_page() on the page.
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, const void *src,
+		       unsigned long len)
+{
+	unsigned long phys = page_to_phys(page);
+	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+	/* Flush and invalidate user page if aliased. */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(temp, phys);
+	}
+
+	/* Copy data */
+
+	memcpy(dst, src, len);
+
+	/*
+	 * Flush and invalidate kernel page if aliased and synchronize
+	 * data and instruction caches for executable pages.
+	 */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+
+		__flush_invalidate_dcache_range((unsigned long) dst, len);
+		if ((vma->vm_flags & VM_EXEC) != 0) {
+			__invalidate_icache_page_alias(temp, phys);
+		}
+
+	} else if ((vma->vm_flags & VM_EXEC) != 0) {
+		__flush_dcache_range((unsigned long)dst,len);
+		__invalidate_icache_range((unsigned long) dst, len);
+	}
+}
+
+extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+				unsigned long vaddr, void *dst, const void *src,
+				unsigned long len)
+{
+	unsigned long phys = page_to_phys(page);
+	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
+
+	/*
+	 * Flush user page if aliased.
+	 * (Note: a simple flush would be sufficient.)
+	 */
+
+	if (alias) {
+		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(temp, phys);
+	}
+
+	memcpy(dst, src, len);
+}
+
+#endif
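DCACHE_ALIAS_EQ() above compares cache "colors": when the way size of a virtually indexed data cache exceeds the page size, two mappings of the same physical page can land in different cache sets whenever their index bits above PAGE_SHIFT differ. A sketch of the presumed macro semantics (the real definitions live in the asm-xtensa headers touched by this commit but not shown here):

	/* Color = index bits above the page offset, within one cache way. */
	#define DCACHE_ALIAS(a)    (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
	#define DCACHE_ALIAS_EQ(a, b)    (DCACHE_ALIAS(a) == DCACHE_ALIAS(b))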
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 16004067add3..45d28f217c03 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -24,6 +24,8 @@
 unsigned long asid_cache = ASID_USER_FIRST;
 void bad_page_fault(struct pt_regs*, unsigned long, int);
 
+#undef DEBUG_PAGE_FAULT
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -64,7 +66,7 @@ void do_page_fault(struct pt_regs *regs)
 	       exccause == EXCCAUSE_ITLB_MISS ||
 	       exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
 
-#if 0
+#ifdef DEBUG_PAGE_FAULT
 	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
 	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
 #endif
@@ -219,7 +221,7 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 
 	/* Are we prepared to handle this kernel fault?  */
 	if ((entry = search_exception_tables(regs->pc)) != NULL) {
-#if 1
+#ifdef DEBUG_PAGE_FAULT
 		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
 		       current->comm, regs->pc, entry->fixup);
 #endif
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 8415c76f11c2..b3086f34a8e7 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -15,40 +15,24 @@
  * Kevin Chea
  */
 
-#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
 #include <linux/bootmem.h>
 #include <linux/swap.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
 
 #include <asm/pgtable.h>
 #include <asm/bootparam.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
-#include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-
 
-#define DEBUG 0
-
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-//static DEFINE_SPINLOCK(tlb_lock);
-
-/*
- * This flag is used to indicate that the page was mapped and modified in
- * kernel space, so the cache is probably dirty at that address.
- * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
- * synchronizes the caches if this bit is set.
- */
-
-#define PG_cache_clean  PG_arch_1
 
 /* References to section boundaries */
 
@@ -323,228 +307,22 @@ void show_mem(void)
 	printk("%d free pages\n", free);
 }
 
-/* ------------------------------------------------------------------------- */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
-
-/*
- * With cache aliasing, the page color of the page in kernel space and user
- * space might mismatch. We temporarily map the page to a different virtual
- * address with the same color and clear the page there.
- */
-
-void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
-{
-
-	/* There shouldn't be any entries for this page. */
-
-	__flush_invalidate_dcache_page_phys(__pa(page_address(page)));
-
-	if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
-		unsigned long v, p;
-
-		/* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */
-
-		spin_lock(&tlb_lock);
-
-		p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL)));
-		kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
-		v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
-		__asm__ __volatile__("wdtlb  %0,%1; dsync" : :"a" (p), "a" (v));
-
-		clear_page(kaddr);
-
-		spin_unlock(&tlb_lock);
-	} else {
-		clear_page(kaddr);
-	}
-
-	/* We need to make sure that i$ and d$ are coherent. */
-
-	clear_bit(PG_cache_clean, &page->flags);
-}
-
-/*
- * With cache aliasing, we have to make sure that the page color of the page
- * in kernel space matches that of the virtual user address before we read
- * the page. If the page color differ, we create a temporary DTLB entry with
- * the corrent page color and use this 'temporary' address as the source.
- * We then use the same approach as in clear_user_page and copy the data
- * to the kernel space and clear the PG_cache_clean bit to synchronize caches
- * later.
- *
- * Note:
- * Instead of using another 'way' for the temporary DTLB entry, we could
- * probably use the same entry that points to the kernel address (after
- * saving the original value and restoring it when we are done).
- */
-
-void copy_user_page(void* to, void* from, unsigned long vaddr,
-		    struct page* to_page)
-{
-	/* There shouldn't be any entries for the new page. */
-
-	__flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));
-
-	spin_lock(&tlb_lock);
-
-	if (!PAGE_COLOR_EQ(vaddr, from)) {
-		unsigned long v, p, t;
-
-		__asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
-				      : "=a"(p), "=a"(t) : "a"(from));
-		from = (void*)PAGE_COLOR_MAP0(vaddr);
-		v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
-		__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
-	}
-
-	if (!PAGE_COLOR_EQ(vaddr, to)) {
-		unsigned long v, p;
-
-		p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL)));
-		to = (void*)PAGE_COLOR_MAP1(vaddr);
-		v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
-		__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
-	}
-	copy_page(to, from);
-
-	spin_unlock(&tlb_lock);
-
-	/* We need to make sure that i$ and d$ are coherent. */
-
-	clear_bit(PG_cache_clean, &to_page->flags);
-}
-
-
-
-/*
- * Any time the kernel writes to a user page cache page, or it is about to
- * read from a page cache page this routine is called.
- *
- * Note:
- * The kernel currently only provides one architecture bit in the page
- * flags that we use for I$/D$ coherency. Maybe, in future, we can
- * use a sepearte bit for deferred dcache aliasing:
- * If the page is not mapped yet, we only need to set a flag,
- * if mapped, we need to invalidate the page.
- */
-// FIXME: we probably need this for WB caches not only for Page Coloring..
-
-void flush_dcache_page(struct page *page)
-{
-	unsigned long addr = __pa(page_address(page));
-	struct address_space *mapping = page_mapping(page);
-
-	__flush_invalidate_dcache_page_phys(addr);
-
-	if (!test_bit(PG_cache_clean, &page->flags))
-		return;
-
-	/* If this page hasn't been mapped, yet, handle I$/D$ coherency later.*/
-#if 0
-	if (mapping && !mapping_mapped(mapping))
-		clear_bit(PG_cache_clean, &page->flags);
-	else
-#endif
-	__invalidate_icache_page_phys(addr);
-}
-
-void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
-		       unsigned long e)
-{
-	__flush_invalidate_cache_all();
-}
-
-void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
-		      unsigned long pfn)
-{
-	struct page *page = pfn_to_page(pfn);
-
-	/* Remove any entry for the old mapping. */
-
-	if (current->active_mm == vma->vm_mm) {
-		unsigned long addr = __pa(page_address(page));
-		__flush_invalidate_dcache_page_phys(addr);
-		if ((vma->vm_flags & VM_EXEC) != 0)
-			__invalidate_icache_page_phys(addr);
-	} else {
-		BUG();
-	}
-}
-
-#endif	/* (DCACHE_WAY_SIZE > PAGE_SIZE) */
-
-
-pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
-{
-	pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-	if (likely(pte)) {
-		pte_t* ptep = (pte_t*)(pte_val(*pte) + PAGE_OFFSET);
-		int i;
-		for (i = 0; i < 1024; i++, ptep++)
-			pte_clear(mm, addr, ptep);
-	}
-	return pte;
-}
-
-struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	struct page *page;
-
-	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
-
-	if (likely(page)) {
-		pte_t* ptep = kmap_atomic(page, KM_USER0);
-		int i;
-
-		for (i = 0; i < 1024; i++, ptep++)
-			pte_clear(mm, addr, ptep);
-
-		kunmap_atomic(ptep, KM_USER0);
-	}
-	return page;
-}
-
-
-/*
- * Handle D$/I$ coherency.
- *
- * Note:
- * We only have one architecture bit for the page flags, so we cannot handle
- * cache aliasing, yet.
- */
-
-void
-update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
-{
-	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
-	unsigned long vaddr = addr & PAGE_MASK;
-
-	if (!pfn_valid(pfn))
-		return;
-
-	page = pfn_to_page(pfn);
-
-	invalidate_itlb_mapping(addr);
-	invalidate_dtlb_mapping(addr);
-
-	/* We have a new mapping. Use it. */
-
-	write_dtlb_entry(pte, dtlb_probe(addr));
-
-	/* If the processor can execute from this page, synchronize D$/I$. */
-
-	if ((vma->vm_flags & VM_EXEC) != 0) {
-
-		write_itlb_entry(pte, itlb_probe(addr));
-
-		/* Synchronize caches, if not clean. */
-
-		if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
-			__flush_dcache_page(vaddr);
-			__invalidate_icache_page(vaddr);
-		}
-	}
-}
-
+struct kmem_cache *pgtable_cache __read_mostly;
+
+static void pgd_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
+{
+	pte_t* ptep = (pte_t*)addr;
+	int i;
+
+	for (i = 0; i < 1024; i++, ptep++)
+		pte_clear(NULL, 0, ptep);
+
+}
+
+void __init pgtable_cache_init(void)
+{
+	pgtable_cache = kmem_cache_create("pgd",
+			PAGE_SIZE, PAGE_SIZE,
+			SLAB_HWCACHE_ALIGN,
+			pgd_ctor);
+}
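The pgd_ctor() added above runs once when the slab allocator constructs a new pgd page, pre-marking all 1024 entries (a 4 KB page of 4-byte entries) invalid so a freshly allocated page directory never exposes stale translations. A sketch of how the cache is then presumably consumed by a pgd allocator (pgd_alloc itself lives in pgalloc.h and is not part of this diff, so this is an assumption of the usual pattern):

	pgd_t *pgd_alloc_sketch(struct mm_struct *mm)
	{
		/* Objects come out of the slab already cleared by pgd_ctor(). */
		return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
	}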
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index ae085332c607..e1f880368e32 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -7,29 +7,33 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
  *
  * Chris Zankel	<chris@zankel.net>
  */
 
-/* Note: we might want to implement some of the loops as zero-overhead-loops,
- * where applicable and if supported by the processor.
- */
 
 #include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheasm.h>
+#include <asm/tlbflush.h>
+
 
-/* clear_page (page) */
+/*
+ * clear_page and clear_user_page are the same for non-cache-aliased configs.
+ *
+ * clear_page (unsigned long page)
+ *                    a2
+ */
 
 ENTRY(clear_page)
 	entry	a1, 16
-	addi	a4, a2, PAGE_SIZE
-	movi	a3, 0
 
-1:	s32i	a3, a2, 0
+	movi	a3, 0
+	__loopi	a2, a7, PAGE_SIZE, 32
+	s32i	a3, a2, 0
 	s32i	a3, a2, 4
 	s32i	a3, a2, 8
 	s32i	a3, a2, 12
@@ -37,42 +41,277 @@ ENTRY(clear_page)
37 s32i a3, a2, 20 41 s32i a3, a2, 20
38 s32i a3, a2, 24 42 s32i a3, a2, 24
39 s32i a3, a2, 28 43 s32i a3, a2, 28
40 addi a2, a2, 32 44 __endla a2, a7, 32
41 blt a2, a4, 1b
42 45
43 retw 46 retw
44 47
45/* 48/*
49 * copy_page and copy_user_page are the same for non-cache-aliased configs.
50 *
46 * copy_page (void *to, void *from) 51 * copy_page (void *to, void *from)
47 * a2 a3 52 * a2 a3
48 */ 53 */
49 54
50ENTRY(copy_page) 55ENTRY(copy_page)
51 entry a1, 16 56 entry a1, 16
52 addi a4, a2, PAGE_SIZE
53
541: l32i a5, a3, 0
55 l32i a6, a3, 4
56 l32i a7, a3, 8
57 s32i a5, a2, 0
58 s32i a6, a2, 4
59 s32i a7, a2, 8
60 l32i a5, a3, 12
61 l32i a6, a3, 16
62 l32i a7, a3, 20
63 s32i a5, a2, 12
64 s32i a6, a2, 16
65 s32i a7, a2, 20
66 l32i a5, a3, 24
67 l32i a6, a3, 28
68 s32i a5, a2, 24
69 s32i a6, a2, 28
70 addi a2, a2, 32
71 addi a3, a3, 32
72 blt a2, a4, 1b
73 57
58 __loopi a2, a4, PAGE_SIZE, 32
59
60 l32i a8, a3, 0
61 l32i a9, a3, 4
62 s32i a8, a2, 0
63 s32i a9, a2, 4
64
65 l32i a8, a3, 8
66 l32i a9, a3, 12
67 s32i a8, a2, 8
68 s32i a9, a2, 12
69
70 l32i a8, a3, 16
71 l32i a9, a3, 20
72 s32i a8, a2, 16
73 s32i a9, a2, 20
74
75 l32i a8, a3, 24
76 l32i a9, a3, 28
77 s32i a8, a2, 24
78 s32i a9, a2, 28
79
80 addi a2, a2, 32
81 addi a3, a3, 32
82
83 __endl a2, a4
84
85 retw
86
87/*
88 * If we have to deal with cache aliasing, we use temporary memory mappings
89 * to ensure that the source and destination pages have the same color as
90 * the virtual address. We use way 0 and 1 for temporary mappings in such cases.
91 *
92 * The temporary DTLB entries shouldn't be flushed by interrupts, but are
93 * flushed by preemptive task switches. Special code in the
94 * fast_second_level_miss handler re-established the temporary mapping.
95 * It requires that the PPNs for the destination and source addresses are
96 * in a6, and a7, respectively.
97 */
98
99/* TLB miss exceptions are treated special in the following region */
100
101ENTRY(__tlbtemp_mapping_start)
102
103#if (DCACHE_WAY_SIZE > PAGE_SIZE)
104
105/*
106 * clear_user_page (void *addr, unsigned long vaddr, struct page *page)
107 * a2 a3 a4
108 */
109
110ENTRY(clear_user_page)
111 entry a1, 32
112
113 /* Mark page dirty and determine alias. */
114
115 movi a7, (1 << PG_ARCH_1)
116 l32i a5, a4, PAGE_FLAGS
117 xor a6, a2, a3
118 extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER
119 extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
120 or a5, a5, a7
121 slli a3, a3, PAGE_SHIFT
122 s32i a5, a4, PAGE_FLAGS
123
124 /* Skip setting up a temporary DTLB if not aliased. */
125
126 beqz a6, 1f
127
128 /* Invalidate kernel page. */
129
130 mov a10, a2
131 call8 __invalidate_dcache_page
132
133 /* Setup a temporary DTLB with the color of the VPN */
134
135 movi a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
136 movi a5, TLBTEMP_BASE_1 # virt
137 add a6, a2, a4 # ppn
138 add a2, a5, a3 # add 'color'
139
140 wdtlb a6, a2
141 dsync
142
1431: movi a3, 0
144 __loopi a2, a7, PAGE_SIZE, 32
145 s32i a3, a2, 0
146 s32i a3, a2, 4
147 s32i a3, a2, 8
148 s32i a3, a2, 12
149 s32i a3, a2, 16
150 s32i a3, a2, 20
151 s32i a3, a2, 24
152 s32i a3, a2, 28
153 __endla a2, a7, 32
154
155 bnez a6, 1f
156 retw
157
158 /* We need to invalidate the temporary idtlb entry, if any. */
159
1601: addi a2, a2, -PAGE_SIZE
161 idtlb a2
162 dsync
163
164 retw
165
166/*
 167 * copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
168 * a2 a3 a4 a5
169 */
170
171ENTRY(copy_user_page)
172
173 entry a1, 32
174
175 /* Mark page dirty and determine alias for destination. */
176
177 movi a8, (1 << PG_ARCH_1)
178 l32i a9, a5, PAGE_FLAGS
179 xor a6, a2, a4
180 xor a7, a3, a4
181 extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER
182 extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
183 extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER
184 or a9, a9, a8
185 slli a4, a4, PAGE_SHIFT
186 s32i a9, a5, PAGE_FLAGS
187 movi a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
188
189 beqz a6, 1f
190
191 /* Invalidate dcache */
192
193 mov a10, a2
194 call8 __invalidate_dcache_page
195
196 /* Setup a temporary DTLB with a matching color. */
197
198 movi a8, TLBTEMP_BASE_1 # base
199 add a6, a2, a5 # ppn
200 add a2, a8, a4 # add 'color'
201
202 wdtlb a6, a2
203 dsync
204
205 /* Skip setting up a temporary DTLB for destination if not aliased. */
206
2071: beqz a7, 1f
208
209 /* Setup a temporary DTLB with a matching color. */
210
211 movi a8, TLBTEMP_BASE_2 # base
212 add a7, a3, a5 # ppn
213 add a3, a8, a4
214 addi a8, a3, 1 # way1
215
216 wdtlb a7, a8
217 dsync
218
2191: __loopi a2, a4, PAGE_SIZE, 32
220
221 l32i a8, a3, 0
222 l32i a9, a3, 4
223 s32i a8, a2, 0
224 s32i a9, a2, 4
225
226 l32i a8, a3, 8
227 l32i a9, a3, 12
228 s32i a8, a2, 8
229 s32i a9, a2, 12
230
231 l32i a8, a3, 16
232 l32i a9, a3, 20
233 s32i a8, a2, 16
234 s32i a9, a2, 20
235
236 l32i a8, a3, 24
237 l32i a9, a3, 28
238 s32i a8, a2, 24
239 s32i a9, a2, 28
240
241 addi a2, a2, 32
242 addi a3, a3, 32
243
244 __endl a2, a4
245
246 /* We need to invalidate any temporary mapping! */
247
248 bnez a6, 1f
249 bnez a7, 2f
250 retw
251
2521: addi a2, a2, -PAGE_SIZE
253 idtlb a2
254 dsync
255 bnez a7, 2f
256 retw
257
2582: addi a3, a3, -PAGE_SIZE+1
259 idtlb a3
260 dsync
261
262 retw
263
264#endif
265
266#if (DCACHE_WAY_SIZE > PAGE_SIZE)
267
268/*
269 * void __flush_invalidate_dcache_page_alias (addr, phys)
270 * a2 a3
271 */
272
273ENTRY(__flush_invalidate_dcache_page_alias)
274 entry sp, 16
275
276 movi a7, 0 # required for exception handler
277 addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
278 mov a4, a2
279 wdtlb a6, a2
280 dsync
281
282 ___flush_invalidate_dcache_page a2 a3
283
284 idtlb a4
285 dsync
286
287 retw
288
289#endif
290
291ENTRY(__tlbtemp_mapping_itlb)
292
293#if (ICACHE_WAY_SIZE > PAGE_SIZE)
294
295ENTRY(__invalidate_icache_page_alias)
296 entry sp, 16
297
298 addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
299 mov a4, a2
300 witlb a6, a2
301 isync
302
303 ___invalidate_icache_page a2 a3
304
305 iitlb a4
306 isync
74 retw 307 retw
75 308
309#endif
310
 311/* End of special treatment in TLB miss exceptions */
312
313ENTRY(__tlbtemp_mapping_end)
314
76/* 315/*
77 * void __invalidate_icache_page(ulong start) 316 * void __invalidate_icache_page(ulong start)
78 */ 317 */
@@ -121,8 +360,6 @@ ENTRY(__flush_dcache_page)
121 dsync 360 dsync
122 retw 361 retw
123 362
124
125
126/* 363/*
127 * void __invalidate_icache_range(ulong start, ulong size) 364 * void __invalidate_icache_range(ulong start, ulong size)
128 */ 365 */
@@ -168,7 +405,6 @@ ENTRY(__invalidate_dcache_range)
168 405
169 ___invalidate_dcache_range a2 a3 a4 406 ___invalidate_dcache_range a2 a3 a4
170 407
171
172 retw 408 retw
173 409
174/* 410/*
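
As a reading aid for the clear_user_page/copy_user_page assembly above, the
following user-space sketch reproduces the color-matching address arithmetic
in C. The configuration values (8 KB cache way, 4 KB pages) and the sample
addresses are assumptions chosen for illustration; TLBTEMP_BASE_1 comes from
the pgtable.h hunk later in this merge.

#include <stdio.h>

#define PAGE_SHIFT         12
#define PAGE_SIZE          (1UL << PAGE_SHIFT)
#define PAGE_OFFSET        0xd0000000UL   /* XCHAL_KSEG_CACHED_VADDR */
#define TLBTEMP_BASE_1     0xc6ff0000UL   /* from pgtable.h below */
#define DCACHE_ALIAS_ORDER 1              /* e.g. 8 KB way, 4 KB pages */

int main(void)
{
        unsigned long kaddr  = 0xd0346000UL;  /* kernel mapping of the page */
        unsigned long uvaddr = 0x10003000UL;  /* user virtual address */

        /* extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER; slli a3, a3, PAGE_SHIFT */
        unsigned long color = (uvaddr >> PAGE_SHIFT) & ((1UL << DCACHE_ALIAS_ORDER) - 1);
        /* add a2, a5, a3: temporary vaddr carrying the user page's color */
        unsigned long tmp   = TLBTEMP_BASE_1 + (color << PAGE_SHIFT);
        /* add a6, a2, a4 with a4 = -PAGE_OFFSET + prot: the PPN for wdtlb */
        unsigned long ppn   = kaddr - PAGE_OFFSET;

        printf("color %lu, temp mapping 0x%08lx, ppn 0x%08lx\n", color, tmp, ppn);
        return 0;
}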
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
index 2f4f20ffe666..854677d0c3f6 100644
--- a/arch/xtensa/platform-iss/console.c
+++ b/arch/xtensa/platform-iss/console.c
@@ -20,7 +20,6 @@
20#include <linux/param.h> 20#include <linux/param.h>
21#include <linux/serial.h> 21#include <linux/serial.h>
22#include <linux/serialP.h> 22#include <linux/serialP.h>
23#include <linux/console.h>
24 23
25#include <asm/uaccess.h> 24#include <asm/uaccess.h>
26#include <asm/irq.h> 25#include <asm/irq.h>
diff --git a/include/asm-xtensa/bugs.h b/include/asm-xtensa/bugs.h
index c42285320133..69b29d198249 100644
--- a/include/asm-xtensa/bugs.h
+++ b/include/asm-xtensa/bugs.h
@@ -13,10 +13,6 @@
13#ifndef _XTENSA_BUGS_H 13#ifndef _XTENSA_BUGS_H
14#define _XTENSA_BUGS_H 14#define _XTENSA_BUGS_H
15 15
16#include <asm/processor.h> 16static void check_bugs(void) { }
17
18static void __init check_bugs(void)
19{
20}
21 17
22#endif /* _XTENSA_BUGS_H */ 18#endif /* _XTENSA_BUGS_H */
diff --git a/include/asm-xtensa/cache.h b/include/asm-xtensa/cache.h
index 1c4a78f29ae2..3bba2a540cf0 100644
--- a/include/asm-xtensa/cache.h
+++ b/include/asm-xtensa/cache.h
@@ -19,6 +19,15 @@
19 19
20#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS) 20#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
21#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS) 21#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
22#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
23#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
24
25/* Maximum cache size per way. */
26#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
27# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
28#else
29# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
30#endif
22 31
23 32
24#endif /* _XTENSA_CACHE_H */ 33#endif /* _XTENSA_CACHE_H */
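
A quick worked example of the new way-size macros, for a hypothetical core: a
16 KB, 2-way set-associative data cache has 8 KB per way, which exceeds the
4 KB page size, so the cache-aliasing paths added in this merge compile in.

/* Hypothetical core configuration, for illustration only. */
#define XCHAL_DCACHE_SIZE 16384
#define XCHAL_DCACHE_WAYS 2
#define DCACHE_WAY_SIZE   (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)  /* 8192 */
/* 8192 > PAGE_SIZE (4096), so (DCACHE_WAY_SIZE > PAGE_SIZE) is true. */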
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index 22ef901b7845..b773c57e75a5 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -5,7 +5,7 @@
5 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 6 * for more details.
7 * 7 *
8 * (C) 2001 - 2006 Tensilica Inc. 8 * (C) 2001 - 2007 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_CACHEFLUSH_H 11#ifndef _XTENSA_CACHEFLUSH_H
@@ -18,10 +18,7 @@
18#include <asm/page.h> 18#include <asm/page.h>
19 19
20/* 20/*
 21 * flush and invalidate data cache, invalidate instruction cache: 21 * Low-level routines for cache flushing.
22 *
23 * __flush_invalidate_cache_all()
24 * __flush_invalidate_cache_range(from,sze)
25 * 22 *
26 * invalidate data or instruction cache: 23 * invalidate data or instruction cache:
27 * 24 *
@@ -40,26 +37,39 @@
40 * __flush_invalidate_dcache_all() 37 * __flush_invalidate_dcache_all()
41 * __flush_invalidate_dcache_page(adr) 38 * __flush_invalidate_dcache_page(adr)
42 * __flush_invalidate_dcache_range(from,size) 39 * __flush_invalidate_dcache_range(from,size)
40 *
 41 * special routines for cache aliasing:
42 *
43 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
44 * __invalidate_icache_page_alias(vaddr,paddr)
43 */ 45 */
44 46
45extern void __flush_invalidate_cache_all(void); 47extern void __invalidate_dcache_all(void);
46extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
47extern void __flush_invalidate_dcache_all(void);
48extern void __invalidate_icache_all(void); 48extern void __invalidate_icache_all(void);
49
50extern void __invalidate_dcache_page(unsigned long); 49extern void __invalidate_dcache_page(unsigned long);
51extern void __invalidate_icache_page(unsigned long); 50extern void __invalidate_icache_page(unsigned long);
52extern void __invalidate_icache_range(unsigned long, unsigned long); 51extern void __invalidate_icache_range(unsigned long, unsigned long);
53extern void __invalidate_dcache_range(unsigned long, unsigned long); 52extern void __invalidate_dcache_range(unsigned long, unsigned long);
54 53
54
55#if XCHAL_DCACHE_IS_WRITEBACK 55#if XCHAL_DCACHE_IS_WRITEBACK
56extern void __flush_invalidate_dcache_all(void);
56extern void __flush_dcache_page(unsigned long); 57extern void __flush_dcache_page(unsigned long);
58extern void __flush_dcache_range(unsigned long, unsigned long);
57extern void __flush_invalidate_dcache_page(unsigned long); 59extern void __flush_invalidate_dcache_page(unsigned long);
58extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); 60extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
59#else 61#else
60# define __flush_dcache_page(p) do { } while(0) 62# define __flush_dcache_range(p,s) do { } while(0)
61# define __flush_invalidate_dcache_page(p) do { } while(0) 63# define __flush_dcache_page(p) do { } while(0)
62# define __flush_invalidate_dcache_range(p,s) do { } while(0) 64# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
65# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
66#endif
67
68#if (DCACHE_WAY_SIZE > PAGE_SIZE)
69extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
70#endif
71#if (ICACHE_WAY_SIZE > PAGE_SIZE)
72extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
63#endif 73#endif
64 74
65/* 75/*
@@ -71,17 +81,21 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
71 * (see also Documentation/cachetlb.txt) 81 * (see also Documentation/cachetlb.txt)
72 */ 82 */
73 83
74#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 84#if (DCACHE_WAY_SIZE > PAGE_SIZE)
75 85
76#define flush_cache_all() __flush_invalidate_cache_all(); 86#define flush_cache_all() \
77#define flush_cache_mm(mm) __flush_invalidate_cache_all(); 87 do { \
78#define flush_cache_dup_mm(mm) __flush_invalidate_cache_all(); 88 __flush_invalidate_dcache_all(); \
89 __invalidate_icache_all(); \
90 } while (0)
79 91
80#define flush_cache_vmap(start,end) __flush_invalidate_cache_all(); 92#define flush_cache_mm(mm) flush_cache_all()
81#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all(); 93#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
82 94
83extern void flush_dcache_page(struct page*); 95#define flush_cache_vmap(start,end) flush_cache_all()
96#define flush_cache_vunmap(start,end) flush_cache_all()
84 97
98extern void flush_dcache_page(struct page*);
85extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); 99extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
86extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); 100extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
87 101
@@ -101,24 +115,39 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon
101 115
102#endif 116#endif
103 117
118/* Ensure consistency between data and instruction cache. */
104#define flush_icache_range(start,end) \ 119#define flush_icache_range(start,end) \
105 __invalidate_icache_range(start,(end)-(start)) 120 do { \
121 __flush_dcache_range(start, (end) - (start)); \
122 __invalidate_icache_range(start,(end) - (start)); \
123 } while (0)
106 124
107/* This is not required, see Documentation/cachetlb.txt */ 125/* This is not required, see Documentation/cachetlb.txt */
108 126#define flush_icache_page(vma,page) do { } while (0)
109#define flush_icache_page(vma,page) do { } while(0)
110 127
111#define flush_dcache_mmap_lock(mapping) do { } while (0) 128#define flush_dcache_mmap_lock(mapping) do { } while (0)
112#define flush_dcache_mmap_unlock(mapping) do { } while (0) 129#define flush_dcache_mmap_unlock(mapping) do { } while (0)
113 130
131#if (DCACHE_WAY_SIZE > PAGE_SIZE)
114 132
115#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 133extern void copy_to_user_page(struct vm_area_struct*, struct page*,
116 memcpy(dst, src, len) 134 unsigned long, void*, const void*, unsigned long);
135extern void copy_from_user_page(struct vm_area_struct*, struct page*,
136 unsigned long, void*, const void*, unsigned long);
137
138#else
139
140#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
141 do { \
142 memcpy(dst, src, len); \
143 __flush_dcache_range((unsigned long) dst, len); \
144 __invalidate_icache_range((unsigned long) dst, len); \
145 } while (0)
117 146
118#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 147#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
119 memcpy(dst, src, len) 148 memcpy(dst, src, len)
120 149
121#endif /* __KERNEL__ */ 150#endif
122 151
152#endif /* __KERNEL__ */
123#endif /* _XTENSA_CACHEFLUSH_H */ 153#endif /* _XTENSA_CACHEFLUSH_H */
124
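
The reworked flush_icache_range() deserves a short illustration. On a
write-back D-cache, instructions stored through the data cache (as the new
module loader does) may still sit in dirty cache lines when the I-cache
fetches them, so the range must be written back before the I-cache is
invalidated. A hedged sketch of the calling pattern; the function name and
arguments here are illustrative, not from this patch:

/* Sketch: publishing freshly written instructions. 'buf' and 'len' are
 * hypothetical; in-tree callers include the module loader. */
static void publish_code(void *buf, unsigned long len)
{
        /* ... instructions were just memcpy()'d into buf ... */
        flush_icache_range((unsigned long)buf, (unsigned long)buf + len);
        /* __flush_dcache_range() pushed the bytes to memory, and
         * __invalidate_icache_range() dropped any stale I-cache lines;
         * only now is it safe to execute from buf. */
}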
diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h
index 1569b53cec91..7083d46766a8 100644
--- a/include/asm-xtensa/elf.h
+++ b/include/asm-xtensa/elf.h
@@ -20,6 +20,56 @@
20#define EM_XTENSA 94 20#define EM_XTENSA 94
21#define EM_XTENSA_OLD 0xABC7 21#define EM_XTENSA_OLD 0xABC7
22 22
23/* Xtensa relocations defined by the ABIs */
24
25#define R_XTENSA_NONE 0
26#define R_XTENSA_32 1
27#define R_XTENSA_RTLD 2
28#define R_XTENSA_GLOB_DAT 3
29#define R_XTENSA_JMP_SLOT 4
30#define R_XTENSA_RELATIVE 5
31#define R_XTENSA_PLT 6
32#define R_XTENSA_OP0 8
33#define R_XTENSA_OP1 9
34#define R_XTENSA_OP2 10
35#define R_XTENSA_ASM_EXPAND 11
36#define R_XTENSA_ASM_SIMPLIFY 12
37#define R_XTENSA_GNU_VTINHERIT 15
38#define R_XTENSA_GNU_VTENTRY 16
39#define R_XTENSA_DIFF8 17
40#define R_XTENSA_DIFF16 18
41#define R_XTENSA_DIFF32 19
42#define R_XTENSA_SLOT0_OP 20
43#define R_XTENSA_SLOT1_OP 21
44#define R_XTENSA_SLOT2_OP 22
45#define R_XTENSA_SLOT3_OP 23
46#define R_XTENSA_SLOT4_OP 24
47#define R_XTENSA_SLOT5_OP 25
48#define R_XTENSA_SLOT6_OP 26
49#define R_XTENSA_SLOT7_OP 27
50#define R_XTENSA_SLOT8_OP 28
51#define R_XTENSA_SLOT9_OP 29
52#define R_XTENSA_SLOT10_OP 30
53#define R_XTENSA_SLOT11_OP 31
54#define R_XTENSA_SLOT12_OP 32
55#define R_XTENSA_SLOT13_OP 33
56#define R_XTENSA_SLOT14_OP 34
57#define R_XTENSA_SLOT0_ALT 35
58#define R_XTENSA_SLOT1_ALT 36
59#define R_XTENSA_SLOT2_ALT 37
60#define R_XTENSA_SLOT3_ALT 38
61#define R_XTENSA_SLOT4_ALT 39
62#define R_XTENSA_SLOT5_ALT 40
63#define R_XTENSA_SLOT6_ALT 41
64#define R_XTENSA_SLOT7_ALT 42
65#define R_XTENSA_SLOT8_ALT 43
66#define R_XTENSA_SLOT9_ALT 44
67#define R_XTENSA_SLOT10_ALT 45
68#define R_XTENSA_SLOT11_ALT 46
69#define R_XTENSA_SLOT12_ALT 47
70#define R_XTENSA_SLOT13_ALT 48
71#define R_XTENSA_SLOT14_ALT 49
72
23/* ELF register definitions. This is needed for core dump support. */ 73/* ELF register definitions. This is needed for core dump support. */
24 74
25/* 75/*
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 0faa614d9696..47c3616ea9ac 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -14,6 +14,7 @@
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15#include <asm/byteorder.h> 15#include <asm/byteorder.h>
16#include <asm/page.h> 16#include <asm/page.h>
17#include <linux/kernel.h>
17 18
18#include <linux/types.h> 19#include <linux/types.h>
19 20
diff --git a/include/asm-xtensa/ioctls.h b/include/asm-xtensa/ioctls.h
index 39e6f23921bb..0ffa942954b9 100644
--- a/include/asm-xtensa/ioctls.h
+++ b/include/asm-xtensa/ioctls.h
@@ -91,6 +91,10 @@
91#define TIOCSBRK _IO('T', 39) /* BSD compatibility */ 91#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
92#define TIOCCBRK _IO('T', 40) /* BSD compatibility */ 92#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
93#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ 93#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
94#define TCGETS2 _IOR('T', 42, struct termios2)
95#define TCSETS2 _IOW('T', 43, struct termios2)
96#define TCSETSW2 _IOW('T', 44, struct termios2)
97#define TCSETSF2 _IOW('T', 45, struct termios2)
94#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 98#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
95#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ 99#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
96 100
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 1213cde75438..55ce2c9749a3 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -1,11 +1,11 @@
1/* 1/*
2 * linux/include/asm-xtensa/page.h 2 * include/asm-xtensa/page.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
 5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc. 8 * Copyright (C) 2001 - 2007 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_PAGE_H 11#ifndef _XTENSA_PAGE_H
@@ -14,6 +14,12 @@
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15 15
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/types.h>
18#include <asm/cache.h>
19
20/*
21 * Fixed TLB translations in the processor.
22 */
17 23
18#define XCHAL_KSEG_CACHED_VADDR 0xd0000000 24#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
19#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000 25#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
@@ -26,13 +32,60 @@
26 */ 32 */
27 33
28#define PAGE_SHIFT 12 34#define PAGE_SHIFT 12
29#define PAGE_SIZE (1 << PAGE_SHIFT) 35#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
30#define PAGE_MASK (~(PAGE_SIZE-1)) 36#define PAGE_MASK (~(PAGE_SIZE-1))
31#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK) 37#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
32 38
33#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR 39#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
34#define MAX_MEM_PFN XCHAL_KSEG_SIZE 40#define MAX_MEM_PFN XCHAL_KSEG_SIZE
35#define PGTABLE_START 0x80000000 41#define PGTABLE_START 0x80000000
42
43/*
44 * Cache aliasing:
45 *
46 * If the cache size for one way is greater than the page size, we have to
47 * deal with cache aliasing. The cache index is wider than the page size:
48 *
49 * | |cache| cache index
50 * | pfn |off| virtual address
51 * |xxxx:X|zzz|
52 * | : | |
53 * | \ / | |
54 * |trans.| |
55 * | / \ | |
56 * |yyyy:Y|zzz| physical address
57 *
58 * When the page number is translated to the physical page address, the lowest
59 * bit(s) (X) that are part of the cache index are also translated (Y).
 60 * If this translation changes bit(s) (X), the cache index is also affected,
61 * thus resulting in a different cache line than before.
62 * The kernel does not provide a mechanism to ensure that the page color
63 * (represented by this bit) remains the same when allocated or when pages
64 * are remapped. When user pages are mapped into kernel space, the color of
65 * the page might also change.
66 *
67 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 68 * to temporarily map a page so we can match the color.
69 */
70
71#if DCACHE_WAY_SIZE > PAGE_SIZE
72# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
73# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
74# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
75# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
76#else
77# define DCACHE_ALIAS_ORDER 0
78#endif
79
80#if ICACHE_WAY_SIZE > PAGE_SIZE
81# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
82# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
83# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
84# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
85#else
86# define ICACHE_ALIAS_ORDER 0
87#endif
88
36 89
37#ifdef __ASSEMBLY__ 90#ifdef __ASSEMBLY__
38 91
@@ -58,34 +111,23 @@ typedef struct { unsigned long pgprot; } pgprot_t;
58 111
59/* 112/*
60 * Pure 2^n version of get_order 113 * Pure 2^n version of get_order
 114 * Use the 'nsau' instruction if supported by the processor, otherwise use the generic version.
61 */ 115 */
62 116
63static inline int get_order(unsigned long size) 117#if XCHAL_HAVE_NSA
118
119static inline __attribute_const__ int get_order(unsigned long size)
64{ 120{
65 int order; 121 int lz;
66#ifndef XCHAL_HAVE_NSU 122 asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
67 unsigned long x1, x2, x4, x8, x16; 123 return 32 - lz;
68
69 size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
70 x1 = size & 0xAAAAAAAA;
71 x2 = size & 0xCCCCCCCC;
72 x4 = size & 0xF0F0F0F0;
73 x8 = size & 0xFF00FF00;
74 x16 = size & 0xFFFF0000;
75 order = x2 ? 2 : 0;
76 order += (x16 != 0) * 16;
77 order += (x8 != 0) * 8;
78 order += (x4 != 0) * 4;
79 order += (x1 != 0);
80
81 return order;
82#else
83 size = (size - 1) >> PAGE_SHIFT;
84 asm ("nsau %0, %1" : "=r" (order) : "r" (size));
85 return 32 - order;
86#endif
87} 124}
88 125
126#else
127
128# include <asm-generic/page.h>
129
130#endif
89 131
90struct page; 132struct page;
91extern void clear_page(void *page); 133extern void clear_page(void *page);
@@ -96,11 +138,11 @@ extern void copy_page(void *to, void *from);
96 * some extra work 138 * some extra work
97 */ 139 */
98 140
99#if (DCACHE_WAY_SIZE > PAGE_SIZE) 141#if DCACHE_WAY_SIZE > PAGE_SIZE
100void clear_user_page(void *addr, unsigned long vaddr, struct page* page); 142extern void clear_user_page(void*, unsigned long, struct page*);
101void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); 143extern void copy_user_page(void*, void*, unsigned long, struct page*);
102#else 144#else
103# define clear_user_page(page,vaddr,pg) clear_page(page) 145# define clear_user_page(page, vaddr, pg) clear_page(page)
104# define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 146# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
105#endif 147#endif
106 148
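
The nsau-based get_order() is compact enough to check from user space. nsau
("normalized shift amount unsigned") counts leading zeros and returns 32 for
a zero input; emulating it with __builtin_clz is an assumption made here for
illustration (the kernel of course uses the real instruction), and it
reproduces the generic get_order() results:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int nsau(unsigned int x)        /* nsau returns 32 for an all-zero input */
{
        return x ? __builtin_clz(x) : 32;
}

static int get_order(unsigned long size)
{
        return 32 - nsau((size - 1) >> PAGE_SHIFT);
}

int main(void)
{
        assert(get_order(1) == 0);               /* anything <= 1 page */
        assert(get_order(PAGE_SIZE) == 0);
        assert(get_order(PAGE_SIZE + 1) == 1);   /* needs 2 pages */
        assert(get_order(3 * PAGE_SIZE) == 2);   /* rounds up to 4 pages */
        printf("nsau-based get_order matches the generic version\n");
        return 0;
}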
diff --git a/include/asm-xtensa/pgalloc.h b/include/asm-xtensa/pgalloc.h
index d56ddf2055e1..3e5b56525102 100644
--- a/include/asm-xtensa/pgalloc.h
+++ b/include/asm-xtensa/pgalloc.h
@@ -1,11 +1,11 @@
1/* 1/*
2 * linux/include/asm-xtensa/pgalloc.h 2 * include/asm-xtensa/pgalloc.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Copyright (C) 2001-2005 Tensilica Inc. 8 * Copyright (C) 2001-2007 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_PGALLOC_H 11#ifndef _XTENSA_PGALLOC_H
@@ -13,103 +13,54 @@
13 13
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15 15
16#include <linux/threads.h>
17#include <linux/highmem.h> 16#include <linux/highmem.h>
18#include <asm/processor.h>
19#include <asm/cacheflush.h>
20
21
22/* Cache aliasing:
23 *
24 * If the cache size for one way is greater than the page size, we have to
25 * deal with cache aliasing. The cache index is wider than the page size:
26 *
27 * |cache |
28 * |pgnum |page| virtual address
29 * |xxxxxX|zzzz|
30 * | | |
31 * \ / | |
32 * trans.| |
33 * / \ | |
34 * |yyyyyY|zzzz| physical address
35 *
36 * When the page number is translated to the physical page address, the lowest
37 * bit(s) (X) that are also part of the cache index are also translated (Y).
 38 * If this translation changes this bit (X), the cache index is also affected,
39 * thus resulting in a different cache line than before.
40 * The kernel does not provide a mechanism to ensure that the page color
41 * (represented by this bit) remains the same when allocated or when pages
42 * are remapped. When user pages are mapped into kernel space, the color of
43 * the page might also change.
44 *
45 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 46 * to temporarily map a page so we can match the color.
47 */
48
49#if (DCACHE_WAY_SIZE > PAGE_SIZE)
50# define PAGE_COLOR_MASK (PAGE_MASK & (DCACHE_WAY_SIZE-1))
51# define PAGE_COLOR(a) \
52 (((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT)
53# define PAGE_COLOR_EQ(a,b) \
54 ((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0)
55# define PAGE_COLOR_MAP0(v) \
56 (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK))
57# define PAGE_COLOR_MAP1(v) \
58 (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE)
59#endif
60 17
61/* 18/*
62 * Allocating and freeing a pmd is trivial: the 1-entry pmd is 19 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
63 * inside the pgd, so has no extra memory associated with it. 20 * inside the pgd, so has no extra memory associated with it.
64 */ 21 */
65 22
66#define pgd_free(pgd) free_page((unsigned long)(pgd)) 23#define pmd_populate_kernel(mm, pmdp, ptep) \
67 24 (pmd_val(*(pmdp)) = ((unsigned long)ptep))
68#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 25#define pmd_populate(mm, pmdp, page) \
26 (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
69 27
70static inline void 28static inline pgd_t*
71pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte) 29pgd_alloc(struct mm_struct *mm)
72{ 30{
73 pmd_val(*(pmdp)) = (unsigned long)(pte); 31 return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
74 __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
75} 32}
76 33
77static inline void 34static inline void pgd_free(pgd_t *pgd)
78pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page)
79{ 35{
80 pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page); 36 free_page((unsigned long)pgd);
81 __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
82} 37}
83 38
39/* Use a slab cache for the pte pages (see also sparc64 implementation) */
84 40
41extern struct kmem_cache *pgtable_cache;
85 42
86#else 43static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
87 44 unsigned long address)
88# define pmd_populate_kernel(mm, pmdp, pte) \
89 (pmd_val(*(pmdp)) = (unsigned long)(pte))
90# define pmd_populate(mm, pmdp, page) \
91 (pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page))
92
93#endif
94
95static inline pgd_t*
96pgd_alloc(struct mm_struct *mm)
97{ 45{
98 pgd_t *pgd; 46 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
99 47}
100 pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER);
101
102 if (likely(pgd != NULL))
103 __flush_dcache_page((unsigned long)pgd);
104 48
105 return pgd; 49static inline struct page *pte_alloc_one(struct mm_struct *mm,
50 unsigned long addr)
51{
52 return virt_to_page(pte_alloc_one_kernel(mm, addr));
106} 53}
107 54
108extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr); 55static inline void pte_free_kernel(pte_t *pte)
109extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr); 56{
57 kmem_cache_free(pgtable_cache, pte);
58}
110 59
111#define pte_free_kernel(pte) free_page((unsigned long)pte) 60static inline void pte_free(struct page *page)
112#define pte_free(pte) __free_page(pte) 61{
62 kmem_cache_free(pgtable_cache, page_address(page));
63}
113 64
114#endif /* __KERNEL__ */ 65#endif /* __KERNEL__ */
115#endif /* _XTENSA_PGALLOC_H */ 66#endif /* _XTENSA_PGALLOC_H */
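
The pgtable_cache itself is created elsewhere in this series (in
arch/xtensa/mm/init.c). A hedged sketch of what that initialization looks
like, assuming the 2.6.23-era kmem_cache_create() constructor signature; the
cache name and details here may differ from the actual file:

struct kmem_cache *pgtable_cache __read_mostly;

/* Constructor: fresh pte pages start out in the invalid state, matching
 * the new _PAGE_INVALID encoding in pgtable.h below, rather than all-zero. */
static void pte_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
        pte_t *ptep = addr;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
                pte_clear(NULL, 0, ptep);
}

void __init pgtable_cache_init(void)
{
        pgtable_cache = kmem_cache_create("pgtable_cache", PAGE_SIZE, PAGE_SIZE,
                                          SLAB_HWCACHE_ALIGN, pte_ctor);
}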
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 06850f3b26a7..c0fcc1c9660c 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -1,11 +1,11 @@
1/* 1/*
2 * linux/include/asm-xtensa/pgtable.h 2 * include/asm-xtensa/pgtable.h
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc. 8 * Copyright (C) 2001 - 2007 Tensilica Inc.
9 */ 9 */
10 10
11#ifndef _XTENSA_PGTABLE_H 11#ifndef _XTENSA_PGTABLE_H
@@ -23,7 +23,7 @@
23 23
24/* 24/*
25 * The Xtensa architecture port of Linux has a two-level page table system, 25 * The Xtensa architecture port of Linux has a two-level page table system,
26 * i.e. the logical three-level Linux page table layout are folded. 26 * i.e. the logical three-level Linux page table layout is folded.
27 * Each task has the following memory page tables: 27 * Each task has the following memory page tables:
28 * 28 *
29 * PGD table (page directory), ie. 3rd-level page table: 29 * PGD table (page directory), ie. 3rd-level page table:
@@ -43,6 +43,7 @@
43 * 43 *
 44 * The individual pages are 4 kB in size, with special pages for the empty_zero_page. 44 * The individual pages are 4 kB in size, with special pages for the empty_zero_page.
45 */ 45 */
46
46#define PGDIR_SHIFT 22 47#define PGDIR_SHIFT 22
47#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 48#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
48#define PGDIR_MASK (~(PGDIR_SIZE-1)) 49#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -53,24 +54,26 @@
53 */ 54 */
54#define PTRS_PER_PTE 1024 55#define PTRS_PER_PTE 1024
55#define PTRS_PER_PTE_SHIFT 10 56#define PTRS_PER_PTE_SHIFT 10
56#define PTRS_PER_PMD 1
57#define PTRS_PER_PGD 1024 57#define PTRS_PER_PGD 1024
58#define PGD_ORDER 0 58#define PGD_ORDER 0
59#define PMD_ORDER 0
60#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) 59#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
61#define FIRST_USER_ADDRESS 0 60#define FIRST_USER_ADDRESS 0
62#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT) 61#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
63 62
64/* virtual memory area. We keep a distance to other memory regions to be 63/*
 64 * Virtual memory area. We keep a distance from other memory regions to be
65 * on the safe side. We also use this area for cache aliasing. 65 * on the safe side. We also use this area for cache aliasing.
66 */ 66 */
67 67
68// FIXME: virtual memory area must be configuration-dependent
69
70#define VMALLOC_START 0xC0000000 68#define VMALLOC_START 0xC0000000
71#define VMALLOC_END 0xC7FF0000 69#define VMALLOC_END 0xC6FEFFFF
70#define TLBTEMP_BASE_1 0xC6FF0000
71#define TLBTEMP_BASE_2 0xC6FF8000
72#define MODULE_START 0xC7000000
73#define MODULE_END 0xC7FFFFFF
72 74
73/* Xtensa Linux config PTE layout (when present): 75/*
76 * Xtensa Linux config PTE layout (when present):
74 * 31-12: PPN 77 * 31-12: PPN
75 * 11-6: Software 78 * 11-6: Software
76 * 5-4: RING 79 * 5-4: RING
@@ -86,47 +89,55 @@
86 * See further below for PTE layout for swapped-out pages. 89 * See further below for PTE layout for swapped-out pages.
87 */ 90 */
88 91
89#define _PAGE_VALID (1<<0) /* hardware: page is accessible */ 92#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
90#define _PAGE_WRENABLE (1<<1) /* hardware: page is writable */ 93#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
94
95#define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */
96#define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */
91 97
92/* None of these cache modes include MP coherency: */ 98/* None of these cache modes include MP coherency: */
93#define _PAGE_NO_CACHE (0<<2) /* bypass, non-speculative */ 99#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
94#if XCHAL_DCACHE_IS_WRITEBACK 100#define _PAGE_CA_WB (1<<2) /* write-back */
95# define _PAGE_WRITEBACK (1<<2) /* write back */ 101#define _PAGE_CA_WT (2<<2) /* write-through */
96# define _PAGE_WRITETHRU (2<<2) /* write through */ 102#define _PAGE_CA_MASK (3<<2)
97#else 103#define _PAGE_INVALID (3<<2)
98# define _PAGE_WRITEBACK (1<<2) /* assume write through */
99# define _PAGE_WRITETHRU (1<<2)
100#endif
101#define _PAGE_NOALLOC (3<<2) /* don't allocate cache,if not cached */
102#define _CACHE_MASK (3<<2)
103 104
104#define _PAGE_USER (1<<4) /* user access (ring=1) */ 105#define _PAGE_USER (1<<4) /* user access (ring=1) */
105#define _PAGE_KERNEL (0<<4) /* kernel access (ring=0) */
106 106
107/* Software */ 107/* Software */
108#define _PAGE_RW (1<<6) /* software: page writable */ 108#define _PAGE_WRITABLE_BIT 6
109#define _PAGE_WRITABLE (1<<6) /* software: page writable */
109#define _PAGE_DIRTY (1<<7) /* software: page dirty */ 110#define _PAGE_DIRTY (1<<7) /* software: page dirty */
110#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */ 111#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
111#define _PAGE_FILE (1<<9) /* nonlinear file mapping*/
112 112
113#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY) 113/* On older HW revisions, we always have to set bit 0 */
114#define _PAGE_PRESENT ( _PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED) 114#if XCHAL_HW_VERSION_MAJOR < 2000
115# define _PAGE_VALID (1<<0)
116#else
117# define _PAGE_VALID 0
118#endif
115 119
116#ifdef CONFIG_MMU 120#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
121#define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
117 122
118# define PAGE_NONE __pgprot(_PAGE_PRESENT) 123#ifdef CONFIG_MMU
119# define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW)
120# define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
121# define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
122# define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE)
123# define PAGE_INVALID __pgprot(_PAGE_USER)
124 124
125# if (DCACHE_WAY_SIZE > PAGE_SIZE) 125#define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE)
126# define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL) 126#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
127# else 127#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
128# define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL) 128#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
129# endif 129#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
130#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
131#define PAGE_SHARED_EXEC \
132 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
133#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
134#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
135
136#if (DCACHE_WAY_SIZE > PAGE_SIZE)
137# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED)
138#else
139# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
140#endif
130 141
131#else /* no mmu */ 142#else /* no mmu */
132 143
@@ -145,23 +156,23 @@
145 * What follows is the closest we can get by reasonable means.. 156 * What follows is the closest we can get by reasonable means..
146 * See linux/mm/mmap.c for protection_map[] array that uses these definitions. 157 * See linux/mm/mmap.c for protection_map[] array that uses these definitions.
147 */ 158 */
148#define __P000 PAGE_NONE /* private --- */ 159#define __P000 PAGE_NONE /* private --- */
149#define __P001 PAGE_READONLY /* private --r */ 160#define __P001 PAGE_READONLY /* private --r */
150#define __P010 PAGE_COPY /* private -w- */ 161#define __P010 PAGE_COPY /* private -w- */
151#define __P011 PAGE_COPY /* private -wr */ 162#define __P011 PAGE_COPY /* private -wr */
152#define __P100 PAGE_READONLY /* private x-- */ 163#define __P100 PAGE_READONLY_EXEC /* private x-- */
153#define __P101 PAGE_READONLY /* private x-r */ 164#define __P101 PAGE_READONLY_EXEC /* private x-r */
154#define __P110 PAGE_COPY /* private xw- */ 165#define __P110 PAGE_COPY_EXEC /* private xw- */
155#define __P111 PAGE_COPY /* private xwr */ 166#define __P111 PAGE_COPY_EXEC /* private xwr */
156 167
157#define __S000 PAGE_NONE /* shared --- */ 168#define __S000 PAGE_NONE /* shared --- */
158#define __S001 PAGE_READONLY /* shared --r */ 169#define __S001 PAGE_READONLY /* shared --r */
159#define __S010 PAGE_SHARED /* shared -w- */ 170#define __S010 PAGE_SHARED /* shared -w- */
160#define __S011 PAGE_SHARED /* shared -wr */ 171#define __S011 PAGE_SHARED /* shared -wr */
161#define __S100 PAGE_READONLY /* shared x-- */ 172#define __S100 PAGE_READONLY_EXEC /* shared x-- */
162#define __S101 PAGE_READONLY /* shared x-r */ 173#define __S101 PAGE_READONLY_EXEC /* shared x-r */
163#define __S110 PAGE_SHARED /* shared xw- */ 174#define __S110 PAGE_SHARED_EXEC /* shared xw- */
164#define __S111 PAGE_SHARED /* shared xwr */ 175#define __S111 PAGE_SHARED_EXEC /* shared xwr */
165 176
166#ifndef __ASSEMBLY__ 177#ifndef __ASSEMBLY__
167 178
@@ -183,35 +194,42 @@ extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
183#define pmd_page(pmd) virt_to_page(pmd_val(pmd)) 194#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
184 195
185/* 196/*
186 * The following only work if pte_present() is true. 197 * pte status.
187 */ 198 */
188#define pte_none(pte) (!(pte_val(pte) ^ _PAGE_USER)) 199#define pte_none(pte) (pte_val(pte) == _PAGE_INVALID)
189#define pte_present(pte) (pte_val(pte) & _PAGE_VALID) 200#define pte_present(pte) \
201 (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \
202 || ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE))
190#define pte_clear(mm,addr,ptep) \ 203#define pte_clear(mm,addr,ptep) \
191 do { update_pte(ptep, __pte(_PAGE_USER)); } while(0) 204 do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0)
192 205
193#define pmd_none(pmd) (!pmd_val(pmd)) 206#define pmd_none(pmd) (!pmd_val(pmd))
194#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) 207#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
195#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
196#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) 208#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
209#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
197 210
198/* Note: We use the _PAGE_USER bit to indicate write-protect kernel memory */ 211static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
199
200static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
201static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 212static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
202static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 213static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
203static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 214static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
204static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; } 215static inline pte_t pte_wrprotect(pte_t pte)
205static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } 216 { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
206static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } 217static inline pte_t pte_mkclean(pte_t pte)
207static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } 218 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
208static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } 219static inline pte_t pte_mkold(pte_t pte)
209static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; } 220 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
221static inline pte_t pte_mkdirty(pte_t pte)
222 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
223static inline pte_t pte_mkyoung(pte_t pte)
224 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
225static inline pte_t pte_mkwrite(pte_t pte)
226 { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
210 227
211/* 228/*
212 * Conversion functions: convert a page and protection to a page entry, 229 * Conversion functions: convert a page and protection to a page entry,
213 * and a page entry and page directory to the page they refer to. 230 * and a page entry and page directory to the page they refer to.
214 */ 231 */
232
215#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) 233#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
216#define pte_same(a,b) (pte_val(a) == pte_val(b)) 234#define pte_same(a,b) (pte_val(a) == pte_val(b))
217#define pte_page(x) pfn_to_page(pte_pfn(x)) 235#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -232,8 +250,9 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
232{ 250{
233 *ptep = pteval; 251 *ptep = pteval;
234#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 252#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
235 __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep)); 253 __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
236#endif 254#endif
255
237} 256}
238 257
239struct mm_struct; 258struct mm_struct;
@@ -249,9 +268,6 @@ static inline void
249set_pmd(pmd_t *pmdp, pmd_t pmdval) 268set_pmd(pmd_t *pmdp, pmd_t pmdval)
250{ 269{
251 *pmdp = pmdval; 270 *pmdp = pmdval;
252#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
253 __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
254#endif
255} 271}
256 272
257struct vm_area_struct; 273struct vm_area_struct;
@@ -306,52 +322,34 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
306 322
307/* 323/*
308 * Encode and decode a swap entry. 324 * Encode and decode a swap entry.
309 * Each PTE in a process VM's page table is either:
310 * "present" -- valid and not swapped out, protection bits are meaningful;
311 * "not present" -- which further subdivides in these two cases:
312 * "none" -- no mapping at all; identified by pte_none(), set by pte_clear(
313 * "swapped out" -- the page is swapped out, and the SWP macros below
314 * are used to store swap file info in the PTE itself.
315 * 325 *
316 * In the Xtensa processor MMU, any PTE entries in user space (or anywhere 326 * Format of swap pte:
317 * in virtual memory that can map differently across address spaces) 327 * bit 0 MBZ
318 * must have a correct ring value that represents the RASID field that 328 * bit 1 page-file (must be zero)
319 * is changed when switching address spaces. Eg. such PTE entries cannot 329 * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
320 * be set to ring zero, because that can cause a (global) kernel ASID 330 * bits 4 - 5 ring protection (must be 01: _PAGE_USER)
321 * entry to be created in the TLBs (even with invalid cache attribute), 331 * bits 6 - 10 swap type (5 bits -> 32 types)
322 * potentially causing a multihit exception when going back to another 332 * bits 11 - 31 swap offset / PAGE_SIZE (21 bits -> 8GB)
323 * address space that mapped the same virtual address at another ring. 333
324 * 334 * Format of file pte:
325 * SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs. 335 * bit 0 MBZ
326 * We also avoid using the _PAGE_VALID bit which must be zero for non-present 336 * bit 1 page-file (must be one: _PAGE_FILE)
327 * pages. 337 * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
328 * 338 * bits 4 - 5 ring protection (must be 01: _PAGE_USER)
329 * We end up with the following available bits: 1..3 and 7..31. 339 * bits 6 - 31 file offset / PAGE_SIZE
330 * We don't bother with 1..3 for now (we can use them later if needed),
331 * and chose to allocate 6 bits for SWP_TYPE and the remaining 19 bits
332 * for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it
333 * is currently implemented as an index into swap_info[MAX_SWAPFILES]
334 * and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>.
335 * However, for some reason all other architectures in the 2.4 kernel
336 * reserve either 6, 7, or 8 bits so I'll not detract from that for now. :)
337 * SWP_OFFSET is an offset into the swap file in page-size units, so
338 * with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB.
339 *
340 * FIXME: 2 GB isn't very big. Other bits can be used to allow
341 * larger swap sizes. In the meantime, it appears relatively easy to get
342 * around the 2 GB limitation by simply using multiple swap files.
343 */ 340 */
344 341
345#define __swp_type(entry) (((entry).val >> 7) & 0x3f) 342#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
346#define __swp_offset(entry) ((entry).val >> 13) 343#define __swp_offset(entry) ((entry).val >> 11)
347#define __swp_entry(type,offs) ((swp_entry_t) {((type) << 7) | ((offs) << 13)}) 344#define __swp_entry(type,offs) \
345 ((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID})
348#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 346#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
349#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 347#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
350 348
351#define PTE_FILE_MAX_BITS 29 349#define PTE_FILE_MAX_BITS 28
352#define pte_to_pgoff(pte) (pte_val(pte) >> 3) 350#define pte_to_pgoff(pte) (pte_val(pte) >> 4)
353#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE }) 351#define pgoff_to_pte(off) \
354 352 ((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE })
355 353
356#endif /* !defined (__ASSEMBLY__) */ 354#endif /* !defined (__ASSEMBLY__) */
357 355
@@ -394,13 +392,12 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
394 * remap a physical page `pfn' of size `size' with page protection `prot' 392 * remap a physical page `pfn' of size `size' with page protection `prot'
395 * into virtual address `from' 393 * into virtual address `from'
396 */ 394 */
395
397#define io_remap_pfn_range(vma,from,pfn,size,prot) \ 396#define io_remap_pfn_range(vma,from,pfn,size,prot) \
398 remap_pfn_range(vma, from, pfn, size, prot) 397 remap_pfn_range(vma, from, pfn, size, prot)
399 398
400 399
401/* No page table caches to init */ 400extern void pgtable_cache_init(void);
402
403#define pgtable_cache_init() do { } while (0)
404 401
405typedef pte_t *pte_addr_t; 402typedef pte_t *pte_addr_t;
406 403
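
The new swap-pte layout is easy to sanity-check from user space by
transliterating the macros (the transliteration below assumes
_PAGE_INVALID = 3 << 2, as defined earlier in this header, and uses a plain
unsigned long in place of swp_entry_t.val):

#include <assert.h>
#include <stdio.h>

#define _PAGE_INVALID (3UL << 2)

#define __swp_entry(type, offs) \
        (((unsigned long)(type) << 6) | ((unsigned long)(offs) << 11) | _PAGE_INVALID)
#define __swp_type(val)   (((val) >> 6) & 0x1f)
#define __swp_offset(val) ((val) >> 11)

int main(void)
{
        unsigned long e = __swp_entry(3, 0x12345);

        assert(__swp_type(e) == 3);          /* 5 type bits:   6..10 */
        assert(__swp_offset(e) == 0x12345);  /* 21 offset bits: 11..31 */
        printf("swap pte 0x%08lx -> type %lu, offset 0x%lx\n",
               e, __swp_type(e), __swp_offset(e));
        return 0;
}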
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
index 4feb9f7f35a6..35145bcd96eb 100644
--- a/include/asm-xtensa/processor.h
+++ b/include/asm-xtensa/processor.h
@@ -33,7 +33,7 @@
33 * the 1 GB requirement applies to the stack as well. 33 * the 1 GB requirement applies to the stack as well.
34 */ 34 */
35 35
36#define TASK_SIZE 0x40000000 36#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
37 37
38/* 38/*
39 * General exception cause assigned to debug exceptions. Debug exceptions go 39 * General exception cause assigned to debug exceptions. Debug exceptions go
diff --git a/include/asm-xtensa/syscall.h b/include/asm-xtensa/syscall.h
index 6cb0d42f11c8..05cebf8f62b1 100644
--- a/include/asm-xtensa/syscall.h
+++ b/include/asm-xtensa/syscall.h
@@ -1,3 +1,13 @@
1/*
2 * include/asm-xtensa/syscall.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2007 Tensilica Inc.
9 */
10
1struct pt_regs; 11struct pt_regs;
2struct sigaction; 12struct sigaction;
3asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); 13asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
@@ -17,4 +27,16 @@ asmlinkage long sys_rt_sigaction(int,
17 const struct sigaction __user *, 27 const struct sigaction __user *,
18 struct sigaction __user *, 28 struct sigaction __user *,
19 size_t); 29 size_t);
20asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg); 30asmlinkage long xtensa_shmat(int, char __user *, int);
31asmlinkage long xtensa_fadvise64_64(int, int,
32 unsigned long long, unsigned long long);
33
34/* Should probably move to linux/syscalls.h */
35struct pollfd;
36asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
37 fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
38asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
39 struct timespec __user *tsp, const sigset_t __user *sigmask,
40 size_t sigsetsize);
41
42
diff --git a/include/asm-xtensa/termbits.h b/include/asm-xtensa/termbits.h
index 9972c25ec86f..85aa6a3c0b6e 100644
--- a/include/asm-xtensa/termbits.h
+++ b/include/asm-xtensa/termbits.h
@@ -157,6 +157,7 @@ struct ktermios {
157#define HUPCL 0002000 157#define HUPCL 0002000
158#define CLOCAL 0004000 158#define CLOCAL 0004000
159#define CBAUDEX 0010000 159#define CBAUDEX 0010000
160#define BOTHER 0010000
160#define B57600 0010001 161#define B57600 0010001
161#define B115200 0010002 162#define B115200 0010002
162#define B230400 0010003 163#define B230400 0010003
@@ -172,10 +173,12 @@ struct ktermios {
172#define B3000000 0010015 173#define B3000000 0010015
173#define B3500000 0010016 174#define B3500000 0010016
174#define B4000000 0010017 175#define B4000000 0010017
175#define CIBAUD 002003600000 /* input baud rate (not used) */ 176#define CIBAUD 002003600000 /* input baud rate */
176#define CMSPAR 010000000000 /* mark or space (stick) parity */ 177#define CMSPAR 010000000000 /* mark or space (stick) parity */
177#define CRTSCTS 020000000000 /* flow control */ 178#define CRTSCTS 020000000000 /* flow control */
178 179
180#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
181
179/* c_lflag bits */ 182/* c_lflag bits */
180 183
181#define ISIG 0000001 184#define ISIG 0000001
diff --git a/include/asm-xtensa/termios.h b/include/asm-xtensa/termios.h
index f14b42c8dac0..4673f42f88a7 100644
--- a/include/asm-xtensa/termios.h
+++ b/include/asm-xtensa/termios.h
@@ -95,8 +95,10 @@ struct termio {
95 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ 95 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
96}) 96})
97 97
98#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) 98#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
99#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) 99#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
100#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
101#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
100 102
101#endif /* __KERNEL__ */ 103#endif /* __KERNEL__ */
102 104
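
Together with BOTHER and the TCGETS2/TCSETS2 ioctls above, these definitions
enable the arbitrary-speed tty interface from patch 1/2 of this series. A
typical user-space usage sketch (the rate and the open descriptor are
hypothetical); BOTHER in c_cflag tells the driver to take the rate from
c_ispeed/c_ospeed instead of a Bnnn constant:

#include <sys/ioctl.h>
#include <asm/termbits.h>  /* struct termios2, TCGETS2/TCSETS2, BOTHER, IBSHIFT */

/* Set an arbitrary line rate, e.g. 12345 baud; 'fd' is an open tty. */
int set_arbitrary_speed(int fd, int speed)
{
        struct termios2 tio;

        if (ioctl(fd, TCGETS2, &tio) < 0)
                return -1;
        tio.c_cflag &= ~(CBAUD | (CBAUD << IBSHIFT));
        tio.c_cflag |= BOTHER | (BOTHER << IBSHIFT);
        tio.c_ospeed = speed;
        tio.c_ispeed = speed;
        return ioctl(fd, TCSETS2, &tio);
}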
diff --git a/include/asm-xtensa/timex.h b/include/asm-xtensa/timex.h
index 28c7985a4000..a5fca59fba9e 100644
--- a/include/asm-xtensa/timex.h
+++ b/include/asm-xtensa/timex.h
@@ -41,10 +41,10 @@
41extern unsigned long ccount_per_jiffy; 41extern unsigned long ccount_per_jiffy;
42extern unsigned long ccount_nsec; 42extern unsigned long ccount_nsec;
43#define CCOUNT_PER_JIFFY ccount_per_jiffy 43#define CCOUNT_PER_JIFFY ccount_per_jiffy
44#define CCOUNT_NSEC ccount_nsec 44#define NSEC_PER_CCOUNT ccount_nsec
45#else 45#else
46#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) 46#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
47#define CCOUNT_NSEC (1000000000UL / CONFIG_XTENSA_CPU_CLOCK) 47#define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK)
48#endif 48#endif
49 49
50 50
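
The rename also fixes the units: with CONFIG_XTENSA_CPU_CLOCK expressed in
MHz (as the CCOUNT_PER_JIFFY formula implies), nanoseconds per cycle-counter
tick is 1000/MHz, not 10^9/MHz. A worked example under an assumed 40 MHz
clock:

/* Assumed 40 MHz core clock, for illustration. */
#define CONFIG_XTENSA_CPU_CLOCK 40                          /* MHz */
#define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK)  /* 25 ns/tick */

static unsigned long ccount_delta_to_ns(unsigned long delta)
{
        return delta * NSEC_PER_CCOUNT;    /* 4000 ticks -> 100000 ns */
}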
diff --git a/include/asm-xtensa/tlb.h b/include/asm-xtensa/tlb.h
index 4562b2dcfbc0..4830232017af 100644
--- a/include/asm-xtensa/tlb.h
+++ b/include/asm-xtensa/tlb.h
@@ -11,14 +11,36 @@
11#ifndef _XTENSA_TLB_H 11#ifndef _XTENSA_TLB_H
12#define _XTENSA_TLB_H 12#define _XTENSA_TLB_H
13 13
14#define tlb_start_vma(tlb,vma) do { } while (0) 14#include <asm/cache.h>
15#define tlb_end_vma(tlb,vma) do { } while (0) 15#include <asm/page.h>
16#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0) 16
17#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
18
19/* Note, read http://lkml.org/lkml/2004/1/15/6 */
20
21# define tlb_start_vma(tlb,vma) do { } while (0)
22# define tlb_end_vma(tlb,vma) do { } while (0)
23
24#else
17 25
26# define tlb_start_vma(tlb, vma) \
27 do { \
28 if (!tlb->fullmm) \
29 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
30 } while(0)
31
32# define tlb_end_vma(tlb, vma) \
33 do { \
34 if (!tlb->fullmm) \
35 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
36 } while(0)
37
38#endif
39
40#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
18#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) 41#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
19 42
20#include <asm-generic/tlb.h> 43#include <asm-generic/tlb.h>
21#include <asm/page.h>
22 44
23#define __pte_free_tlb(tlb,pte) pte_free(pte) 45#define __pte_free_tlb(tlb,pte) pte_free(pte)
24 46
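
For context, a hedged sketch of where these hooks fire, following the
generic mmu_gather pattern of this era (asm-generic/tlb.h); the surrounding
function is illustrative, not taken from the tree. On an aliasing
configuration the cache must be flushed while the mapping still exists,
hence the flush_cache_range() in tlb_start_vma():

static void unmap_vma_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

        tlb_start_vma(tlb, vma);  /* aliasing config: flush_cache_range() */
        /* ... zap the page-table entries covering [start, end) ... */
        tlb_end_vma(tlb, vma);    /* aliasing config: flush_tlb_range() */
        tlb_finish_mmu(tlb, start, end);
}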
diff --git a/include/asm-xtensa/types.h b/include/asm-xtensa/types.h
index 9d99a8e9e337..f1e84526f999 100644
--- a/include/asm-xtensa/types.h
+++ b/include/asm-xtensa/types.h
@@ -11,6 +11,15 @@
11#ifndef _XTENSA_TYPES_H 11#ifndef _XTENSA_TYPES_H
12#define _XTENSA_TYPES_H 12#define _XTENSA_TYPES_H
13 13
14
15#ifdef __ASSEMBLY__
16# define __XTENSA_UL(x) (x)
17# define __XTENSA_UL_CONST(x) x
18#else
19# define __XTENSA_UL(x) ((unsigned long)(x))
20# define __XTENSA_UL_CONST(x) x##UL
21#endif
22
14#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
15 24
16typedef unsigned short umode_t; 25typedef unsigned short umode_t;
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h
index 9bd34024431c..92968aabe34e 100644
--- a/include/asm-xtensa/unistd.h
+++ b/include/asm-xtensa/unistd.h
@@ -151,7 +151,7 @@ __SYSCALL( 61, sys_fcntl64, 3)
151#define __NR_available62 62 151#define __NR_available62 62
152__SYSCALL( 62, sys_ni_syscall, 0) 152__SYSCALL( 62, sys_ni_syscall, 0)
153#define __NR_fadvise64_64 63 153#define __NR_fadvise64_64 63
154__SYSCALL( 63, sys_fadvise64_64, 6) 154__SYSCALL( 63, xtensa_fadvise64_64, 6)
155#define __NR_utime 64 /* glibc 2.3.3 ?? */ 155#define __NR_utime 64 /* glibc 2.3.3 ?? */
156__SYSCALL( 64, sys_utime, 2) 156__SYSCALL( 64, sys_utime, 2)
157#define __NR_utimes 65 157#define __NR_utimes 65
@@ -339,8 +339,8 @@ __SYSCALL(148, sys_setpgid, 2)
339__SYSCALL(149, sys_getpgid, 1) 339__SYSCALL(149, sys_getpgid, 1)
340#define __NR_getppid 150 340#define __NR_getppid 150
341__SYSCALL(150, sys_getppid, 0) 341__SYSCALL(150, sys_getppid, 0)
342#define __NR_available151 151 342#define __NR_getpgrp 151
343__SYSCALL(151, sys_ni_syscall, 0) 343__SYSCALL(151, sys_getpgrp, 0)
344 344
345#define __NR_reserved152 152 /* set_thread_area */ 345#define __NR_reserved152 152 /* set_thread_area */
346__SYSCALL(152, sys_ni_syscall, 0) 346__SYSCALL(152, sys_ni_syscall, 0)
@@ -577,7 +577,112 @@ __SYSCALL(258, sys_keyctl, 5)
577#define __NR_available259 259 577#define __NR_available259 259
578__SYSCALL(259, sys_ni_syscall, 0) 578__SYSCALL(259, sys_ni_syscall, 0)
579 579
580#define __NR_syscall_count 261 580
581#define __NR_readahead 260
582__SYSCALL(260, sys_readahead, 5)
583#define __NR_remap_file_pages 261
584__SYSCALL(261, sys_remap_file_pages, 5)
585#define __NR_migrate_pages 262
586__SYSCALL(262, sys_migrate_pages, 0)
587#define __NR_mbind 263
588__SYSCALL(263, sys_mbind, 6)
589#define __NR_get_mempolicy 264
590__SYSCALL(264, sys_get_mempolicy, 5)
591#define __NR_set_mempolicy 265
592__SYSCALL(265, sys_set_mempolicy, 3)
593#define __NR_unshare 266
594__SYSCALL(266, sys_unshare, 1)
595#define __NR_move_pages 267
596__SYSCALL(267, sys_move_pages, 0)
597#define __NR_splice 268
598__SYSCALL(268, sys_splice, 0)
599#define __NR_tee 269
600__SYSCALL(269, sys_tee, 0)
601#define __NR_vmsplice 270
602__SYSCALL(270, sys_vmsplice, 0)
603#define __NR_available271 271
604__SYSCALL(271, sys_ni_syscall, 0)
605
606#define __NR_pselect6 272
607__SYSCALL(272, sys_pselect6, 0)
608#define __NR_ppoll 273
609__SYSCALL(273, sys_ppoll, 0)
610#define __NR_epoll_pwait 274
611__SYSCALL(274, sys_epoll_pwait, 0)
612#define __NR_available275 275
613__SYSCALL(275, sys_ni_syscall, 0)
614
615#define __NR_inotify_init 276
616__SYSCALL(276, sys_inotify_init, 0)
617#define __NR_inotify_add_watch 277
618__SYSCALL(277, sys_inotify_add_watch, 3)
619#define __NR_inotify_rm_watch 278
620__SYSCALL(278, sys_inotify_rm_watch, 2)
621#define __NR_available279 279
622__SYSCALL(279, sys_ni_syscall, 0)
623
624#define __NR_getcpu 280
625__SYSCALL(280, sys_getcpu, 0)
626#define __NR_kexec_load 281
627__SYSCALL(281, sys_ni_syscall, 0)
628
629#define __NR_ioprio_set 282
630__SYSCALL(282, sys_ioprio_set, 2)
631#define __NR_ioprio_get 283
632__SYSCALL(283, sys_ioprio_get, 3)
633
634#define __NR_set_robust_list 284
635__SYSCALL(284, sys_set_robust_list, 3)
636#define __NR_get_robust_list 285
637__SYSCALL(285, sys_get_robust_list, 3)
638#define __NR_reserved286 286 /* sync_file_rangeX */
639__SYSCALL(286, sys_ni_syscall, 3)
640#define __NR_available287 287
641__SYSCALL(287, sys_faccessat, 0)
642
643/* Relative File Operations */
644
645#define __NR_openat 288
646__SYSCALL(288, sys_openat, 4)
647#define __NR_mkdirat 289
648__SYSCALL(289, sys_mkdirat, 3)
649#define __NR_mknodat 290
650__SYSCALL(290, sys_mknodat, 4)
651#define __NR_unlinkat 291
652__SYSCALL(291, sys_unlinkat, 3)
653#define __NR_renameat 292
654__SYSCALL(292, sys_renameat, 4)
655#define __NR_linkat 293
656__SYSCALL(293, sys_linkat, 5)
657#define __NR_symlinkat 294
658__SYSCALL(294, sys_symlinkat, 3)
659#define __NR_readlinkat 295
660__SYSCALL(295, sys_readlinkat, 4)
661#define __NR_utimensat 296
662__SYSCALL(296, sys_utimensat, 0)
663#define __NR_fchownat 297
664__SYSCALL(297, sys_fchownat, 5)
665#define __NR_futimesat 298
666__SYSCALL(298, sys_futimesat, 4)
667#define __NR_fstatat64 299
668__SYSCALL(299, sys_fstatat64, 0)
669#define __NR_fchmodat 300
670__SYSCALL(300, sys_fchmodat, 4)
671#define __NR_faccessat 301
672__SYSCALL(301, sys_faccessat, 4)
673#define __NR_available302 302
674__SYSCALL(302, sys_ni_syscall, 0)
675#define __NR_available303 303
676__SYSCALL(303, sys_ni_syscall, 0)
677
678#define __NR_signalfd 304
679__SYSCALL(304, sys_signalfd, 3)
680#define __NR_timerfd 305
681__SYSCALL(305, sys_timerfd, 4)
682#define __NR_eventfd 306
683__SYSCALL(306, sys_eventfd, 1)
684
685#define __NR_syscall_count 307
581 686
582/* 687/*
583 * sysxtensa syscall handler 688 * sysxtensa syscall handler
@@ -612,8 +717,19 @@ __SYSCALL(259, sys_ni_syscall, 0)
612#define __ARCH_WANT_SYS_LLSEEK 717#define __ARCH_WANT_SYS_LLSEEK
613#define __ARCH_WANT_SYS_RT_SIGACTION 718#define __ARCH_WANT_SYS_RT_SIGACTION
614#define __ARCH_WANT_SYS_RT_SIGSUSPEND 719#define __ARCH_WANT_SYS_RT_SIGSUSPEND
720#define __ARCH_WANT_SYS_GETPGRP
615 721
616#endif /* __KERNEL__ */ 722/*
723 * Ignore legacy system calls in the checksyscalls.sh script
724 */
617 725
618#endif /* _XTENSA_UNISTD_H */ 726#define __IGNORE_fork /* use clone */
727#define __IGNORE_time
728#define __IGNORE_alarm /* use setitimer */
729#define __IGNORE_pause
730#define __IGNORE_mmap /* use mmap2 */
731#define __IGNORE_vfork /* use clone */
732#define __IGNORE_fadvise64 /* use fadvise64_64 */
619 733
734#endif /* __KERNEL__ */
735#endif /* _XTENSA_UNISTD_H */
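
Finally, a sketch of how the __SYSCALL() entries above are consumed. This
mirrors the redefine-and-reinclude pattern used by arch/xtensa/kernel/syscall.c
in this merge; the exact file contents may differ:

typedef void (*syscall_t)(void);

#undef __SYSCALL
#define __SYSCALL(nr, entry, nargs) [nr] = (syscall_t)entry,

syscall_t sys_call_table[__NR_syscall_count] = {
        /* Default every slot, then let the __SYSCALL entries override. */
        [0 ... __NR_syscall_count - 1] = (syscall_t)sys_ni_syscall,
#undef _XTENSA_UNISTD_H   /* allow re-inclusion past the include guard */
#include <asm/unistd.h>
};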