author		Paul Mundt <lethal@linux-sh.org>	2012-01-08 19:56:37 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2012-01-08 19:56:37 -0500
commit		04cf399640b7acfa9abe2eb7900cd934db8af697 (patch)
tree		f9a055f2f0170550f5f0b0507b06ffce8d98945d /arch/m68k/include
parent		17f0056e6a2f3d1818801705f5e12b71217bf4ef (diff)
parent		a0e86bd4252519321b0d102dc4ed90557aa7bee9 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into rmobile-latest
Conflicts:
	arch/arm/mach-shmobile/Makefile

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/m68k/include')
-rw-r--r--  arch/m68k/include/asm/anchor.h            112
-rw-r--r--  arch/m68k/include/asm/atarihw.h             2
-rw-r--r--  arch/m68k/include/asm/atomic.h             10
-rw-r--r--  arch/m68k/include/asm/blinken.h             8
-rw-r--r--  arch/m68k/include/asm/cacheflush_mm.h      88
-rw-r--r--  arch/m68k/include/asm/checksum.h           31
-rw-r--r--  arch/m68k/include/asm/div64.h               8
-rw-r--r--  arch/m68k/include/asm/elf.h                 6
-rw-r--r--  arch/m68k/include/asm/entry.h              10
-rw-r--r--  arch/m68k/include/asm/fpu.h                 2
-rw-r--r--  arch/m68k/include/asm/gpio.h                3
-rw-r--r--  arch/m68k/include/asm/ipcbuf.h             30
-rw-r--r--  arch/m68k/include/asm/irq.h                 5
-rw-r--r--  arch/m68k/include/asm/m54xxacr.h           32
-rw-r--r--  arch/m68k/include/asm/mac_baboon.h          6
-rw-r--r--  arch/m68k/include/asm/mac_iop.h             2
-rw-r--r--  arch/m68k/include/asm/mac_oss.h            23
-rw-r--r--  arch/m68k/include/asm/mac_psc.h             4
-rw-r--r--  arch/m68k/include/asm/mac_via.h             9
-rw-r--r--  arch/m68k/include/asm/macintosh.h          10
-rw-r--r--  arch/m68k/include/asm/macints.h             6
-rw-r--r--  arch/m68k/include/asm/mcf_pgalloc.h       102
-rw-r--r--  arch/m68k/include/asm/mcf_pgtable.h       425
-rw-r--r--  arch/m68k/include/asm/mcfmmu.h            112
-rw-r--r--  arch/m68k/include/asm/mmu_context.h       250
-rw-r--r--  arch/m68k/include/asm/motorola_pgtable.h    1
-rw-r--r--  arch/m68k/include/asm/page.h               10
-rw-r--r--  arch/m68k/include/asm/page_no.h             3
-rw-r--r--  arch/m68k/include/asm/page_offset.h        10
-rw-r--r--  arch/m68k/include/asm/pgalloc.h             4
-rw-r--r--  arch/m68k/include/asm/pgtable_mm.h         30
-rw-r--r--  arch/m68k/include/asm/processor.h          18
-rw-r--r--  arch/m68k/include/asm/segment.h            30
-rw-r--r--  arch/m68k/include/asm/serial.h              2
-rw-r--r--  arch/m68k/include/asm/setup.h              14
-rw-r--r--  arch/m68k/include/asm/sigcontext.h          4
-rw-r--r--  arch/m68k/include/asm/socket.h              3
-rw-r--r--  arch/m68k/include/asm/thread_info.h        34
-rw-r--r--  arch/m68k/include/asm/tlbflush.h           23
-rw-r--r--  arch/m68k/include/asm/traps.h               1
-rw-r--r--  arch/m68k/include/asm/types.h               6
-rw-r--r--  arch/m68k/include/asm/uaccess_mm.h         42
-rw-r--r--  arch/m68k/include/asm/ucontext.h            4
-rw-r--r--  arch/m68k/include/asm/unistd.h             14
44 files changed, 1171 insertions(+), 378 deletions(-)
diff --git a/arch/m68k/include/asm/anchor.h b/arch/m68k/include/asm/anchor.h
deleted file mode 100644
index 871c0d5cfc3d..000000000000
--- a/arch/m68k/include/asm/anchor.h
+++ /dev/null
@@ -1,112 +0,0 @@
1/****************************************************************************/
2
3/*
4 * anchor.h -- Anchor CO-MEM Lite PCI host bridge part.
5 *
6 * (C) Copyright 2000, Moreton Bay (www.moreton.com.au)
7 */
8
9/****************************************************************************/
10#ifndef anchor_h
11#define anchor_h
12/****************************************************************************/
13
14/*
15 * Define basic addressing info.
16 */
17#if defined(CONFIG_M5407C3)
18#define COMEM_BASE 0xFFFF0000 /* Base of CO-MEM address space */
19#define COMEM_IRQ 25 /* IRQ of anchor part */
20#else
21#define COMEM_BASE 0x80000000 /* Base of CO-MEM address space */
22#define COMEM_IRQ 25 /* IRQ of anchor part */
23#endif
24
25/****************************************************************************/
26
27/*
28 * 4-byte registers of CO-MEM, so adjust register addresses for
29 * easy access. Handy macro for word access too.
30 */
31#define LREG(a) ((a) >> 2)
32#define WREG(a) ((a) >> 1)
33
34
35/*
36 * Define base addresses within CO-MEM Lite register address space.
37 */
38#define COMEM_I2O 0x0000 /* I2O registers */
39#define COMEM_OPREGS 0x0400 /* Operation registers */
40#define COMEM_PCIBUS 0x2000 /* Direct access to PCI bus */
41#define COMEM_SHMEM 0x4000 /* Shared memory region */
42
43#define COMEM_SHMEMSIZE 0x4000 /* Size of shared memory */
44
45
46/*
47 * Define CO-MEM Registers.
48 */
49#define COMEM_I2OHISR 0x0030 /* I2O host interrupt status */
50#define COMEM_I2OHIMR 0x0034 /* I2O host interrupt mask */
51#define COMEM_I2OLISR 0x0038 /* I2O local interrupt status */
52#define COMEM_I2OLIMR 0x003c /* I2O local interrupt mask */
53#define COMEM_IBFPFIFO 0x0040 /* I2O inbound free/post FIFO */
54#define COMEM_OBPFFIFO 0x0044 /* I2O outbound post/free FIFO */
55#define COMEM_IBPFFIFO 0x0048 /* I2O inbound post/free FIFO */
56#define COMEM_OBFPFIFO 0x004c /* I2O outbound free/post FIFO */
57
58#define COMEM_DAHBASE 0x0460 /* Direct access base address */
59
60#define COMEM_NVCMD 0x04a0 /* I2C serial command */
61#define COMEM_NVREAD 0x04a4 /* I2C serial read */
62#define COMEM_NVSTAT 0x04a8 /* I2C status */
63
64#define COMEM_DMALBASE 0x04b0 /* DMA local base address */
65#define COMEM_DMAHBASE 0x04b4 /* DMA host base address */
66#define COMEM_DMASIZE 0x04b8 /* DMA size */
67#define COMEM_DMACTL 0x04bc /* DMA control */
68
69#define COMEM_HCTL 0x04e0 /* Host control */
70#define COMEM_HINT 0x04e4 /* Host interrupt control/status */
71#define COMEM_HLDATA 0x04e8 /* Host to local data mailbox */
72#define COMEM_LINT 0x04f4 /* Local interrupt contole status */
73#define COMEM_LHDATA 0x04f8 /* Local to host data mailbox */
74
75#define COMEM_LBUSCFG 0x04fc /* Local bus configuration */
76
77
78/*
79 * Commands and flags for use with Direct Access Register.
80 */
81#define COMEM_DA_IACK 0x00000000 /* Interrupt acknowledge (read) */
82#define COMEM_DA_SPCL 0x00000010 /* Special cycle (write) */
83#define COMEM_DA_MEMRD 0x00000004 /* Memory read cycle */
84#define COMEM_DA_MEMWR 0x00000004 /* Memory write cycle */
85#define COMEM_DA_IORD 0x00000002 /* I/O read cycle */
86#define COMEM_DA_IOWR 0x00000002 /* I/O write cycle */
87#define COMEM_DA_CFGRD 0x00000006 /* Configuration read cycle */
88#define COMEM_DA_CFGWR 0x00000006 /* Configuration write cycle */
89
90#define COMEM_DA_ADDR(a) ((a) & 0xffffe000)
91
92#define COMEM_DA_OFFSET(a) ((a) & 0x00001fff)
93
94
95/*
96 * The PCI bus will be limited in what slots will actually be used.
97 * Define valid device numbers for different boards.
98 */
99#if defined(CONFIG_M5407C3)
100#define COMEM_MINDEV 14 /* Minimum valid DEVICE */
101#define COMEM_MAXDEV 14 /* Maximum valid DEVICE */
102#define COMEM_BRIDGEDEV 15 /* Slot bridge is in */
103#else
104#define COMEM_MINDEV 0 /* Minimum valid DEVICE */
105#define COMEM_MAXDEV 3 /* Maximum valid DEVICE */
106#endif
107
108#define COMEM_MAXPCI (COMEM_MAXDEV+1) /* Maximum PCI devices */
109
110
111/****************************************************************************/
112#endif /* anchor_h */
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index 0392b28656ab..c0cb36350775 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -30,6 +30,8 @@ extern u_long atari_switches;
 extern int atari_rtc_year_offset;
 extern int atari_dont_touch_floppy_select;
 
+extern int atari_SCC_reset_done;
+
 /* convenience macros for testing machine type */
 #define MACH_IS_ST	((atari_mch_cookie >> 16) == ATARI_MCH_ST)
 #define MACH_IS_STE	((atari_mch_cookie >> 16) == ATARI_MCH_STE && \
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 65c6be6c8180..4eba796c00d4 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -55,6 +55,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
 	return c != 0;
 }
 
+static inline int atomic_dec_and_test_lt(atomic_t *v)
+{
+	char c;
+	__asm__ __volatile__(
+		"subql #1,%1; slt %0"
+		: "=d" (c), "=m" (*v)
+		: "m" (*v));
+	return c != 0;
+}
+
 static inline int atomic_inc_and_test(atomic_t *v)
 {
 	char c;
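The new atomic_dec_and_test_lt() decrements the counter and reports whether the result went negative ("slt" sets the flag on less-than). A rough C-level equivalent, for illustration only (atomic_dec_and_test_lt_equiv is a made-up name; the real helper is the asm above):

static inline int atomic_dec_and_test_lt_equiv(atomic_t *v)
{
	/* decrement, then test whether the new value dropped below zero */
	return atomic_sub_return(1, v) < 0;
}

The ColdFire MMU context allocator further down in this series uses it as a "grab a free slot" test: as long as the decrement drives nr_free_contexts negative, it undoes the decrement and steals a context from another mm.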
diff --git a/arch/m68k/include/asm/blinken.h b/arch/m68k/include/asm/blinken.h
index 1a749cf7b06d..0626582a7db4 100644
--- a/arch/m68k/include/asm/blinken.h
+++ b/arch/m68k/include/asm/blinken.h
@@ -17,15 +17,15 @@
 
 #define HP300_LEDS		0xf001ffff
 
-extern unsigned char ledstate;
+extern unsigned char hp300_ledstate;
 
 static __inline__ void blinken_leds(int on, int off)
 {
 	if (MACH_IS_HP300)
 	{
-		ledstate |= on;
-		ledstate &= ~off;
-		out_8(HP300_LEDS, ~ledstate);
+		hp300_ledstate |= on;
+		hp300_ledstate &= ~off;
+		out_8(HP300_LEDS, ~hp300_ledstate);
 	}
 }
 
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 73de7c89d8e0..8104bd874649 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -2,23 +2,89 @@
 #define _M68K_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#ifdef CONFIG_COLDFIRE
+#include <asm/mcfsim.h>
+#endif
 
 /* cache code */
 #define FLUSH_I_AND_D	(0x00000808)
 #define FLUSH_I		(0x00000008)
 
+#ifndef ICACHE_MAX_ADDR
+#define ICACHE_MAX_ADDR	0
+#define ICACHE_SET_MASK	0
+#define DCACHE_MAX_ADDR	0
+#define DCACHE_SETMASK	0
+#endif
+
+static inline void flush_cf_icache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%ic,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
+static inline void flush_cf_dcache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%dc,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
+static inline void flush_cf_bcache(unsigned long start, unsigned long end)
+{
+	unsigned long set;
+
+	for (set = start; set <= end; set += (0x10 - 3)) {
+		__asm__ __volatile__ (
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)\n\t"
+			"addq%.l #1,%0\n\t"
+			"cpushl %%bc,(%0)"
+			: "=a" (set)
+			: "a" (set));
+	}
+}
+
 /*
  * Cache handling functions
  */
 
 static inline void flush_icache(void)
 {
-	if (CPU_IS_040_OR_060)
+	if (CPU_IS_COLDFIRE) {
+		flush_cf_icache(0, ICACHE_MAX_ADDR);
+	} else if (CPU_IS_040_OR_060) {
 		asm volatile (	"nop\n"
 			"	.chip 68040\n"
 			"	cpusha %bc\n"
 			"	.chip 68k");
-	else {
+	} else {
 		unsigned long tmp;
 		asm volatile ( "movec %%cacr,%0\n"
 			"	or.w %1,%0\n"
@@ -51,12 +117,14 @@ extern void cache_push_v(unsigned long vaddr, int len);
    process changes.  */
 #define __flush_cache_all()					\
 ({								\
-	if (CPU_IS_040_OR_060)					\
+	if (CPU_IS_COLDFIRE) {					\
+		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
+	} else if (CPU_IS_040_OR_060) {				\
 		__asm__ __volatile__("nop\n\t"			\
 				     ".chip 68040\n\t"		\
 				     "cpusha %dc\n\t"		\
 				     ".chip 68k");		\
-	else {							\
+	} else {						\
 		unsigned long _tmp;				\
 		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
 				     "orw %1,%0\n\t"		\
@@ -112,7 +180,17 @@ static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vm
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
 static inline void __flush_page_to_ram(void *vaddr)
 {
-	if (CPU_IS_040_OR_060) {
+	if (CPU_IS_COLDFIRE) {
+		unsigned long addr, start, end;
+		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
+		start = addr & ICACHE_SET_MASK;
+		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
+		if (start > end) {
+			flush_cf_bcache(0, end);
+			end = ICACHE_MAX_ADDR;
+		}
+		flush_cf_bcache(start, end);
+	} else if (CPU_IS_040_OR_060) {
 		__asm__ __volatile__("nop\n\t"
 				     ".chip 68040\n\t"
 				     "cpushp %%bc,(%0)\n\t"
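The stride in the new flush_cf_*cache() loops is easy to misread: each pass of the inline asm issues four cpushl instructions, one per way of the four-way set-associative cache (CACHE_WAYS is 4 in m54xxacr.h below), incrementing the index register between them, so the register leaves the asm at set + 3; the loop's "set += (0x10 - 3)" then starts the next pass exactly one 16-byte set further on. A sketch of the per-iteration arithmetic, with a hypothetical helper name:

static unsigned long next_cpushl_set(unsigned long set)
{
	set += 3;		/* the three addq #1 executed inside the asm */
	set += 0x10 - 3;	/* the for-loop increment */
	return set;		/* == original set + 0x10, the next cache set */
}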
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index ec514485c8b6..2f88d867c711 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -3,6 +3,10 @@
 
 #include <linux/in6.h>
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 /*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
@@ -34,30 +38,6 @@ extern __wsum csum_partial_copy_nocheck(const void *src,
 					 void *dst, int len,
 					 __wsum sum);
 
-
-#ifdef CONFIG_COLDFIRE
-
-/*
- * The ColdFire cores don't support all the 68k instructions used
- * in the optimized checksum code below. So it reverts back to using
- * more standard C coded checksums. The fast checksum code is
- * significantly larger than the optimized version, so it is not
- * inlined here.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-	unsigned int tmp = (__force u32)sum;
-
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-
-	return (__force __sum16)~tmp;
-}
-
-#else
-
 /*
  * This is a version of ip_fast_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
@@ -97,8 +77,6 @@ static inline __sum16 csum_fold(__wsum sum)
 	return (__force __sum16)~sum;
 }
 
-#endif /* CONFIG_COLDFIRE */
-
 static inline __wsum
 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
 		   unsigned short proto, __wsum sum)
@@ -167,4 +145,5 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
 	return csum_fold(sum);
 }
 
+#endif /* CONFIG_GENERIC_CSUM */
 #endif /* _M68K_CHECKSUM_H */
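With the ColdFire special case gone, csum_fold() comes either from the asm-generic header or from the optimized m68k version kept above; folding reduces the 32-bit partial sum to 16 bits by adding the high half into the low half (twice, to absorb the carry) and complementing the result. A worked example, illustrative only and not part of the patch:

/* fold 0x0003fffd the way the removed C implementation did */
static unsigned short fold_example(unsigned int sum)	/* sum = 0x0003fffd */
{
	sum = (sum & 0xffff) + (sum >> 16);	/* 0xfffd + 0x0003 = 0x10000 */
	sum = (sum & 0xffff) + (sum >> 16);	/* 0x0000 + 0x0001 = 0x00001 */
	return ~sum;				/* 0xfffe */
}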
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index edb66148a71d..444ea8a09e9f 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -1,7 +1,9 @@
 #ifndef _M68K_DIV64_H
 #define _M68K_DIV64_H
 
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
+#include <asm-generic/div64.h>
+#else
 
 #include <linux/types.h>
 
@@ -27,8 +29,6 @@
 	__rem;							\
 })
 
-#else
-#include <asm-generic/div64.h>
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_CPU_HAS_NO_MULDIV64 */
 
 #endif /* _M68K_DIV64_H */
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index 01c193d91412..e9b7cda59744 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -59,10 +59,10 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
    is actually used on ASV.  */
 #define ELF_PLAT_INIT(_r, load_addr)	_r->a1 = 0
 
-#ifndef CONFIG_SUN3
-#define ELF_EXEC_PAGESIZE	4096
-#else
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
 #define ELF_EXEC_PAGESIZE	8192
+#else
+#define ELF_EXEC_PAGESIZE	4096
 #endif
 
 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index c3c5a8643e15..622138dc7288 100644
--- a/arch/m68k/include/asm/entry.h
+++ b/arch/m68k/include/asm/entry.h
@@ -222,16 +222,24 @@
  * Non-MMU systems do not reserve %a2 in this way, and this definition is
  * not used for them.
  */
+#ifdef CONFIG_MMU
+
 #define curptr a2
 
 #define GET_CURRENT(tmp) get_current tmp
 .macro get_current reg=%d0
 	movel	%sp,\reg
-	andw	#-THREAD_SIZE,\reg
+	andl	#-THREAD_SIZE,\reg
 	movel	\reg,%curptr
 	movel	%curptr@,%curptr
 .endm
 
+#else
+
+#define GET_CURRENT(tmp)
+
+#endif /* CONFIG_MMU */
+
 #else /* C source */
 
 #define STR(X) STR1(X)
diff --git a/arch/m68k/include/asm/fpu.h b/arch/m68k/include/asm/fpu.h
index ffb6b8cfc6d5..526db9da9e43 100644
--- a/arch/m68k/include/asm/fpu.h
+++ b/arch/m68k/include/asm/fpu.h
@@ -12,6 +12,8 @@
 #define FPSTATESIZE (96)
 #elif defined(CONFIG_M68KFPU_EMU)
 #define FPSTATESIZE (28)
+#elif defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#define FPSTATESIZE (16)
 #elif defined(CONFIG_M68060)
 #define FPSTATESIZE (12)
 #else
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index b2046839f4b2..00d0071de4c3 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -225,7 +225,8 @@ static inline void gpio_set_value(unsigned gpio, int value)
 
 static inline int gpio_to_irq(unsigned gpio)
 {
-	return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE : -EINVAL;
+	return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE
+		: __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned irq)
diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h
index a623ea3f0955..84c7e51cb6d0 100644
--- a/arch/m68k/include/asm/ipcbuf.h
+++ b/arch/m68k/include/asm/ipcbuf.h
@@ -1,29 +1 @@
-#ifndef __m68k_IPCBUF_H__
-#define __m68k_IPCBUF_H__
-
-/*
- * The user_ipc_perm structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm
-{
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* __m68k_IPCBUF_H__ */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 6198df5ff245..0e89fa05de0e 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -25,7 +25,8 @@
 #define NR_IRQS	0
 #endif
 
-#ifdef CONFIG_MMU
+#if defined(CONFIG_M68020) || defined(CONFIG_M68030) || \
+    defined(CONFIG_M68040) || defined(CONFIG_M68060)
 
 /*
  * Interrupt source definitions
@@ -80,7 +81,7 @@ extern unsigned int irq_canonicalize(unsigned int irq);
 
 #else
 #define irq_canonicalize(irq)  (irq)
-#endif /* CONFIG_MMU */
+#endif /* !(CONFIG_M68020 || CONFIG_M68030 || CONFIG_M68040 || CONFIG_M68060) */
 
 asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
 extern atomic_t irq_err_count;
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 16a1835f9b2a..47906aafbf67 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -39,8 +39,12 @@
 #define ACR_CM_OFF_PRE	0x00000040	/* No cache, precise */
 #define ACR_CM_OFF_IMP	0x00000060	/* No cache, imprecise */
 #define ACR_CM		0x00000060	/* Cache mode mask */
+#define ACR_SP		0x00000008	/* Supervisor protect */
 #define ACR_WPROTECT	0x00000004	/* Write protect */
 
+#define ACR_BA(x)	((x) & 0xff000000)
+#define ACR_ADMSK(x)	((((x) - 1) & 0xff000000) >> 8)
+
 #if defined(CONFIG_M5407)
 
 #define ICACHE_SIZE 0x4000	/* instruction - 16k */
@@ -56,6 +60,11 @@
 #define CACHE_LINE_SIZE 0x0010	/* 16 bytes */
 #define CACHE_WAYS 4		/* 4 ways */
 
+#define ICACHE_SET_MASK	((ICACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define DCACHE_SET_MASK	((DCACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define ICACHE_MAX_ADDR	ICACHE_SET_MASK
+#define DCACHE_MAX_ADDR	DCACHE_SET_MASK
+
 /*
  * Version 4 cores have a true harvard style separate instruction
  * and data cache. Enable data and instruction caches, also enable write
@@ -73,6 +82,27 @@
 #else
 #define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP)
 #endif
+#define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+
+#if defined(CONFIG_MMU)
+/*
+ * If running with the MMU enabled then we need to map the internal
+ * register region as non-cacheable. And then we map all our RAM as
+ * cacheable and supervisor access only.
+ */
+#define ACR0_MODE	(ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#define ACR1_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_SP)
+#define ACR2_MODE	0
+#define ACR3_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_SP)
+
+#else
+
+/*
+ * For the non-MMU enabled case we map all of RAM as cacheable.
+ */
 #if defined(CONFIG_CACHE_COPYBACK)
 #define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_CP)
 #else
@@ -80,7 +110,6 @@
 #endif
 #define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY)
 
-#define CACHE_INIT	(CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATE  (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA)
@@ -94,4 +123,5 @@
 #define CACHE_PUSH
 #endif
 
+#endif /* CONFIG_MMU */
 #endif	/* m54xxacr_h */
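The new ACR_BA()/ACR_ADMSK() macros build the base-address and address-mask fields of an access control register: the base keeps the top byte of the region address, and the mask says how many additional 16 MB blocks beyond the base should also match. A small user-space check of the arithmetic; the addresses and sizes are illustrative only, not taken from any particular board:

#include <stdio.h>

#define ACR_BA(x)	((x) & 0xff000000)
#define ACR_ADMSK(x)	((((x) - 1) & 0xff000000) >> 8)

int main(void)
{
	/* a 16 MB peripheral region at 0xf0000000 */
	printf("%08lx %08lx\n", ACR_BA(0xf0000000UL), ACR_ADMSK(0x1000000UL));
	/* f0000000 00000000 -> match exactly one 16 MB block */

	/* 64 MB of RAM at 0x40000000 */
	printf("%08lx %08lx\n", ACR_BA(0x40000000UL), ACR_ADMSK(0x4000000UL));
	/* 40000000 00030000 -> the base block plus three further 16 MB blocks */
	return 0;
}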
diff --git a/arch/m68k/include/asm/mac_baboon.h b/arch/m68k/include/asm/mac_baboon.h
index c2a042b8c349..a2d32f6589f9 100644
--- a/arch/m68k/include/asm/mac_baboon.h
+++ b/arch/m68k/include/asm/mac_baboon.h
@@ -29,4 +29,10 @@ struct baboon {
 	 */
 };
 
+extern int baboon_present;
+
+extern void baboon_register_interrupts(void);
+extern void baboon_irq_enable(int);
+extern void baboon_irq_disable(int);
+
 #endif /* __ASSEMBLY **/
diff --git a/arch/m68k/include/asm/mac_iop.h b/arch/m68k/include/asm/mac_iop.h
index a2c7e6fcca38..fde874a01e20 100644
--- a/arch/m68k/include/asm/mac_iop.h
+++ b/arch/m68k/include/asm/mac_iop.h
@@ -159,4 +159,6 @@ extern void iop_upload_code(uint, __u8 *, uint, __u16);
 extern void iop_download_code(uint, __u8 *, uint, __u16);
 extern __u8 *iop_compare_code(uint, __u8 *, uint, __u16);
 
+extern void iop_register_interrupts(void);
+
 #endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/mac_oss.h b/arch/m68k/include/asm/mac_oss.h
index 3cf2b6ed685a..425fbff4f4d8 100644
--- a/arch/m68k/include/asm/mac_oss.h
+++ b/arch/m68k/include/asm/mac_oss.h
@@ -58,25 +58,6 @@
 
 #define OSS_POWEROFF	0x80
 
-/*
- * OSS Interrupt levels for various sub-systems
- *
- * This mapping is laid out with two things in mind: first, we try to keep
- * things on their own levels to avoid having to do double-dispatches. Second,
- * the levels match as closely as possible the alternate IRQ mapping mode (aka
- * "A/UX mode") available on some VIA machines.
- */
-
-#define OSS_IRQLEV_DISABLED	0
-#define OSS_IRQLEV_IOPISM	1		/* ADB? */
-#define OSS_IRQLEV_SCSI		IRQ_AUTO_2
-#define OSS_IRQLEV_NUBUS	IRQ_AUTO_3	/* keep this on its own level */
-#define OSS_IRQLEV_IOPSCC	IRQ_AUTO_4	/* matches VIA alternate mapping */
-#define OSS_IRQLEV_SOUND	IRQ_AUTO_5	/* matches VIA alternate mapping */
-#define OSS_IRQLEV_60HZ		6		/* matches VIA alternate mapping */
-#define OSS_IRQLEV_VIA1		IRQ_AUTO_6	/* matches VIA alternate mapping */
-#define OSS_IRQLEV_PARITY	7		/* matches VIA alternate mapping */
-
 #ifndef __ASSEMBLY__
 
 struct mac_oss {
@@ -91,4 +72,8 @@ struct mac_oss {
 extern volatile struct mac_oss *oss;
 extern int oss_present;
 
+extern void oss_register_interrupts(void);
+extern void oss_irq_enable(int);
+extern void oss_irq_disable(int);
+
 #endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/mac_psc.h b/arch/m68k/include/asm/mac_psc.h
index 7808bb0b2323..e5c0d71d1543 100644
--- a/arch/m68k/include/asm/mac_psc.h
+++ b/arch/m68k/include/asm/mac_psc.h
@@ -211,6 +211,10 @@
 extern volatile __u8 *psc;
 extern int psc_present;
 
+extern void psc_register_interrupts(void);
+extern void psc_irq_enable(int);
+extern void psc_irq_disable(int);
+
 /*
  * Access functions
  */
diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h
index a59665e1d41b..aeeedf8b2d25 100644
--- a/arch/m68k/include/asm/mac_via.h
+++ b/arch/m68k/include/asm/mac_via.h
@@ -254,6 +254,15 @@
 extern volatile __u8 *via1,*via2;
 extern int rbv_present,via_alt_mapping;
 
+extern void via_register_interrupts(void);
+extern void via_irq_enable(int);
+extern void via_irq_disable(int);
+extern void via_nubus_irq_startup(int irq);
+extern void via_nubus_irq_shutdown(int irq);
+extern void via1_irq(unsigned int irq, struct irq_desc *desc);
+extern void via1_set_head(int);
+extern int via2_scsi_drq_pending(void);
+
 static inline int rbv_set_video_bpp(int bpp)
 {
 	char val = (bpp==1)?0:(bpp==2)?1:(bpp==4)?2:(bpp==8)?3:-1;
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 12ebe43b008b..682a1a2ff55f 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -11,17 +11,11 @@
 extern void mac_reset(void);
 extern void mac_poweroff(void);
 extern void mac_init_IRQ(void);
-extern int mac_irq_pending(unsigned int);
+
 extern void mac_irq_enable(struct irq_data *data);
 extern void mac_irq_disable(struct irq_data *data);
 
 /*
- * Floppy driver magic hook - probably shouldn't be here
- */
-
-extern void via1_set_head(int);
-
-/*
  * Macintosh Table
  */
 
@@ -48,7 +42,7 @@ struct mac_model
 #define MAC_ADB_IOP		6
 
 #define MAC_VIA_II		1
-#define MAC_VIA_IIci		2
+#define MAC_VIA_IICI		2
 #define MAC_VIA_QUADRA		3
 
 #define MAC_SCSI_NONE		0
diff --git a/arch/m68k/include/asm/macints.h b/arch/m68k/include/asm/macints.h
index ebe1b70fe90c..92aa8a4c2d03 100644
--- a/arch/m68k/include/asm/macints.h
+++ b/arch/m68k/include/asm/macints.h
@@ -104,6 +104,9 @@
 #define IRQ_PSC4_3	  (35)
 #define IRQ_MAC_MACE_DMA  IRQ_PSC4_3
 
+/* OSS Level 4 interrupts */
+#define IRQ_MAC_SCC	  (33)
+
 /* Level 5 (PSC, AV Macs only) interrupts */
 #define IRQ_PSC5_0	  (40)
 #define IRQ_PSC5_1	  (41)
@@ -131,9 +134,6 @@
 #define IRQ_BABOON_2	  (66)
 #define IRQ_BABOON_3	  (67)
 
-/* On non-PSC machines, the serial ports share an IRQ */
-#define IRQ_MAC_SCC	  IRQ_AUTO_4
-
 #define SLOT2IRQ(x)	  (x + 47)
 #define IRQ2SLOT(x)	  (x - 47)
 
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
new file mode 100644
index 000000000000..313f3dd23cdc
--- /dev/null
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -0,0 +1,102 @@
1#ifndef M68K_MCF_PGALLOC_H
2#define M68K_MCF_PGALLOC_H
3
4#include <asm/tlb.h>
5#include <asm/tlbflush.h>
6
7extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
8{
9 free_page((unsigned long) pte);
10}
11
12extern const char bad_pmd_string[];
13
14extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
15 unsigned long address)
16{
17 unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
18
19 if (!page)
20 return NULL;
21
22 memset((void *)page, 0, PAGE_SIZE);
23 return (pte_t *) (page);
24}
25
26extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
27{
28 return (pmd_t *) pgd;
29}
30
31#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
32#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
33
34#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
35
36#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
37 (unsigned long)(page_address(page)))
38
39#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
40
41#define pmd_pgtable(pmd) pmd_page(pmd)
42
43static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
44 unsigned long address)
45{
46 __free_page(page);
47}
48
49#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
50
51static inline struct page *pte_alloc_one(struct mm_struct *mm,
52 unsigned long address)
53{
54 struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
55 pte_t *pte;
56
57 if (!page)
58 return NULL;
59
60 pte = kmap(page);
61 if (pte) {
62 clear_page(pte);
63 __flush_page_to_ram(pte);
64 flush_tlb_kernel_page(pte);
65 nocache_page(pte);
66 }
67 kunmap(page);
68
69 return page;
70}
71
72extern inline void pte_free(struct mm_struct *mm, struct page *page)
73{
74 __free_page(page);
75}
76
77/*
78 * In our implementation, each pgd entry contains 1 pmd that is never allocated
79 * or freed. pgd_present is always 1, so this should never be called. -NL
80 */
81#define pmd_free(mm, pmd) BUG()
82
83static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
84{
85 free_page((unsigned long) pgd);
86}
87
88static inline pgd_t *pgd_alloc(struct mm_struct *mm)
89{
90 pgd_t *new_pgd;
91
92 new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
93 if (!new_pgd)
94 return NULL;
95 memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
96 memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
97 return new_pgd;
98}
99
100#define pgd_populate(mm, pmd, pte) BUG()
101
102#endif /* M68K_MCF_PGALLOC_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
new file mode 100644
index 000000000000..756bde4fb4f8
--- /dev/null
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -0,0 +1,425 @@
1#ifndef _MCF_PGTABLE_H
2#define _MCF_PGTABLE_H
3
4#include <asm/mcfmmu.h>
5#include <asm/page.h>
6
7/*
8 * MMUDR bits, in proper place. We write these directly into the MMUDR
9 * after masking from the pte.
10 */
11#define CF_PAGE_LOCKED MMUDR_LK /* 0x00000002 */
12#define CF_PAGE_EXEC MMUDR_X /* 0x00000004 */
13#define CF_PAGE_WRITABLE MMUDR_W /* 0x00000008 */
14#define CF_PAGE_READABLE MMUDR_R /* 0x00000010 */
15#define CF_PAGE_SYSTEM MMUDR_SP /* 0x00000020 */
16#define CF_PAGE_COPYBACK MMUDR_CM_CCB /* 0x00000040 */
17#define CF_PAGE_NOCACHE MMUDR_CM_NCP /* 0x00000080 */
18
19#define CF_CACHEMASK (~MMUDR_CM_CCB)
20#define CF_PAGE_MMUDR_MASK 0x000000fe
21
22#define _PAGE_NOCACHE030 CF_PAGE_NOCACHE
23
24/*
25 * MMUTR bits, need shifting down.
26 */
27#define CF_PAGE_MMUTR_MASK 0x00000c00
28#define CF_PAGE_MMUTR_SHIFT 10
29
30#define CF_PAGE_VALID (MMUTR_V << CF_PAGE_MMUTR_SHIFT)
31#define CF_PAGE_SHARED (MMUTR_SG << CF_PAGE_MMUTR_SHIFT)
32
33/*
34 * Fake bits, not implemented in CF, will get masked out before
35 * hitting hardware.
36 */
37#define CF_PAGE_DIRTY 0x00000001
38#define CF_PAGE_FILE 0x00000200
39#define CF_PAGE_ACCESSED 0x00001000
40
41#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
42#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */
43#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */
44#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */
45#define _DESCTYPE_MASK 0x003
46#define _CACHEMASK040 (~0x060)
47#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */
48
49/*
50 * Externally used page protection values.
51 */
52#define _PAGE_PRESENT (CF_PAGE_VALID)
53#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
54#define _PAGE_DIRTY (CF_PAGE_DIRTY)
55#define _PAGE_READWRITE (CF_PAGE_READABLE \
56 | CF_PAGE_WRITABLE \
57 | CF_PAGE_SYSTEM \
58 | CF_PAGE_SHARED)
59
60/*
61 * Compound page protection values.
62 */
63#define PAGE_NONE __pgprot(CF_PAGE_VALID \
64 | CF_PAGE_ACCESSED)
65
66#define PAGE_SHARED __pgprot(CF_PAGE_VALID \
67 | CF_PAGE_ACCESSED \
68 | CF_PAGE_SHARED)
69
70#define PAGE_INIT __pgprot(CF_PAGE_VALID \
71 | CF_PAGE_READABLE \
72 | CF_PAGE_WRITABLE \
73 | CF_PAGE_EXEC \
74 | CF_PAGE_SYSTEM)
75
76#define PAGE_KERNEL __pgprot(CF_PAGE_VALID \
77 | CF_PAGE_ACCESSED \
78 | CF_PAGE_READABLE \
79 | CF_PAGE_WRITABLE \
80 | CF_PAGE_EXEC \
81 | CF_PAGE_SYSTEM)
82
83#define PAGE_COPY __pgprot(CF_PAGE_VALID \
84 | CF_PAGE_ACCESSED \
85 | CF_PAGE_READABLE \
86 | CF_PAGE_DIRTY)
87
88/*
89 * Page protections for initialising protection_map. See mm/mmap.c
90 * for use. In general, the bit positions are xwr, and P-items are
91 * private, the S-items are shared.
92 */
93#define __P000 PAGE_NONE
94#define __P001 __pgprot(CF_PAGE_VALID \
95 | CF_PAGE_ACCESSED \
96 | CF_PAGE_READABLE)
97#define __P010 __pgprot(CF_PAGE_VALID \
98 | CF_PAGE_ACCESSED \
99 | CF_PAGE_WRITABLE)
100#define __P011 __pgprot(CF_PAGE_VALID \
101 | CF_PAGE_ACCESSED \
102 | CF_PAGE_READABLE \
103 | CF_PAGE_WRITABLE)
104#define __P100 __pgprot(CF_PAGE_VALID \
105 | CF_PAGE_ACCESSED \
106 | CF_PAGE_EXEC)
107#define __P101 __pgprot(CF_PAGE_VALID \
108 | CF_PAGE_ACCESSED \
109 | CF_PAGE_READABLE \
110 | CF_PAGE_EXEC)
111#define __P110 __pgprot(CF_PAGE_VALID \
112 | CF_PAGE_ACCESSED \
113 | CF_PAGE_WRITABLE \
114 | CF_PAGE_EXEC)
115#define __P111 __pgprot(CF_PAGE_VALID \
116 | CF_PAGE_ACCESSED \
117 | CF_PAGE_READABLE \
118 | CF_PAGE_WRITABLE \
119 | CF_PAGE_EXEC)
120
121#define __S000 PAGE_NONE
122#define __S001 __pgprot(CF_PAGE_VALID \
123 | CF_PAGE_ACCESSED \
124 | CF_PAGE_READABLE)
125#define __S010 PAGE_SHARED
126#define __S011 __pgprot(CF_PAGE_VALID \
127 | CF_PAGE_ACCESSED \
128 | CF_PAGE_SHARED \
129 | CF_PAGE_READABLE)
130#define __S100 __pgprot(CF_PAGE_VALID \
131 | CF_PAGE_ACCESSED \
132 | CF_PAGE_EXEC)
133#define __S101 __pgprot(CF_PAGE_VALID \
134 | CF_PAGE_ACCESSED \
135 | CF_PAGE_READABLE \
136 | CF_PAGE_EXEC)
137#define __S110 __pgprot(CF_PAGE_VALID \
138 | CF_PAGE_ACCESSED \
139 | CF_PAGE_SHARED \
140 | CF_PAGE_EXEC)
141#define __S111 __pgprot(CF_PAGE_VALID \
142 | CF_PAGE_ACCESSED \
143 | CF_PAGE_SHARED \
144 | CF_PAGE_READABLE \
145 | CF_PAGE_EXEC)
146
147#define PTE_MASK PAGE_MASK
148#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
149
150#ifndef __ASSEMBLY__
151
152/*
153 * Conversion functions: convert a page and protection to a page entry,
154 * and a page entry and page directory to the page they refer to.
155 */
156#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
157
158static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
159{
160 pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
161 return pte;
162}
163
164#define pmd_set(pmdp, ptep) do {} while (0)
165
166static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
167{
168 pgd_val(*pgdp) = virt_to_phys(pmdp);
169}
170
171#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
172#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
173
174static inline int pte_none(pte_t pte)
175{
176 return !pte_val(pte);
177}
178
179static inline int pte_present(pte_t pte)
180{
181 return pte_val(pte) & CF_PAGE_VALID;
182}
183
184static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
185 pte_t *ptep)
186{
187 pte_val(*ptep) = 0;
188}
189
190#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
191#define pte_page(pte) virt_to_page(__pte_page(pte))
192
193static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
194#define pmd_none(pmd) pmd_none2(&(pmd))
195static inline int pmd_bad2(pmd_t *pmd) { return 0; }
196#define pmd_bad(pmd) pmd_bad2(&(pmd))
197#define pmd_present(pmd) (!pmd_none2(&(pmd)))
198static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
199
200static inline int pgd_none(pgd_t pgd) { return 0; }
201static inline int pgd_bad(pgd_t pgd) { return 0; }
202static inline int pgd_present(pgd_t pgd) { return 1; }
203static inline void pgd_clear(pgd_t *pgdp) {}
204
205#define pte_ERROR(e) \
206 printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
207 __FILE__, __LINE__, pte_val(e))
208#define pmd_ERROR(e) \
209 printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
210 __FILE__, __LINE__, pmd_val(e))
211#define pgd_ERROR(e) \
212 printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
213 __FILE__, __LINE__, pgd_val(e))
214
215/*
216 * The following only work if pte_present() is true.
217 * Undefined behaviour if not...
218 * [we have the full set here even if they don't change from m68k]
219 */
220static inline int pte_read(pte_t pte)
221{
222 return pte_val(pte) & CF_PAGE_READABLE;
223}
224
225static inline int pte_write(pte_t pte)
226{
227 return pte_val(pte) & CF_PAGE_WRITABLE;
228}
229
230static inline int pte_exec(pte_t pte)
231{
232 return pte_val(pte) & CF_PAGE_EXEC;
233}
234
235static inline int pte_dirty(pte_t pte)
236{
237 return pte_val(pte) & CF_PAGE_DIRTY;
238}
239
240static inline int pte_young(pte_t pte)
241{
242 return pte_val(pte) & CF_PAGE_ACCESSED;
243}
244
245static inline int pte_file(pte_t pte)
246{
247 return pte_val(pte) & CF_PAGE_FILE;
248}
249
250static inline int pte_special(pte_t pte)
251{
252 return 0;
253}
254
255static inline pte_t pte_wrprotect(pte_t pte)
256{
257 pte_val(pte) &= ~CF_PAGE_WRITABLE;
258 return pte;
259}
260
261static inline pte_t pte_rdprotect(pte_t pte)
262{
263 pte_val(pte) &= ~CF_PAGE_READABLE;
264 return pte;
265}
266
267static inline pte_t pte_exprotect(pte_t pte)
268{
269 pte_val(pte) &= ~CF_PAGE_EXEC;
270 return pte;
271}
272
273static inline pte_t pte_mkclean(pte_t pte)
274{
275 pte_val(pte) &= ~CF_PAGE_DIRTY;
276 return pte;
277}
278
279static inline pte_t pte_mkold(pte_t pte)
280{
281 pte_val(pte) &= ~CF_PAGE_ACCESSED;
282 return pte;
283}
284
285static inline pte_t pte_mkwrite(pte_t pte)
286{
287 pte_val(pte) |= CF_PAGE_WRITABLE;
288 return pte;
289}
290
291static inline pte_t pte_mkread(pte_t pte)
292{
293 pte_val(pte) |= CF_PAGE_READABLE;
294 return pte;
295}
296
297static inline pte_t pte_mkexec(pte_t pte)
298{
299 pte_val(pte) |= CF_PAGE_EXEC;
300 return pte;
301}
302
303static inline pte_t pte_mkdirty(pte_t pte)
304{
305 pte_val(pte) |= CF_PAGE_DIRTY;
306 return pte;
307}
308
309static inline pte_t pte_mkyoung(pte_t pte)
310{
311 pte_val(pte) |= CF_PAGE_ACCESSED;
312 return pte;
313}
314
315static inline pte_t pte_mknocache(pte_t pte)
316{
317 pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40);
318 return pte;
319}
320
321static inline pte_t pte_mkcache(pte_t pte)
322{
323 pte_val(pte) &= ~CF_PAGE_NOCACHE;
324 return pte;
325}
326
327static inline pte_t pte_mkspecial(pte_t pte)
328{
329 return pte;
330}
331
332#define swapper_pg_dir kernel_pg_dir
333extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
334
335/*
336 * Find an entry in a pagetable directory.
337 */
338#define pgd_index(address) ((address) >> PGDIR_SHIFT)
339#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
340
341/*
342 * Find an entry in a kernel pagetable directory.
343 */
344#define pgd_offset_k(address) pgd_offset(&init_mm, address)
345
346/*
347 * Find an entry in the second-level pagetable.
348 */
349static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
350{
351 return (pmd_t *) pgd;
352}
353
354/*
355 * Find an entry in the third-level pagetable.
356 */
357#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
358#define pte_offset_kernel(dir, address) \
359 ((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
360
361/*
362 * Disable caching for page at given kernel virtual address.
363 */
364static inline void nocache_page(void *vaddr)
365{
366 pgd_t *dir;
367 pmd_t *pmdp;
368 pte_t *ptep;
369 unsigned long addr = (unsigned long) vaddr;
370
371 dir = pgd_offset_k(addr);
372 pmdp = pmd_offset(dir, addr);
373 ptep = pte_offset_kernel(pmdp, addr);
374 *ptep = pte_mknocache(*ptep);
375}
376
377/*
378 * Enable caching for page at given kernel virtual address.
379 */
380static inline void cache_page(void *vaddr)
381{
382 pgd_t *dir;
383 pmd_t *pmdp;
384 pte_t *ptep;
385 unsigned long addr = (unsigned long) vaddr;
386
387 dir = pgd_offset_k(addr);
388 pmdp = pmd_offset(dir, addr);
389 ptep = pte_offset_kernel(pmdp, addr);
390 *ptep = pte_mkcache(*ptep);
391}
392
393#define PTE_FILE_MAX_BITS 21
394#define PTE_FILE_SHIFT 11
395
396static inline unsigned long pte_to_pgoff(pte_t pte)
397{
398 return pte_val(pte) >> PTE_FILE_SHIFT;
399}
400
401static inline pte_t pgoff_to_pte(unsigned pgoff)
402{
403 return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
404}
405
406/*
407 * Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
408 */
409#define __swp_type(x) ((x).val & 0xFF)
410#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
411#define __swp_entry(typ, off) ((swp_entry_t) { (typ) | \
412 (off << PTE_FILE_SHIFT) })
413#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
414#define __swp_entry_to_pte(x) (__pte((x).val))
415
416#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
417
418#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
419 __pte_offset(addr))
420#define pte_unmap(pte) ((void) 0)
421#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
422#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
423
424#endif /* !__ASSEMBLY__ */
425#endif /* _MCF_PGTABLE_H */
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
new file mode 100644
index 000000000000..26cc3d5a63f8
--- /dev/null
+++ b/arch/m68k/include/asm/mcfmmu.h
@@ -0,0 +1,112 @@
1/*
2 * mcfmmu.h -- definitions for the ColdFire v4e MMU
3 *
4 * (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org>
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef MCFMMU_H
12#define MCFMMU_H
13
14/*
15 * The MMU support registers are mapped into the address space using
16 * the processor MMUBASE register. We used a fixed address for mapping,
17 * there doesn't seem any need to make this configurable yet.
18 */
19#define MMUBASE 0xfe000000
20
21/*
22 * The support registers of the MMU. Names are the sames as those
23 * used in the Freescale v4e documentation.
24 */
25#define MMUCR (MMUBASE + 0x00) /* Control register */
26#define MMUOR (MMUBASE + 0x04) /* Operation register */
27#define MMUSR (MMUBASE + 0x08) /* Status register */
28#define MMUAR (MMUBASE + 0x10) /* TLB Address register */
29#define MMUTR (MMUBASE + 0x14) /* TLB Tag register */
30#define MMUDR (MMUBASE + 0x18) /* TLB Data register */
31
32/*
33 * MMU Control register bit flags
34 */
35#define MMUCR_EN 0x00000001 /* Virtual mode enable */
36#define MMUCR_ASM 0x00000002 /* Address space mode */
37
38/*
39 * MMU Operation register.
40 */
41#define MMUOR_UAA 0x00000001 /* Update allocatiom address */
42#define MMUOR_ACC 0x00000002 /* TLB access */
43#define MMUOR_RD 0x00000004 /* TLB access read */
44#define MMUOR_WR 0x00000000 /* TLB access write */
45#define MMUOR_ADR 0x00000008 /* TLB address select */
46#define MMUOR_ITLB 0x00000010 /* ITLB operation */
47#define MMUOR_CAS 0x00000020 /* Clear non-locked ASID TLBs */
48#define MMUOR_CNL 0x00000040 /* Clear non-locked TLBs */
49#define MMUOR_CA 0x00000080 /* Clear all TLBs */
50#define MMUOR_STLB 0x00000100 /* Search TLBs */
51#define MMUOR_AAN 16 /* TLB allocation address */
52#define MMUOR_AAMASK 0xffff0000 /* AA mask */
53
54/*
55 * MMU Status register.
56 */
57#define MMUSR_HIT 0x00000002 /* Search TLB hit */
58#define MMUSR_WF 0x00000008 /* Write access fault */
59#define MMUSR_RF 0x00000010 /* Read access fault */
60#define MMUSR_SPF 0x00000020 /* Supervisor protect fault */
61
62/*
63 * MMU Read/Write Tag register.
64 */
65#define MMUTR_V 0x00000001 /* Valid */
66#define MMUTR_SG 0x00000002 /* Shared global */
67#define MMUTR_IDN 2 /* Address Space ID */
68#define MMUTR_IDMASK 0x000003fc /* ASID mask */
69#define MMUTR_VAN 10 /* Virtual Address */
70#define MMUTR_VAMASK 0xfffffc00 /* VA mask */
71
72/*
73 * MMU Read/Write Data register.
74 */
75#define MMUDR_LK 0x00000002 /* Lock entry */
76#define MMUDR_X 0x00000004 /* Execute access enable */
77#define MMUDR_W 0x00000008 /* Write access enable */
78#define MMUDR_R 0x00000010 /* Read access enable */
79#define MMUDR_SP 0x00000020 /* Supervisor access enable */
80#define MMUDR_CM_CWT 0x00000000 /* Cachable write thru */
81#define MMUDR_CM_CCB 0x00000040 /* Cachable copy back */
82#define MMUDR_CM_NCP 0x00000080 /* Non-cachable precise */
83#define MMUDR_CM_NCI 0x000000c0 /* Non-cachable imprecise */
84#define MMUDR_SZ_1MB 0x00000000 /* 1MB page size */
85#define MMUDR_SZ_4KB 0x00000100 /* 4kB page size */
86#define MMUDR_SZ_8KB 0x00000200 /* 8kB page size */
87#define MMUDR_SZ_1KB 0x00000300 /* 1kB page size */
88#define MMUDR_PAN 10 /* Physical address */
89#define MMUDR_PAMASK 0xfffffc00 /* PA mask */
90
91#ifndef __ASSEMBLY__
92
93/*
94 * Simple access functions for the MMU registers. Nothing fancy
95 * currently required, just simple 32bit access.
96 */
97static inline u32 mmu_read(u32 a)
98{
99 return *((volatile u32 *) a);
100}
101
102static inline void mmu_write(u32 a, u32 v)
103{
104 *((volatile u32 *) a) = v;
105 __asm__ __volatile__ ("nop");
106}
107
108int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word);
109
110#endif
111
112#endif /* MCFMMU_H */
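Taken together, the register definitions above imply a simple three-step protocol for loading one TLB entry, which is what load_ksp_mmu() in the mmu_context.h change below does. A hedged sketch; the function name and access bits here are illustrative only, and it assumes <asm/mcfmmu.h> and <asm/page.h>:

static void cf_tlb_load_example(unsigned long vaddr, unsigned long paddr, int asid)
{
	/* tag: virtual page number, ASID and valid bit */
	mmu_write(MMUTR, (vaddr & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V);
	/* data: physical page number, access rights and 8 KiB page size */
	mmu_write(MMUDR, (paddr & PAGE_MASK) | MMUDR_R | MMUDR_W | MMUDR_X |
			 MMUDR_SZ_8KB);
	/* commit the entry and let the hardware pick the allocation slot */
	mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
}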
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index 7d4341e55a99..dc3be991d634 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -8,7 +8,206 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8} 8}
9 9
10#ifdef CONFIG_MMU 10#ifdef CONFIG_MMU
11#ifndef CONFIG_SUN3 11
12#if defined(CONFIG_COLDFIRE)
13
14#include <asm/atomic.h>
15#include <asm/bitops.h>
16#include <asm/mcfmmu.h>
17#include <asm/mmu.h>
18
19#define NO_CONTEXT 256
20#define LAST_CONTEXT 255
21#define FIRST_CONTEXT 1
22
23extern unsigned long context_map[];
24extern mm_context_t next_mmu_context;
25
26extern atomic_t nr_free_contexts;
27extern struct mm_struct *context_mm[LAST_CONTEXT+1];
28extern void steal_context(void);
29
30static inline void get_mmu_context(struct mm_struct *mm)
31{
32 mm_context_t ctx;
33
34 if (mm->context != NO_CONTEXT)
35 return;
36 while (atomic_dec_and_test_lt(&nr_free_contexts)) {
37 atomic_inc(&nr_free_contexts);
38 steal_context();
39 }
40 ctx = next_mmu_context;
41 while (test_and_set_bit(ctx, context_map)) {
42 ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
43 if (ctx > LAST_CONTEXT)
44 ctx = 0;
45 }
46 next_mmu_context = (ctx + 1) & LAST_CONTEXT;
47 mm->context = ctx;
48 context_mm[ctx] = mm;
49}
50
51/*
52 * Set up the context for a new address space.
53 */
54#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
55
56/*
57 * We're finished using the context for an address space.
58 */
59static inline void destroy_context(struct mm_struct *mm)
60{
61 if (mm->context != NO_CONTEXT) {
62 clear_bit(mm->context, context_map);
63 mm->context = NO_CONTEXT;
64 atomic_inc(&nr_free_contexts);
65 }
66}
67
68static inline void set_context(mm_context_t context, pgd_t *pgd)
69{
70 __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
71}
72
73static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
74 struct task_struct *tsk)
75{
76 get_mmu_context(tsk->mm);
77 set_context(tsk->mm->context, next->pgd);
78}
79
80/*
81 * After we have set current->mm to a new value, this activates
82 * the context for the new mm so we see the new mappings.
83 */
84static inline void activate_mm(struct mm_struct *active_mm,
85 struct mm_struct *mm)
86{
87 get_mmu_context(mm);
88 set_context(mm->context, mm->pgd);
89}
90
91#define deactivate_mm(tsk, mm) do { } while (0)
92
93extern void mmu_context_init(void);
94#define prepare_arch_switch(next) load_ksp_mmu(next)
95
96static inline void load_ksp_mmu(struct task_struct *task)
97{
98 unsigned long flags;
99 struct mm_struct *mm;
100 int asid;
101 pgd_t *pgd;
102 pmd_t *pmd;
103 pte_t *pte;
104 unsigned long mmuar;
105
106 local_irq_save(flags);
107 mmuar = task->thread.ksp;
108
109 /* Search for a valid TLB entry, if one is found, don't remap */
110 mmu_write(MMUAR, mmuar);
111 mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR);
112 if (mmu_read(MMUSR) & MMUSR_HIT)
113 goto end;
114
115 if (mmuar >= PAGE_OFFSET) {
116 mm = &init_mm;
117 } else {
118 pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
119 mm = task->mm;
120 }
121
122 if (!mm)
123 goto bug;
124
125 pgd = pgd_offset(mm, mmuar);
126 if (pgd_none(*pgd))
127 goto bug;
128
129 pmd = pmd_offset(pgd, mmuar);
130 if (pmd_none(*pmd))
131 goto bug;
132
133 pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
134 : pte_offset_map(pmd, mmuar);
135 if (pte_none(*pte) || !pte_present(*pte))
136 goto bug;
137
138 set_pte(pte, pte_mkyoung(*pte));
139 asid = mm->context & 0xff;
140 if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
141 set_pte(pte, pte_wrprotect(*pte));
142
143 mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
144 (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
145 >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
146
147 mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
148 ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
149
150 mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
151
152 goto end;
153
154bug:
155 pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar);
156end:
157 local_irq_restore(flags);
158}
159
160#elif defined(CONFIG_SUN3)
161#include <asm/sun3mmu.h>
162#include <linux/sched.h>
163
164extern unsigned long get_free_context(struct mm_struct *mm);
165extern void clear_context(unsigned long context);
166
167/* set the context for a new task to unmapped */
168static inline int init_new_context(struct task_struct *tsk,
169 struct mm_struct *mm)
170{
171 mm->context = SUN3_INVALID_CONTEXT;
172 return 0;
173}
174
175/* find the context given to this process, and if it hasn't already
176 got one, go get one for it. */
177static inline void get_mmu_context(struct mm_struct *mm)
178{
179 if (mm->context == SUN3_INVALID_CONTEXT)
180 mm->context = get_free_context(mm);
181}
182
183/* flush context if allocated... */
184static inline void destroy_context(struct mm_struct *mm)
185{
186 if (mm->context != SUN3_INVALID_CONTEXT)
187 clear_context(mm->context);
188}
189
190static inline void activate_context(struct mm_struct *mm)
191{
192 get_mmu_context(mm);
193 sun3_put_context(mm->context);
194}
195
196static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
197 struct task_struct *tsk)
198{
199 activate_context(tsk->mm);
200}
201
202#define deactivate_mm(tsk, mm) do { } while (0)
203
204static inline void activate_mm(struct mm_struct *prev_mm,
205 struct mm_struct *next_mm)
206{
207 activate_context(next_mm);
208}
209
210#else
12 211
13#include <asm/setup.h> 212#include <asm/setup.h>
14#include <asm/page.h> 213#include <asm/page.h>
@@ -103,55 +302,8 @@ static inline void activate_mm(struct mm_struct *prev_mm,
103 switch_mm_0460(next_mm); 302 switch_mm_0460(next_mm);
104} 303}
105 304
106#else /* CONFIG_SUN3 */
107#include <asm/sun3mmu.h>
108#include <linux/sched.h>
109
110extern unsigned long get_free_context(struct mm_struct *mm);
111extern void clear_context(unsigned long context);
112
113/* set the context for a new task to unmapped */
114static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
115{
116 mm->context = SUN3_INVALID_CONTEXT;
117 return 0;
118}
119
120/* find the context given to this process, and if it hasn't already
121 got one, go get one for it. */
122static inline void get_mmu_context(struct mm_struct *mm)
123{
124 if(mm->context == SUN3_INVALID_CONTEXT)
125 mm->context = get_free_context(mm);
126}
127
128/* flush context if allocated... */
129static inline void destroy_context(struct mm_struct *mm)
130{
131 if(mm->context != SUN3_INVALID_CONTEXT)
132 clear_context(mm->context);
133}
134
135static inline void activate_context(struct mm_struct *mm)
136{
137 get_mmu_context(mm);
138 sun3_put_context(mm->context);
139}
140
141static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
142{
143 activate_context(tsk->mm);
144}
145
146#define deactivate_mm(tsk,mm) do { } while (0)
147
148static inline void activate_mm(struct mm_struct *prev_mm,
149 struct mm_struct *next_mm)
150{
151 activate_context(next_mm);
152}
153
154#endif 305#endif
306
155#else /* !CONFIG_MMU */ 307#else /* !CONFIG_MMU */
156 308
157static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 309static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 45bd3f589bf0..e0fdd4d08075 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -8,6 +8,7 @@
8#define _PAGE_PRESENT 0x001 8#define _PAGE_PRESENT 0x001
9#define _PAGE_SHORT 0x002 9#define _PAGE_SHORT 0x002
10#define _PAGE_RONLY 0x004 10#define _PAGE_RONLY 0x004
11#define _PAGE_READWRITE 0x000
11#define _PAGE_ACCESSED 0x008 12#define _PAGE_ACCESSED 0x008
12#define _PAGE_DIRTY 0x010 13#define _PAGE_DIRTY 0x010
13#define _PAGE_SUPER 0x080 /* 68040 supervisor only */ 14#define _PAGE_SUPER 0x080 /* 68040 supervisor only */
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index dfebb7c1e379..98baa82a8615 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -6,10 +6,10 @@
6#include <asm/page_offset.h> 6#include <asm/page_offset.h>
7 7
8/* PAGE_SHIFT determines the page size */ 8/* PAGE_SHIFT determines the page size */
9#ifndef CONFIG_SUN3 9#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
10#define PAGE_SHIFT (12) 10#define PAGE_SHIFT 13
11#else 11#else
12#define PAGE_SHIFT (13) 12#define PAGE_SHIFT 12
13#endif 13#endif
14#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) 14#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
15#define PAGE_MASK (~(PAGE_SIZE-1)) 15#define PAGE_MASK (~(PAGE_SIZE-1))
@@ -36,6 +36,10 @@ typedef struct page *pgtable_t;
36#define __pgd(x) ((pgd_t) { (x) } ) 36#define __pgd(x) ((pgd_t) { (x) } )
37#define __pgprot(x) ((pgprot_t) { (x) } ) 37#define __pgprot(x) ((pgprot_t) { (x) } )
38 38
39extern unsigned long _rambase;
40extern unsigned long _ramstart;
41extern unsigned long _ramend;
42
39#endif /* !__ASSEMBLY__ */ 43#endif /* !__ASSEMBLY__ */
40 44
41#ifdef CONFIG_MMU 45#ifdef CONFIG_MMU
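
The page-size constants above follow mechanically from PAGE_SHIFT; a minimal user-space sketch of the arithmetic, assuming the 8 KB value now used for ColdFire (and Sun3):

#include <stdio.h>

#define PAGE_SHIFT 13                       /* 8 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)      /* 8192 bytes */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x12345678UL;

	/* Round an address down to its page base, and take the in-page offset. */
	printf("page base:   0x%08lx\n", addr & PAGE_MASK);   /* 0x12344000 */
	printf("page offset: 0x%08lx\n", addr & ~PAGE_MASK);  /* 0x00001678 */
	return 0;
}
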
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index a8d1c60eb9ce..90595721185f 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -5,9 +5,6 @@
5 5
6extern unsigned long memory_start; 6extern unsigned long memory_start;
7extern unsigned long memory_end; 7extern unsigned long memory_end;
8extern unsigned long _rambase;
9extern unsigned long _ramstart;
10extern unsigned long _ramend;
11 8
12#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) 9#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
13#define free_user_page(page, addr) free_page(addr) 10#define free_user_page(page, addr) free_page(addr)
diff --git a/arch/m68k/include/asm/page_offset.h b/arch/m68k/include/asm/page_offset.h
index 1780152d81da..82626a8f1d0a 100644
--- a/arch/m68k/include/asm/page_offset.h
+++ b/arch/m68k/include/asm/page_offset.h
@@ -1,11 +1,9 @@
1/* This handles the memory map.. */ 1/* This handles the memory map.. */
2 2
3#ifdef CONFIG_MMU 3#if defined(CONFIG_RAMBASE)
4#ifndef CONFIG_SUN3 4#define PAGE_OFFSET_RAW CONFIG_RAMBASE
5#define PAGE_OFFSET_RAW 0x00000000 5#elif defined(CONFIG_SUN3)
6#else
7#define PAGE_OFFSET_RAW 0x0E000000 6#define PAGE_OFFSET_RAW 0x0E000000
8#endif
9#else 7#else
10#define PAGE_OFFSET_RAW CONFIG_RAMBASE 8#define PAGE_OFFSET_RAW 0x00000000
11#endif 9#endif
diff --git a/arch/m68k/include/asm/pgalloc.h b/arch/m68k/include/asm/pgalloc.h
index c294aad8a900..37bee7e3223d 100644
--- a/arch/m68k/include/asm/pgalloc.h
+++ b/arch/m68k/include/asm/pgalloc.h
@@ -7,7 +7,9 @@
7 7
8#ifdef CONFIG_MMU 8#ifdef CONFIG_MMU
9#include <asm/virtconvert.h> 9#include <asm/virtconvert.h>
10#ifdef CONFIG_SUN3 10#if defined(CONFIG_COLDFIRE)
11#include <asm/mcf_pgalloc.h>
12#elif defined(CONFIG_SUN3)
11#include <asm/sun3_pgalloc.h> 13#include <asm/sun3_pgalloc.h>
12#else 14#else
13#include <asm/motorola_pgalloc.h> 15#include <asm/motorola_pgalloc.h>
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 87174c904d2b..dc35e0e106e4 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -40,6 +40,8 @@
40/* PGDIR_SHIFT determines what a third-level page table entry can map */ 40/* PGDIR_SHIFT determines what a third-level page table entry can map */
41#ifdef CONFIG_SUN3 41#ifdef CONFIG_SUN3
42#define PGDIR_SHIFT 17 42#define PGDIR_SHIFT 17
43#elif defined(CONFIG_COLDFIRE)
44#define PGDIR_SHIFT 22
43#else 45#else
44#define PGDIR_SHIFT 25 46#define PGDIR_SHIFT 25
45#endif 47#endif
@@ -54,6 +56,10 @@
54#define PTRS_PER_PTE 16 56#define PTRS_PER_PTE 16
55#define PTRS_PER_PMD 1 57#define PTRS_PER_PMD 1
56#define PTRS_PER_PGD 2048 58#define PTRS_PER_PGD 2048
59#elif defined(CONFIG_COLDFIRE)
60#define PTRS_PER_PTE 512
61#define PTRS_PER_PMD 1
62#define PTRS_PER_PGD 1024
57#else 63#else
58#define PTRS_PER_PTE 1024 64#define PTRS_PER_PTE 1024
59#define PTRS_PER_PMD 8 65#define PTRS_PER_PMD 8
@@ -66,12 +72,22 @@
66#ifdef CONFIG_SUN3 72#ifdef CONFIG_SUN3
67#define KMAP_START 0x0DC00000 73#define KMAP_START 0x0DC00000
68#define KMAP_END 0x0E000000 74#define KMAP_END 0x0E000000
75#elif defined(CONFIG_COLDFIRE)
76#define KMAP_START 0xe0000000
77#define KMAP_END 0xf0000000
69#else 78#else
70#define KMAP_START 0xd0000000 79#define KMAP_START 0xd0000000
71#define KMAP_END 0xf0000000 80#define KMAP_END 0xf0000000
72#endif 81#endif
73 82
74#ifndef CONFIG_SUN3 83#ifdef CONFIG_SUN3
84extern unsigned long m68k_vmalloc_end;
85#define VMALLOC_START 0x0f800000
86#define VMALLOC_END m68k_vmalloc_end
87#elif defined(CONFIG_COLDFIRE)
88#define VMALLOC_START 0xd0000000
89#define VMALLOC_END 0xe0000000
90#else
75/* Just any arbitrary offset to the start of the vmalloc VM area: the 91/* Just any arbitrary offset to the start of the vmalloc VM area: the
76 * current 8MB value just means that there will be an 8MB "hole" after the 92 * current 8MB value just means that there will be an 8MB "hole" after the
77 * physical memory until the kernel virtual memory starts. That means that 93 * physical memory until the kernel virtual memory starts. That means that
@@ -82,11 +98,7 @@
82#define VMALLOC_OFFSET (8*1024*1024) 98#define VMALLOC_OFFSET (8*1024*1024)
83#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) 99#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
84#define VMALLOC_END KMAP_START 100#define VMALLOC_END KMAP_START
85#else 101#endif
86extern unsigned long m68k_vmalloc_end;
87#define VMALLOC_START 0x0f800000
88#define VMALLOC_END m68k_vmalloc_end
89#endif /* CONFIG_SUN3 */
90 102
91/* zero page used for uninitialized stuff */ 103/* zero page used for uninitialized stuff */
92extern void *empty_zero_page; 104extern void *empty_zero_page;
@@ -130,6 +142,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
130 142
131#ifdef CONFIG_SUN3 143#ifdef CONFIG_SUN3
132#include <asm/sun3_pgtable.h> 144#include <asm/sun3_pgtable.h>
145#elif defined(CONFIG_COLDFIRE)
146#include <asm/mcf_pgtable.h>
133#else 147#else
134#include <asm/motorola_pgtable.h> 148#include <asm/motorola_pgtable.h>
135#endif 149#endif
@@ -138,6 +152,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
138/* 152/*
139 * Macro to mark a page protection value as "uncacheable". 153 * Macro to mark a page protection value as "uncacheable".
140 */ 154 */
155#ifdef CONFIG_COLDFIRE
156# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
157#else
141#ifdef SUN3_PAGE_NOCACHE 158#ifdef SUN3_PAGE_NOCACHE
142# define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE 159# define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE
143#else 160#else
@@ -152,6 +169,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
152 ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \ 169 ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
153 : (prot))) 170 : (prot)))
154 171
172#endif /* CONFIG_COLDFIRE */
155#include <asm-generic/pgtable.h> 173#include <asm-generic/pgtable.h>
156#endif /* !__ASSEMBLY__ */ 174#endif /* !__ASSEMBLY__ */
157 175
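
For the classic-m68k branch kept above, VMALLOC_START is high_memory plus the 8 MB gap, aligned down to an 8 MB boundary; a stand-alone sketch of the same arithmetic with a hypothetical high_memory value:

#include <stdio.h>

#define VMALLOC_OFFSET (8 * 1024 * 1024)	/* the 8 MB "hole" described above */

int main(void)
{
	/* Hypothetical end of directly mapped RAM (high_memory in the kernel). */
	unsigned long high_memory = 0x04000000UL;	/* 64 MB */

	unsigned long vmalloc_start =
		(high_memory + VMALLOC_OFFSET) & ~((unsigned long)VMALLOC_OFFSET - 1);

	/* high_memory + 8 MB, aligned down to an 8 MB boundary: 0x04800000 here,
	 * i.e. exactly the 8 MB gap the comment in the hunk above describes. */
	printf("VMALLOC_START = 0x%08lx\n", vmalloc_start);
	return 0;
}
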
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 568facf30276..46460fa15d5c 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -48,10 +48,12 @@ static inline void wrusp(unsigned long usp)
48 * so don't change it unless you know what you are doing. 48 * so don't change it unless you know what you are doing.
49 */ 49 */
50#ifdef CONFIG_MMU 50#ifdef CONFIG_MMU
51#ifndef CONFIG_SUN3 51#if defined(CONFIG_COLDFIRE)
52#define TASK_SIZE (0xF0000000UL) 52#define TASK_SIZE (0xC0000000UL)
53#else 53#elif defined(CONFIG_SUN3)
54#define TASK_SIZE (0x0E000000UL) 54#define TASK_SIZE (0x0E000000UL)
55#else
56#define TASK_SIZE (0xF0000000UL)
55#endif 57#endif
56#else 58#else
57#define TASK_SIZE (0xFFFFFFFFUL) 59#define TASK_SIZE (0xFFFFFFFFUL)
@@ -66,10 +68,12 @@ static inline void wrusp(unsigned long usp)
66 * space during mmap's. 68 * space during mmap's.
67 */ 69 */
68#ifdef CONFIG_MMU 70#ifdef CONFIG_MMU
69#ifndef CONFIG_SUN3 71#if defined(CONFIG_COLDFIRE)
70#define TASK_UNMAPPED_BASE 0xC0000000UL 72#define TASK_UNMAPPED_BASE 0x60000000UL
71#else 73#elif defined(CONFIG_SUN3)
72#define TASK_UNMAPPED_BASE 0x0A000000UL 74#define TASK_UNMAPPED_BASE 0x0A000000UL
75#else
76#define TASK_UNMAPPED_BASE 0xC0000000UL
73#endif 77#endif
74#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr) 78#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
75#else 79#else
@@ -88,14 +92,12 @@ struct thread_struct {
88 unsigned long fp[8*3]; 92 unsigned long fp[8*3];
89 unsigned long fpcntl[3]; /* fp control regs */ 93 unsigned long fpcntl[3]; /* fp control regs */
90 unsigned char fpstate[FPSTATESIZE]; /* floating point state */ 94 unsigned char fpstate[FPSTATESIZE]; /* floating point state */
91 struct thread_info info;
92}; 95};
93 96
94#define INIT_THREAD { \ 97#define INIT_THREAD { \
95 .ksp = sizeof(init_stack) + (unsigned long) init_stack, \ 98 .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
96 .sr = PS_S, \ 99 .sr = PS_S, \
97 .fs = __KERNEL_DS, \ 100 .fs = __KERNEL_DS, \
98 .info = INIT_THREAD_INFO(init_task), \
99} 101}
100 102
101#ifdef CONFIG_MMU 103#ifdef CONFIG_MMU
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
index ee959219fdfe..0fa80e97ed2d 100644
--- a/arch/m68k/include/asm/segment.h
+++ b/arch/m68k/include/asm/segment.h
@@ -22,23 +22,26 @@ typedef struct {
22} mm_segment_t; 22} mm_segment_t;
23 23
24#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 24#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
25#define USER_DS MAKE_MM_SEG(__USER_DS)
26#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
27 25
26#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
28/* 27/*
29 * Get/set the SFC/DFC registers for MOVES instructions 28 * Get/set the SFC/DFC registers for MOVES instructions
30 */ 29 */
30#define USER_DS MAKE_MM_SEG(__USER_DS)
31#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
31 32
32static inline mm_segment_t get_fs(void) 33static inline mm_segment_t get_fs(void)
33{ 34{
34#ifdef CONFIG_MMU
35 mm_segment_t _v; 35 mm_segment_t _v;
36 __asm__ ("movec %/dfc,%0":"=r" (_v.seg):); 36 __asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
37
38 return _v; 37 return _v;
39#else 38}
40 return USER_DS; 39
41#endif 40static inline void set_fs(mm_segment_t val)
41{
42 __asm__ __volatile__ ("movec %0,%/sfc\n\t"
43 "movec %0,%/dfc\n\t"
44 : /* no outputs */ : "r" (val.seg) : "memory");
42} 45}
43 46
44static inline mm_segment_t get_ds(void) 47static inline mm_segment_t get_ds(void)
@@ -47,14 +50,13 @@ static inline mm_segment_t get_ds(void)
47 return KERNEL_DS; 50 return KERNEL_DS;
48} 51}
49 52
50static inline void set_fs(mm_segment_t val) 53#else
51{ 54#define USER_DS MAKE_MM_SEG(TASK_SIZE)
52#ifdef CONFIG_MMU 55#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
53 __asm__ __volatile__ ("movec %0,%/sfc\n\t" 56#define get_ds() (KERNEL_DS)
54 "movec %0,%/dfc\n\t" 57#define get_fs() (current_thread_info()->addr_limit)
55 : /* no outputs */ : "r" (val.seg) : "memory"); 58#define set_fs(x) (current_thread_info()->addr_limit = (x))
56#endif 59#endif
57}
58 60
59#define segment_eq(a,b) ((a).seg == (b).seg) 61#define segment_eq(a,b) ((a).seg == (b).seg)
60 62
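
Both variants above are consumed through the same save/override/restore idiom (flush_tlb_kernel_page() in tlbflush.h below is one caller); a user-space model of that idiom, with the kernel types and limit values stubbed out for illustration only:

#include <stdio.h>

/* Stand-ins for the kernel definitions; the values are illustrative. */
typedef struct { unsigned long seg; } mm_segment_t;
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
#define USER_DS		MAKE_MM_SEG(0xC0000000UL)
#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFUL)

static mm_segment_t addr_limit = { 0xC0000000UL };	/* models thread_info->addr_limit */

static mm_segment_t get_fs(void) { return addr_limit; }
static void set_fs(mm_segment_t val) { addr_limit = val; }

int main(void)
{
	mm_segment_t old_fs = get_fs();		/* remember the caller's limit */

	set_fs(KERNEL_DS);			/* widen it for a kernel-space access */
	printf("limit now  0x%08lx\n", get_fs().seg);
	set_fs(old_fs);				/* always restore before returning */
	printf("limit back 0x%08lx\n", get_fs().seg);
	return 0;
}
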
diff --git a/arch/m68k/include/asm/serial.h b/arch/m68k/include/asm/serial.h
index 2b90d6e69070..7267536adbcc 100644
--- a/arch/m68k/include/asm/serial.h
+++ b/arch/m68k/include/asm/serial.h
@@ -25,9 +25,11 @@
25#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF 25#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
26#endif 26#endif
27 27
28#ifdef CONFIG_ISA
28#define SERIAL_PORT_DFNS \ 29#define SERIAL_PORT_DFNS \
29 /* UART CLK PORT IRQ FLAGS */ \ 30 /* UART CLK PORT IRQ FLAGS */ \
30 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ 31 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
31 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ 32 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
32 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ 33 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
33 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ 34 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
35#endif
diff --git a/arch/m68k/include/asm/setup.h b/arch/m68k/include/asm/setup.h
index 4dfb3952b375..00c2c5397d37 100644
--- a/arch/m68k/include/asm/setup.h
+++ b/arch/m68k/include/asm/setup.h
@@ -40,6 +40,7 @@
40#define MACH_HP300 9 40#define MACH_HP300 9
41#define MACH_Q40 10 41#define MACH_Q40 10
42#define MACH_SUN3X 11 42#define MACH_SUN3X 11
43#define MACH_M54XX 12
43 44
44#define COMMAND_LINE_SIZE 256 45#define COMMAND_LINE_SIZE 256
45 46
@@ -211,23 +212,27 @@ extern unsigned long m68k_machtype;
211#define CPUB_68030 1 212#define CPUB_68030 1
212#define CPUB_68040 2 213#define CPUB_68040 2
213#define CPUB_68060 3 214#define CPUB_68060 3
215#define CPUB_COLDFIRE 4
214 216
215#define CPU_68020 (1<<CPUB_68020) 217#define CPU_68020 (1<<CPUB_68020)
216#define CPU_68030 (1<<CPUB_68030) 218#define CPU_68030 (1<<CPUB_68030)
217#define CPU_68040 (1<<CPUB_68040) 219#define CPU_68040 (1<<CPUB_68040)
218#define CPU_68060 (1<<CPUB_68060) 220#define CPU_68060 (1<<CPUB_68060)
221#define CPU_COLDFIRE (1<<CPUB_COLDFIRE)
219 222
220#define FPUB_68881 0 223#define FPUB_68881 0
221#define FPUB_68882 1 224#define FPUB_68882 1
222#define FPUB_68040 2 /* Internal FPU */ 225#define FPUB_68040 2 /* Internal FPU */
223#define FPUB_68060 3 /* Internal FPU */ 226#define FPUB_68060 3 /* Internal FPU */
224#define FPUB_SUNFPA 4 /* Sun-3 FPA */ 227#define FPUB_SUNFPA 4 /* Sun-3 FPA */
228#define FPUB_COLDFIRE 5 /* ColdFire FPU */
225 229
226#define FPU_68881 (1<<FPUB_68881) 230#define FPU_68881 (1<<FPUB_68881)
227#define FPU_68882 (1<<FPUB_68882) 231#define FPU_68882 (1<<FPUB_68882)
228#define FPU_68040 (1<<FPUB_68040) 232#define FPU_68040 (1<<FPUB_68040)
229#define FPU_68060 (1<<FPUB_68060) 233#define FPU_68060 (1<<FPUB_68060)
230#define FPU_SUNFPA (1<<FPUB_SUNFPA) 234#define FPU_SUNFPA (1<<FPUB_SUNFPA)
235#define FPU_COLDFIRE (1<<FPUB_COLDFIRE)
231 236
232#define MMUB_68851 0 237#define MMUB_68851 0
233#define MMUB_68030 1 /* Internal MMU */ 238#define MMUB_68030 1 /* Internal MMU */
@@ -235,6 +240,7 @@ extern unsigned long m68k_machtype;
235#define MMUB_68060 3 /* Internal MMU */ 240#define MMUB_68060 3 /* Internal MMU */
236#define MMUB_APOLLO 4 /* Custom Apollo */ 241#define MMUB_APOLLO 4 /* Custom Apollo */
237#define MMUB_SUN3 5 /* Custom Sun-3 */ 242#define MMUB_SUN3 5 /* Custom Sun-3 */
243#define MMUB_COLDFIRE 6 /* Internal MMU */
238 244
239#define MMU_68851 (1<<MMUB_68851) 245#define MMU_68851 (1<<MMUB_68851)
240#define MMU_68030 (1<<MMUB_68030) 246#define MMU_68030 (1<<MMUB_68030)
@@ -242,6 +248,7 @@ extern unsigned long m68k_machtype;
242#define MMU_68060 (1<<MMUB_68060) 248#define MMU_68060 (1<<MMUB_68060)
243#define MMU_SUN3 (1<<MMUB_SUN3) 249#define MMU_SUN3 (1<<MMUB_SUN3)
244#define MMU_APOLLO (1<<MMUB_APOLLO) 250#define MMU_APOLLO (1<<MMUB_APOLLO)
251#define MMU_COLDFIRE (1<<MMUB_COLDFIRE)
245 252
246#ifdef __KERNEL__ 253#ifdef __KERNEL__
247 254
@@ -341,6 +348,13 @@ extern int m68k_is040or060;
341# endif 348# endif
342#endif 349#endif
343 350
351#if !defined(CONFIG_COLDFIRE)
352# define CPU_IS_COLDFIRE (0)
353#else
354# define CPU_IS_COLDFIRE (1)
355# define MMU_IS_COLDFIRE (1)
356#endif
357
344#define CPU_TYPE (m68k_cputype) 358#define CPU_TYPE (m68k_cputype)
345 359
346#ifdef CONFIG_M68KFPU_EMU 360#ifdef CONFIG_M68KFPU_EMU
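
CPU_IS_COLDFIRE, added above, is an ordinary compile-time constant, so run-time checks such as the ones in tlbflush.h further down cost nothing on non-ColdFire builds; a small sketch of the pattern (in a real build the config symbol comes from Kconfig, not a local #define):

#include <stdio.h>

/* #define CONFIG_COLDFIRE 1 */		/* normally supplied by Kconfig */

#if !defined(CONFIG_COLDFIRE)
# define CPU_IS_COLDFIRE (0)
#else
# define CPU_IS_COLDFIRE (1)
#endif

static void flush_tlb_demo(void)
{
	if (CPU_IS_COLDFIRE) {
		/* Dead code when CONFIG_COLDFIRE is unset: the compiler drops
		 * this branch entirely, yet it is still parsed and type-checked. */
		puts("ColdFire: clear TLB via the MMU operation register");
	} else {
		puts("classic 680x0: pflusha / pflush");
	}
}

int main(void)
{
	flush_tlb_demo();
	return 0;
}
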
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index a29dd74a17cb..523db2a51cf3 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/asm/sigcontext.h
@@ -15,11 +15,7 @@ struct sigcontext {
15 unsigned long sc_pc; 15 unsigned long sc_pc;
16 unsigned short sc_formatvec; 16 unsigned short sc_formatvec;
17#ifndef __uClinux__ 17#ifndef __uClinux__
18# ifdef __mcoldfire__
19 unsigned long sc_fpregs[2][2]; /* room for two fp registers */
20# else
21 unsigned long sc_fpregs[2*3]; /* room for two fp registers */ 18 unsigned long sc_fpregs[2*3]; /* room for two fp registers */
22# endif
23 unsigned long sc_fpcntl[3]; 19 unsigned long sc_fpcntl[3];
24 unsigned char sc_fpstate[216]; 20 unsigned char sc_fpstate[216];
25#endif 21#endif
diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h
index 9bf49c87d954..d4708ce466e0 100644
--- a/arch/m68k/include/asm/socket.h
+++ b/arch/m68k/include/asm/socket.h
@@ -62,4 +62,7 @@
62 62
63#define SO_RXQ_OVFL 40 63#define SO_RXQ_OVFL 40
64 64
65#define SO_WIFI_STATUS 41
66#define SCM_WIFI_STATUS SO_WIFI_STATUS
67
65#endif /* _ASM_SOCKET_H */ 68#endif /* _ASM_SOCKET_H */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 790988967ba7..e8665e6f9464 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -3,6 +3,7 @@
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <asm/page.h> 5#include <asm/page.h>
6#include <asm/segment.h>
6 7
7/* 8/*
8 * On machines with 4k pages we default to an 8k thread size, though we 9 * On machines with 4k pages we default to an 8k thread size, though we
@@ -26,6 +27,7 @@ struct thread_info {
26 struct task_struct *task; /* main task structure */ 27 struct task_struct *task; /* main task structure */
27 unsigned long flags; 28 unsigned long flags;
28 struct exec_domain *exec_domain; /* execution domain */ 29 struct exec_domain *exec_domain; /* execution domain */
30 mm_segment_t addr_limit; /* thread address space */
29 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 int preempt_count; /* 0 => preemptable, <0 => BUG */
30 __u32 cpu; /* should always be 0 on m68k */ 32 __u32 cpu; /* should always be 0 on m68k */
31 unsigned long tp_value; /* thread pointer */ 33 unsigned long tp_value; /* thread pointer */
@@ -39,6 +41,7 @@ struct thread_info {
39{ \ 41{ \
40 .task = &tsk, \ 42 .task = &tsk, \
41 .exec_domain = &default_exec_domain, \ 43 .exec_domain = &default_exec_domain, \
44 .addr_limit = KERNEL_DS, \
42 .preempt_count = INIT_PREEMPT_COUNT, \ 45 .preempt_count = INIT_PREEMPT_COUNT, \
43 .restart_block = { \ 46 .restart_block = { \
44 .fn = do_no_restart_syscall, \ 47 .fn = do_no_restart_syscall, \
@@ -47,34 +50,6 @@ struct thread_info {
47 50
48#define init_stack (init_thread_union.stack) 51#define init_stack (init_thread_union.stack)
49 52
50#ifdef CONFIG_MMU
51
52#ifndef __ASSEMBLY__
53#include <asm/current.h>
54#endif
55
56#ifdef ASM_OFFSETS_C
57#define task_thread_info(tsk) ((struct thread_info *) NULL)
58#else
59#include <asm/asm-offsets.h>
60#define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO))
61#endif
62
63#define init_thread_info (init_task.thread.info)
64#define task_stack_page(tsk) ((tsk)->stack)
65#define current_thread_info() task_thread_info(current)
66
67#define __HAVE_THREAD_FUNCTIONS
68
69#define setup_thread_stack(p, org) ({ \
70 *(struct task_struct **)(p)->stack = (p); \
71 task_thread_info(p)->task = (p); \
72})
73
74#define end_of_stack(p) ((unsigned long *)(p)->stack + 1)
75
76#else /* !CONFIG_MMU */
77
78#ifndef __ASSEMBLY__ 53#ifndef __ASSEMBLY__
79/* how to get the thread information struct from C */ 54/* how to get the thread information struct from C */
80static inline struct thread_info *current_thread_info(void) 55static inline struct thread_info *current_thread_info(void)
@@ -92,8 +67,6 @@ static inline struct thread_info *current_thread_info(void)
92 67
93#define init_thread_info (init_thread_union.thread_info) 68#define init_thread_info (init_thread_union.thread_info)
94 69
95#endif /* CONFIG_MMU */
96
97/* entry.S relies on these definitions! 70/* entry.S relies on these definitions!
98 * bits 0-7 are tested at every exception exit 71 * bits 0-7 are tested at every exception exit
99 * bits 8-15 are also tested at syscall exit 72 * bits 8-15 are also tested at syscall exit
@@ -103,7 +76,6 @@ static inline struct thread_info *current_thread_info(void)
103#define TIF_DELAYED_TRACE 14 /* single step a syscall */ 76#define TIF_DELAYED_TRACE 14 /* single step a syscall */
104#define TIF_SYSCALL_TRACE 15 /* syscall trace active */ 77#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
105#define TIF_MEMDIE 16 /* is terminating due to OOM killer */ 78#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
106#define TIF_FREEZE 17 /* thread is freezing for suspend */
107#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */ 79#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
108 80
109#endif /* _ASM_M68K_THREAD_INFO_H */ 81#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index a6b4ed4fc90f..965ea35c9a40 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -5,10 +5,13 @@
5#ifndef CONFIG_SUN3 5#ifndef CONFIG_SUN3
6 6
7#include <asm/current.h> 7#include <asm/current.h>
8#include <asm/mcfmmu.h>
8 9
9static inline void flush_tlb_kernel_page(void *addr) 10static inline void flush_tlb_kernel_page(void *addr)
10{ 11{
11 if (CPU_IS_040_OR_060) { 12 if (CPU_IS_COLDFIRE) {
13 mmu_write(MMUOR, MMUOR_CNL);
14 } else if (CPU_IS_040_OR_060) {
12 mm_segment_t old_fs = get_fs(); 15 mm_segment_t old_fs = get_fs();
13 set_fs(KERNEL_DS); 16 set_fs(KERNEL_DS);
14 __asm__ __volatile__(".chip 68040\n\t" 17 __asm__ __volatile__(".chip 68040\n\t"
@@ -25,12 +28,15 @@ static inline void flush_tlb_kernel_page(void *addr)
25 */ 28 */
26static inline void __flush_tlb(void) 29static inline void __flush_tlb(void)
27{ 30{
28 if (CPU_IS_040_OR_060) 31 if (CPU_IS_COLDFIRE) {
32 mmu_write(MMUOR, MMUOR_CNL);
33 } else if (CPU_IS_040_OR_060) {
29 __asm__ __volatile__(".chip 68040\n\t" 34 __asm__ __volatile__(".chip 68040\n\t"
30 "pflushan\n\t" 35 "pflushan\n\t"
31 ".chip 68k"); 36 ".chip 68k");
32 else if (CPU_IS_020_OR_030) 37 } else if (CPU_IS_020_OR_030) {
33 __asm__ __volatile__("pflush #0,#4"); 38 __asm__ __volatile__("pflush #0,#4");
39 }
34} 40}
35 41
36static inline void __flush_tlb040_one(unsigned long addr) 42static inline void __flush_tlb040_one(unsigned long addr)
@@ -43,7 +49,9 @@ static inline void __flush_tlb040_one(unsigned long addr)
43 49
44static inline void __flush_tlb_one(unsigned long addr) 50static inline void __flush_tlb_one(unsigned long addr)
45{ 51{
46 if (CPU_IS_040_OR_060) 52 if (CPU_IS_COLDFIRE)
53 mmu_write(MMUOR, MMUOR_CNL);
54 else if (CPU_IS_040_OR_060)
47 __flush_tlb040_one(addr); 55 __flush_tlb040_one(addr);
48 else if (CPU_IS_020_OR_030) 56 else if (CPU_IS_020_OR_030)
49 __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr)); 57 __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
@@ -56,12 +64,15 @@ static inline void __flush_tlb_one(unsigned long addr)
56 */ 64 */
57static inline void flush_tlb_all(void) 65static inline void flush_tlb_all(void)
58{ 66{
59 if (CPU_IS_040_OR_060) 67 if (CPU_IS_COLDFIRE) {
68 mmu_write(MMUOR, MMUOR_CNL);
69 } else if (CPU_IS_040_OR_060) {
60 __asm__ __volatile__(".chip 68040\n\t" 70 __asm__ __volatile__(".chip 68040\n\t"
61 "pflusha\n\t" 71 "pflusha\n\t"
62 ".chip 68k"); 72 ".chip 68k");
63 else if (CPU_IS_020_OR_030) 73 } else if (CPU_IS_020_OR_030) {
64 __asm__ __volatile__("pflusha"); 74 __asm__ __volatile__("pflusha");
75 }
65} 76}
66 77
67static inline void flush_tlb_mm(struct mm_struct *mm) 78static inline void flush_tlb_mm(struct mm_struct *mm)
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 151068f64f44..4aff3358fbaf 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -18,6 +18,7 @@
18 18
19typedef void (*e_vector)(void); 19typedef void (*e_vector)(void);
20extern e_vector vectors[]; 20extern e_vector vectors[];
21extern e_vector *_ramvec;
21 22
22asmlinkage void auto_inthandler(void); 23asmlinkage void auto_inthandler(void);
23asmlinkage void user_inthandler(void); 24asmlinkage void user_inthandler(void);
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
index b17fd115a4e7..89705adcbd52 100644
--- a/arch/m68k/include/asm/types.h
+++ b/arch/m68k/include/asm/types.h
@@ -10,12 +10,6 @@
10 */ 10 */
11#include <asm-generic/int-ll64.h> 11#include <asm-generic/int-ll64.h>
12 12
13#ifndef __ASSEMBLY__
14
15typedef unsigned short umode_t;
16
17#endif /* __ASSEMBLY__ */
18
19/* 13/*
20 * These aren't exported outside the kernel to avoid name space clashes 14 * These aren't exported outside the kernel to avoid name space clashes
21 */ 15 */
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 7107f3fbdbb6..9c80cd515b20 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -21,6 +21,22 @@ static inline int access_ok(int type, const void __user *addr,
21} 21}
22 22
23/* 23/*
24 * Not all variants of the 68k family support the notion of address spaces.
25 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
26 * the "moves" instruction to access user space from kernel space. Other
27 * family members like ColdFire don't support this, and only have a single
28 * address space, and use the usual "move" instruction for user space access.
29 *
30 * Outside of this difference, the user space access functions are the same.
31 * So let's keep the code simple and just define what we need to use.
32 */
33#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
34#define MOVES "moves"
35#else
36#define MOVES "move"
37#endif
38
39/*
24 * The exception table consists of pairs of addresses: the first is the 40 * The exception table consists of pairs of addresses: the first is the
25 * address of an instruction that is allowed to fault, and the second is 41 * address of an instruction that is allowed to fault, and the second is
26 * the address at which the program should continue. No registers are 42 * the address at which the program should continue. No registers are
@@ -43,7 +59,7 @@ extern int __get_user_bad(void);
43 59
44#define __put_user_asm(res, x, ptr, bwl, reg, err) \ 60#define __put_user_asm(res, x, ptr, bwl, reg, err) \
45asm volatile ("\n" \ 61asm volatile ("\n" \
46 "1: moves."#bwl" %2,%1\n" \ 62 "1: "MOVES"."#bwl" %2,%1\n" \
47 "2:\n" \ 63 "2:\n" \
48 " .section .fixup,\"ax\"\n" \ 64 " .section .fixup,\"ax\"\n" \
49 " .even\n" \ 65 " .even\n" \
@@ -83,8 +99,8 @@ asm volatile ("\n" \
83 { \ 99 { \
84 const void __user *__pu_ptr = (ptr); \ 100 const void __user *__pu_ptr = (ptr); \
85 asm volatile ("\n" \ 101 asm volatile ("\n" \
86 "1: moves.l %2,(%1)+\n" \ 102 "1: "MOVES".l %2,(%1)+\n" \
87 "2: moves.l %R2,(%1)\n" \ 103 "2: "MOVES".l %R2,(%1)\n" \
88 "3:\n" \ 104 "3:\n" \
89 " .section .fixup,\"ax\"\n" \ 105 " .section .fixup,\"ax\"\n" \
90 " .even\n" \ 106 " .even\n" \
@@ -115,12 +131,12 @@ asm volatile ("\n" \
115#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ 131#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
116 type __gu_val; \ 132 type __gu_val; \
117 asm volatile ("\n" \ 133 asm volatile ("\n" \
118 "1: moves."#bwl" %2,%1\n" \ 134 "1: "MOVES"."#bwl" %2,%1\n" \
119 "2:\n" \ 135 "2:\n" \
120 " .section .fixup,\"ax\"\n" \ 136 " .section .fixup,\"ax\"\n" \
121 " .even\n" \ 137 " .even\n" \
122 "10: move.l %3,%0\n" \ 138 "10: move.l %3,%0\n" \
123 " sub."#bwl" %1,%1\n" \ 139 " sub.l %1,%1\n" \
124 " jra 2b\n" \ 140 " jra 2b\n" \
125 " .previous\n" \ 141 " .previous\n" \
126 "\n" \ 142 "\n" \
@@ -152,8 +168,8 @@ asm volatile ("\n" \
152 const void *__gu_ptr = (ptr); \ 168 const void *__gu_ptr = (ptr); \
153 u64 __gu_val; \ 169 u64 __gu_val; \
154 asm volatile ("\n" \ 170 asm volatile ("\n" \
155 "1: moves.l (%2)+,%1\n" \ 171 "1: "MOVES".l (%2)+,%1\n" \
156 "2: moves.l (%2),%R1\n" \ 172 "2: "MOVES".l (%2),%R1\n" \
157 "3:\n" \ 173 "3:\n" \
158 " .section .fixup,\"ax\"\n" \ 174 " .section .fixup,\"ax\"\n" \
159 " .even\n" \ 175 " .even\n" \
@@ -188,12 +204,12 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned
188 204
189#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ 205#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
190 asm volatile ("\n" \ 206 asm volatile ("\n" \
191 "1: moves."#s1" (%2)+,%3\n" \ 207 "1: "MOVES"."#s1" (%2)+,%3\n" \
192 " move."#s1" %3,(%1)+\n" \ 208 " move."#s1" %3,(%1)+\n" \
193 "2: moves."#s2" (%2)+,%3\n" \ 209 "2: "MOVES"."#s2" (%2)+,%3\n" \
194 " move."#s2" %3,(%1)+\n" \ 210 " move."#s2" %3,(%1)+\n" \
195 " .ifnc \""#s3"\",\"\"\n" \ 211 " .ifnc \""#s3"\",\"\"\n" \
196 "3: moves."#s3" (%2)+,%3\n" \ 212 "3: "MOVES"."#s3" (%2)+,%3\n" \
197 " move."#s3" %3,(%1)+\n" \ 213 " move."#s3" %3,(%1)+\n" \
198 " .endif\n" \ 214 " .endif\n" \
199 "4:\n" \ 215 "4:\n" \
@@ -269,13 +285,13 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
269#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ 285#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
270 asm volatile ("\n" \ 286 asm volatile ("\n" \
271 " move."#s1" (%2)+,%3\n" \ 287 " move."#s1" (%2)+,%3\n" \
272 "11: moves."#s1" %3,(%1)+\n" \ 288 "11: "MOVES"."#s1" %3,(%1)+\n" \
273 "12: move."#s2" (%2)+,%3\n" \ 289 "12: move."#s2" (%2)+,%3\n" \
274 "21: moves."#s2" %3,(%1)+\n" \ 290 "21: "MOVES"."#s2" %3,(%1)+\n" \
275 "22:\n" \ 291 "22:\n" \
276 " .ifnc \""#s3"\",\"\"\n" \ 292 " .ifnc \""#s3"\",\"\"\n" \
277 " move."#s3" (%2)+,%3\n" \ 293 " move."#s3" (%2)+,%3\n" \
278 "31: moves."#s3" %3,(%1)+\n" \ 294 "31: "MOVES"."#s3" %3,(%1)+\n" \
279 "32:\n" \ 295 "32:\n" \
280 " .endif\n" \ 296 " .endif\n" \
281 "4:\n" \ 297 "4:\n" \
diff --git a/arch/m68k/include/asm/ucontext.h b/arch/m68k/include/asm/ucontext.h
index 00dcc5176c57..e4e22669edc0 100644
--- a/arch/m68k/include/asm/ucontext.h
+++ b/arch/m68k/include/asm/ucontext.h
@@ -7,11 +7,7 @@ typedef greg_t gregset_t[NGREG];
7 7
8typedef struct fpregset { 8typedef struct fpregset {
9 int f_fpcntl[3]; 9 int f_fpcntl[3];
10#ifdef __mcoldfire__
11 int f_fpregs[8][2];
12#else
13 int f_fpregs[8*3]; 10 int f_fpregs[8*3];
14#endif
15} fpregset_t; 11} fpregset_t;
16 12
17struct mcontext { 13struct mcontext {
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 43f984e93970..ea0b502f845e 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -132,10 +132,10 @@
132#define __NR_adjtimex 124 132#define __NR_adjtimex 124
133#define __NR_mprotect 125 133#define __NR_mprotect 125
134#define __NR_sigprocmask 126 134#define __NR_sigprocmask 126
135/*#define __NR_create_module 127*/ 135#define __NR_create_module 127
136#define __NR_init_module 128 136#define __NR_init_module 128
137#define __NR_delete_module 129 137#define __NR_delete_module 129
138/*#define __NR_get_kernel_syms 130*/ 138#define __NR_get_kernel_syms 130
139#define __NR_quotactl 131 139#define __NR_quotactl 131
140#define __NR_getpgid 132 140#define __NR_getpgid 132
141#define __NR_fchdir 133 141#define __NR_fchdir 133
@@ -172,7 +172,7 @@
172#define __NR_setresuid 164 172#define __NR_setresuid 164
173#define __NR_getresuid 165 173#define __NR_getresuid 165
174#define __NR_getpagesize 166 174#define __NR_getpagesize 166
175/*#define __NR_query_module 167*/ 175#define __NR_query_module 167
176#define __NR_poll 168 176#define __NR_poll 168
177#define __NR_nfsservctl 169 177#define __NR_nfsservctl 169
178#define __NR_setresgid 170 178#define __NR_setresgid 170
@@ -193,8 +193,8 @@
193#define __NR_capset 185 193#define __NR_capset 185
194#define __NR_sigaltstack 186 194#define __NR_sigaltstack 186
195#define __NR_sendfile 187 195#define __NR_sendfile 187
196/*#define __NR_getpmsg 188*/ /* some people actually want streams */ 196#define __NR_getpmsg 188 /* some people actually want streams */
197/*#define __NR_putpmsg 189*/ /* some people actually want streams */ 197#define __NR_putpmsg 189 /* some people actually want streams */
198#define __NR_vfork 190 198#define __NR_vfork 190
199#define __NR_ugetrlimit 191 199#define __NR_ugetrlimit 191
200#define __NR_mmap2 192 200#define __NR_mmap2 192
@@ -350,10 +350,12 @@
350#define __NR_clock_adjtime 342 350#define __NR_clock_adjtime 342
351#define __NR_syncfs 343 351#define __NR_syncfs 343
352#define __NR_setns 344 352#define __NR_setns 344
353#define __NR_process_vm_readv 345
354#define __NR_process_vm_writev 346
353 355
354#ifdef __KERNEL__ 356#ifdef __KERNEL__
355 357
356#define NR_syscalls 345 358#define NR_syscalls 347
357 359
358#define __ARCH_WANT_IPC_PARSE_VERSION 360#define __ARCH_WANT_IPC_PARSE_VERSION
359#define __ARCH_WANT_OLD_READDIR 361#define __ARCH_WANT_OLD_READDIR