diff options
Diffstat (limited to 'arch/microblaze')
68 files changed, 6124 insertions, 553 deletions
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 8cc312b5d4dc..b50b845fdd50 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -6,6 +6,7 @@ mainmenu "Linux/Microblaze Kernel Configuration" | |||
6 | config MICROBLAZE | 6 | config MICROBLAZE |
7 | def_bool y | 7 | def_bool y |
8 | select HAVE_LMB | 8 | select HAVE_LMB |
9 | select ARCH_WANT_OPTIONAL_GPIOLIB | ||
9 | 10 | ||
10 | config SWAP | 11 | config SWAP |
11 | def_bool n | 12 | def_bool n |
@@ -49,13 +50,14 @@ config GENERIC_CLOCKEVENTS | |||
49 | config GENERIC_HARDIRQS_NO__DO_IRQ | 50 | config GENERIC_HARDIRQS_NO__DO_IRQ |
50 | def_bool y | 51 | def_bool y |
51 | 52 | ||
53 | config GENERIC_GPIO | ||
54 | def_bool y | ||
55 | |||
52 | config PCI | 56 | config PCI |
53 | depends on !MMU | ||
54 | def_bool n | 57 | def_bool n |
55 | 58 | ||
56 | config NO_DMA | 59 | config NO_DMA |
57 | depends on !MMU | 60 | def_bool y |
58 | def_bool n | ||
59 | 61 | ||
60 | source "init/Kconfig" | 62 | source "init/Kconfig" |
61 | 63 | ||
@@ -72,7 +74,8 @@ source "kernel/Kconfig.preempt" | |||
72 | source "kernel/Kconfig.hz" | 74 | source "kernel/Kconfig.hz" |
73 | 75 | ||
74 | config MMU | 76 | config MMU |
75 | def_bool n | 77 | bool "MMU support" |
78 | default n | ||
76 | 79 | ||
77 | config NO_MMU | 80 | config NO_MMU |
78 | bool | 81 | bool |
@@ -105,9 +108,6 @@ config CMDLINE_FORCE | |||
105 | config OF | 108 | config OF |
106 | def_bool y | 109 | def_bool y |
107 | 110 | ||
108 | config OF_DEVICE | ||
109 | def_bool y | ||
110 | |||
111 | config PROC_DEVICETREE | 111 | config PROC_DEVICETREE |
112 | bool "Support for device tree in /proc" | 112 | bool "Support for device tree in /proc" |
113 | depends on PROC_FS | 113 | depends on PROC_FS |
@@ -118,6 +118,113 @@ config PROC_DEVICETREE | |||
118 | 118 | ||
119 | endmenu | 119 | endmenu |
120 | 120 | ||
121 | menu "Advanced setup" | ||
122 | |||
123 | config ADVANCED_OPTIONS | ||
124 | bool "Prompt for advanced kernel configuration options" | ||
125 | depends on MMU | ||
126 | help | ||
127 | This option will enable prompting for a variety of advanced kernel | ||
128 | configuration options. These options can cause the kernel to not | ||
129 | work if they are set incorrectly, but can be used to optimize certain | ||
130 | aspects of kernel memory management. | ||
131 | |||
132 | Unless you know what you are doing, say N here. | ||
133 | |||
134 | comment "Default settings for advanced configuration options are used" | ||
135 | depends on !ADVANCED_OPTIONS | ||
136 | |||
137 | config HIGHMEM_START_BOOL | ||
138 | bool "Set high memory pool address" | ||
139 | depends on ADVANCED_OPTIONS && HIGHMEM | ||
140 | help | ||
141 | This option allows you to set the base address of the kernel virtual | ||
142 | area used to map high memory pages. This can be useful in | ||
143 | optimizing the layout of kernel virtual memory. | ||
144 | |||
145 | Say N here unless you know what you are doing. | ||
146 | |||
147 | config HIGHMEM_START | ||
148 | hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL | ||
149 | depends on MMU | ||
150 | default "0xfe000000" | ||
151 | |||
152 | config LOWMEM_SIZE_BOOL | ||
153 | bool "Set maximum low memory" | ||
154 | depends on ADVANCED_OPTIONS | ||
155 | help | ||
156 | This option allows you to set the maximum amount of memory which | ||
157 | will be used as "low memory", that is, memory which the kernel can | ||
158 | access directly, without having to set up a kernel virtual mapping. | ||
159 | This can be useful in optimizing the layout of kernel virtual | ||
160 | memory. | ||
161 | |||
162 | Say N here unless you know what you are doing. | ||
163 | |||
164 | config LOWMEM_SIZE | ||
165 | hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL | ||
166 | depends on MMU | ||
167 | default "0x30000000" | ||
168 | |||
169 | config KERNEL_START_BOOL | ||
170 | bool "Set custom kernel base address" | ||
171 | depends on ADVANCED_OPTIONS | ||
172 | help | ||
173 | This option allows you to set the kernel virtual address at which | ||
174 | the kernel will map low memory (the kernel image will be linked at | ||
175 | this address). This can be useful in optimizing the virtual memory | ||
176 | layout of the system. | ||
177 | |||
178 | Say N here unless you know what you are doing. | ||
179 | |||
180 | config KERNEL_START | ||
181 | hex "Virtual address of kernel base" if KERNEL_START_BOOL | ||
182 | default "0xc0000000" if MMU | ||
183 | default KERNEL_BASE_ADDR if !MMU | ||
184 | |||
185 | config TASK_SIZE_BOOL | ||
186 | bool "Set custom user task size" | ||
187 | depends on ADVANCED_OPTIONS | ||
188 | help | ||
189 | This option allows you to set the amount of virtual address space | ||
190 | allocated to user tasks. This can be useful in optimizing the | ||
191 | virtual memory layout of the system. | ||
192 | |||
193 | Say N here unless you know what you are doing. | ||
194 | |||
195 | config TASK_SIZE | ||
196 | hex "Size of user task space" if TASK_SIZE_BOOL | ||
197 | depends on MMU | ||
198 | default "0x80000000" | ||
199 | |||
200 | config CONSISTENT_START_BOOL | ||
201 | bool "Set custom consistent memory pool address" | ||
202 | depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE | ||
203 | help | ||
204 | This option allows you to set the base virtual address | ||
205 | of the the consistent memory pool. This pool of virtual | ||
206 | memory is used to make consistent memory allocations. | ||
207 | |||
208 | config CONSISTENT_START | ||
209 | hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL | ||
210 | depends on MMU | ||
211 | default "0xff100000" if NOT_COHERENT_CACHE | ||
212 | |||
213 | config CONSISTENT_SIZE_BOOL | ||
214 | bool "Set custom consistent memory pool size" | ||
215 | depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE | ||
216 | help | ||
217 | This option allows you to set the size of the the | ||
218 | consistent memory pool. This pool of virtual memory | ||
219 | is used to make consistent memory allocations. | ||
220 | |||
221 | config CONSISTENT_SIZE | ||
222 | hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL | ||
223 | depends on MMU | ||
224 | default "0x00200000" if NOT_COHERENT_CACHE | ||
225 | |||
226 | endmenu | ||
227 | |||
121 | source "mm/Kconfig" | 228 | source "mm/Kconfig" |
122 | 229 | ||
123 | menu "Exectuable file formats" | 230 | menu "Exectuable file formats" |
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index aaadfa701da3..d0bcf80a1136 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile | |||
@@ -1,4 +1,8 @@ | |||
1 | ifeq ($(CONFIG_MMU),y) | ||
2 | UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\" | ||
3 | else | ||
1 | UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\" | 4 | UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\" |
5 | endif | ||
2 | 6 | ||
3 | # What CPU vesion are we building for, and crack it open | 7 | # What CPU vesion are we building for, and crack it open |
4 | # as major.minor.rev | 8 | # as major.minor.rev |
@@ -36,6 +40,8 @@ CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER)) | |||
36 | # r31 holds current when in kernel mode | 40 | # r31 holds current when in kernel mode |
37 | CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2) | 41 | CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2) |
38 | 42 | ||
43 | LDFLAGS := | ||
44 | LDFLAGS_vmlinux := | ||
39 | LDFLAGS_BLOB := --format binary --oformat elf32-microblaze | 45 | LDFLAGS_BLOB := --format binary --oformat elf32-microblaze |
40 | 46 | ||
41 | LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name) | 47 | LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name) |
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile index 844edf406d34..c2bb043a029d 100644 --- a/arch/microblaze/boot/Makefile +++ b/arch/microblaze/boot/Makefile | |||
@@ -7,6 +7,8 @@ targets := linux.bin linux.bin.gz | |||
7 | OBJCOPYFLAGS_linux.bin := -O binary | 7 | OBJCOPYFLAGS_linux.bin := -O binary |
8 | 8 | ||
9 | $(obj)/linux.bin: vmlinux FORCE | 9 | $(obj)/linux.bin: vmlinux FORCE |
10 | [ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \ | ||
11 | touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image" | ||
10 | $(call if_changed,objcopy) | 12 | $(call if_changed,objcopy) |
11 | @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' | 13 | @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' |
12 | 14 | ||
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig new file mode 100644 index 000000000000..bd0b85ec38f5 --- /dev/null +++ b/arch/microblaze/configs/mmu_defconfig | |||
@@ -0,0 +1,798 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.30-rc6 | ||
4 | # Fri May 22 10:02:33 2009 | ||
5 | # | ||
6 | CONFIG_MICROBLAZE=y | ||
7 | # CONFIG_SWAP is not set | ||
8 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | ||
9 | # CONFIG_ARCH_HAS_ILOG2_U32 is not set | ||
10 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | ||
11 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
12 | CONFIG_GENERIC_HWEIGHT=y | ||
13 | CONFIG_GENERIC_HARDIRQS=y | ||
14 | CONFIG_GENERIC_IRQ_PROBE=y | ||
15 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
16 | CONFIG_GENERIC_TIME=y | ||
17 | # CONFIG_GENERIC_TIME_VSYSCALL is not set | ||
18 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
19 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
20 | CONFIG_GENERIC_GPIO=y | ||
21 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
22 | |||
23 | # | ||
24 | # General setup | ||
25 | # | ||
26 | CONFIG_EXPERIMENTAL=y | ||
27 | CONFIG_BROKEN_ON_SMP=y | ||
28 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
29 | CONFIG_LOCALVERSION="" | ||
30 | CONFIG_LOCALVERSION_AUTO=y | ||
31 | CONFIG_SYSVIPC=y | ||
32 | CONFIG_SYSVIPC_SYSCTL=y | ||
33 | # CONFIG_POSIX_MQUEUE is not set | ||
34 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
35 | # CONFIG_TASKSTATS is not set | ||
36 | # CONFIG_AUDIT is not set | ||
37 | |||
38 | # | ||
39 | # RCU Subsystem | ||
40 | # | ||
41 | CONFIG_CLASSIC_RCU=y | ||
42 | # CONFIG_TREE_RCU is not set | ||
43 | # CONFIG_PREEMPT_RCU is not set | ||
44 | # CONFIG_TREE_RCU_TRACE is not set | ||
45 | # CONFIG_PREEMPT_RCU_TRACE is not set | ||
46 | CONFIG_IKCONFIG=y | ||
47 | CONFIG_IKCONFIG_PROC=y | ||
48 | CONFIG_LOG_BUF_SHIFT=17 | ||
49 | # CONFIG_GROUP_SCHED is not set | ||
50 | # CONFIG_CGROUPS is not set | ||
51 | CONFIG_SYSFS_DEPRECATED=y | ||
52 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
53 | # CONFIG_RELAY is not set | ||
54 | # CONFIG_NAMESPACES is not set | ||
55 | CONFIG_BLK_DEV_INITRD=y | ||
56 | CONFIG_INITRAMFS_SOURCE="rootfs.cpio" | ||
57 | CONFIG_INITRAMFS_ROOT_UID=0 | ||
58 | CONFIG_INITRAMFS_ROOT_GID=0 | ||
59 | CONFIG_RD_GZIP=y | ||
60 | # CONFIG_RD_BZIP2 is not set | ||
61 | # CONFIG_RD_LZMA is not set | ||
62 | CONFIG_INITRAMFS_COMPRESSION_NONE=y | ||
63 | # CONFIG_INITRAMFS_COMPRESSION_GZIP is not set | ||
64 | # CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set | ||
65 | # CONFIG_INITRAMFS_COMPRESSION_LZMA is not set | ||
66 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
67 | CONFIG_SYSCTL=y | ||
68 | CONFIG_ANON_INODES=y | ||
69 | CONFIG_EMBEDDED=y | ||
70 | CONFIG_SYSCTL_SYSCALL=y | ||
71 | CONFIG_KALLSYMS=y | ||
72 | CONFIG_KALLSYMS_ALL=y | ||
73 | CONFIG_KALLSYMS_EXTRA_PASS=y | ||
74 | # CONFIG_STRIP_ASM_SYMS is not set | ||
75 | # CONFIG_HOTPLUG is not set | ||
76 | CONFIG_PRINTK=y | ||
77 | CONFIG_BUG=y | ||
78 | CONFIG_ELF_CORE=y | ||
79 | # CONFIG_BASE_FULL is not set | ||
80 | # CONFIG_FUTEX is not set | ||
81 | # CONFIG_EPOLL is not set | ||
82 | # CONFIG_SIGNALFD is not set | ||
83 | CONFIG_TIMERFD=y | ||
84 | CONFIG_EVENTFD=y | ||
85 | # CONFIG_SHMEM is not set | ||
86 | CONFIG_AIO=y | ||
87 | CONFIG_VM_EVENT_COUNTERS=y | ||
88 | CONFIG_COMPAT_BRK=y | ||
89 | CONFIG_SLAB=y | ||
90 | # CONFIG_SLUB is not set | ||
91 | # CONFIG_SLOB is not set | ||
92 | # CONFIG_PROFILING is not set | ||
93 | # CONFIG_MARKERS is not set | ||
94 | # CONFIG_SLOW_WORK is not set | ||
95 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | ||
96 | CONFIG_SLABINFO=y | ||
97 | CONFIG_BASE_SMALL=1 | ||
98 | CONFIG_MODULES=y | ||
99 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
100 | CONFIG_MODULE_UNLOAD=y | ||
101 | # CONFIG_MODULE_FORCE_UNLOAD is not set | ||
102 | # CONFIG_MODVERSIONS is not set | ||
103 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
104 | CONFIG_BLOCK=y | ||
105 | # CONFIG_LBD is not set | ||
106 | # CONFIG_BLK_DEV_BSG is not set | ||
107 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
108 | |||
109 | # | ||
110 | # IO Schedulers | ||
111 | # | ||
112 | CONFIG_IOSCHED_NOOP=y | ||
113 | CONFIG_IOSCHED_AS=y | ||
114 | CONFIG_IOSCHED_DEADLINE=y | ||
115 | CONFIG_IOSCHED_CFQ=y | ||
116 | # CONFIG_DEFAULT_AS is not set | ||
117 | # CONFIG_DEFAULT_DEADLINE is not set | ||
118 | CONFIG_DEFAULT_CFQ=y | ||
119 | # CONFIG_DEFAULT_NOOP is not set | ||
120 | CONFIG_DEFAULT_IOSCHED="cfq" | ||
121 | # CONFIG_FREEZER is not set | ||
122 | |||
123 | # | ||
124 | # Platform options | ||
125 | # | ||
126 | CONFIG_PLATFORM_GENERIC=y | ||
127 | CONFIG_OPT_LIB_FUNCTION=y | ||
128 | CONFIG_OPT_LIB_ASM=y | ||
129 | CONFIG_ALLOW_EDIT_AUTO=y | ||
130 | |||
131 | # | ||
132 | # Automatic platform settings from Kconfig.auto | ||
133 | # | ||
134 | |||
135 | # | ||
136 | # Definitions for MICROBLAZE0 | ||
137 | # | ||
138 | CONFIG_KERNEL_BASE_ADDR=0x90000000 | ||
139 | CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5" | ||
140 | CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 | ||
141 | CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 | ||
142 | CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 | ||
143 | CONFIG_XILINX_MICROBLAZE0_USE_DIV=1 | ||
144 | CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2 | ||
145 | CONFIG_XILINX_MICROBLAZE0_USE_FPU=2 | ||
146 | CONFIG_XILINX_MICROBLAZE0_HW_VER="7.10.d" | ||
147 | |||
148 | # | ||
149 | # Processor type and features | ||
150 | # | ||
151 | # CONFIG_NO_HZ is not set | ||
152 | # CONFIG_HIGH_RES_TIMERS is not set | ||
153 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
154 | CONFIG_PREEMPT_NONE=y | ||
155 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
156 | # CONFIG_PREEMPT is not set | ||
157 | CONFIG_HZ_100=y | ||
158 | # CONFIG_HZ_250 is not set | ||
159 | # CONFIG_HZ_300 is not set | ||
160 | # CONFIG_HZ_1000 is not set | ||
161 | CONFIG_HZ=100 | ||
162 | # CONFIG_SCHED_HRTICK is not set | ||
163 | CONFIG_MMU=y | ||
164 | |||
165 | # | ||
166 | # Boot options | ||
167 | # | ||
168 | CONFIG_CMDLINE_BOOL=y | ||
169 | CONFIG_CMDLINE="console=ttyUL0,115200" | ||
170 | CONFIG_CMDLINE_FORCE=y | ||
171 | CONFIG_OF=y | ||
172 | CONFIG_PROC_DEVICETREE=y | ||
173 | |||
174 | # | ||
175 | # Advanced setup | ||
176 | # | ||
177 | # CONFIG_ADVANCED_OPTIONS is not set | ||
178 | |||
179 | # | ||
180 | # Default settings for advanced configuration options are used | ||
181 | # | ||
182 | CONFIG_HIGHMEM_START=0xfe000000 | ||
183 | CONFIG_LOWMEM_SIZE=0x30000000 | ||
184 | CONFIG_KERNEL_START=0xc0000000 | ||
185 | CONFIG_TASK_SIZE=0x80000000 | ||
186 | CONFIG_SELECT_MEMORY_MODEL=y | ||
187 | CONFIG_FLATMEM_MANUAL=y | ||
188 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
189 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
190 | CONFIG_FLATMEM=y | ||
191 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
192 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
193 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
194 | # CONFIG_PHYS_ADDR_T_64BIT is not set | ||
195 | CONFIG_ZONE_DMA_FLAG=0 | ||
196 | CONFIG_VIRT_TO_BUS=y | ||
197 | CONFIG_UNEVICTABLE_LRU=y | ||
198 | CONFIG_HAVE_MLOCK=y | ||
199 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | ||
200 | |||
201 | # | ||
202 | # Exectuable file formats | ||
203 | # | ||
204 | CONFIG_BINFMT_ELF=y | ||
205 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
206 | # CONFIG_HAVE_AOUT is not set | ||
207 | # CONFIG_BINFMT_MISC is not set | ||
208 | CONFIG_NET=y | ||
209 | |||
210 | # | ||
211 | # Networking options | ||
212 | # | ||
213 | CONFIG_PACKET=y | ||
214 | # CONFIG_PACKET_MMAP is not set | ||
215 | CONFIG_UNIX=y | ||
216 | CONFIG_XFRM=y | ||
217 | # CONFIG_XFRM_USER is not set | ||
218 | # CONFIG_XFRM_SUB_POLICY is not set | ||
219 | # CONFIG_XFRM_MIGRATE is not set | ||
220 | # CONFIG_XFRM_STATISTICS is not set | ||
221 | # CONFIG_NET_KEY is not set | ||
222 | CONFIG_INET=y | ||
223 | # CONFIG_IP_MULTICAST is not set | ||
224 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
225 | CONFIG_IP_FIB_HASH=y | ||
226 | # CONFIG_IP_PNP is not set | ||
227 | # CONFIG_NET_IPIP is not set | ||
228 | # CONFIG_NET_IPGRE is not set | ||
229 | # CONFIG_ARPD is not set | ||
230 | # CONFIG_SYN_COOKIES is not set | ||
231 | # CONFIG_INET_AH is not set | ||
232 | # CONFIG_INET_ESP is not set | ||
233 | # CONFIG_INET_IPCOMP is not set | ||
234 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
235 | # CONFIG_INET_TUNNEL is not set | ||
236 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | ||
237 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
238 | CONFIG_INET_XFRM_MODE_BEET=y | ||
239 | # CONFIG_INET_LRO is not set | ||
240 | CONFIG_INET_DIAG=y | ||
241 | CONFIG_INET_TCP_DIAG=y | ||
242 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
243 | CONFIG_TCP_CONG_CUBIC=y | ||
244 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
245 | # CONFIG_TCP_MD5SIG is not set | ||
246 | # CONFIG_IPV6 is not set | ||
247 | # CONFIG_NETWORK_SECMARK is not set | ||
248 | # CONFIG_NETFILTER is not set | ||
249 | # CONFIG_IP_DCCP is not set | ||
250 | # CONFIG_IP_SCTP is not set | ||
251 | # CONFIG_TIPC is not set | ||
252 | # CONFIG_ATM is not set | ||
253 | # CONFIG_BRIDGE is not set | ||
254 | # CONFIG_NET_DSA is not set | ||
255 | # CONFIG_VLAN_8021Q is not set | ||
256 | # CONFIG_DECNET is not set | ||
257 | # CONFIG_LLC2 is not set | ||
258 | # CONFIG_IPX is not set | ||
259 | # CONFIG_ATALK is not set | ||
260 | # CONFIG_X25 is not set | ||
261 | # CONFIG_LAPB is not set | ||
262 | # CONFIG_ECONET is not set | ||
263 | # CONFIG_WAN_ROUTER is not set | ||
264 | # CONFIG_PHONET is not set | ||
265 | # CONFIG_NET_SCHED is not set | ||
266 | # CONFIG_DCB is not set | ||
267 | |||
268 | # | ||
269 | # Network testing | ||
270 | # | ||
271 | # CONFIG_NET_PKTGEN is not set | ||
272 | # CONFIG_HAMRADIO is not set | ||
273 | # CONFIG_CAN is not set | ||
274 | # CONFIG_IRDA is not set | ||
275 | # CONFIG_BT is not set | ||
276 | # CONFIG_AF_RXRPC is not set | ||
277 | # CONFIG_WIRELESS is not set | ||
278 | # CONFIG_WIMAX is not set | ||
279 | # CONFIG_RFKILL is not set | ||
280 | # CONFIG_NET_9P is not set | ||
281 | |||
282 | # | ||
283 | # Device Drivers | ||
284 | # | ||
285 | |||
286 | # | ||
287 | # Generic Driver Options | ||
288 | # | ||
289 | CONFIG_STANDALONE=y | ||
290 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
291 | # CONFIG_DEBUG_DRIVER is not set | ||
292 | # CONFIG_DEBUG_DEVRES is not set | ||
293 | # CONFIG_SYS_HYPERVISOR is not set | ||
294 | # CONFIG_CONNECTOR is not set | ||
295 | # CONFIG_MTD is not set | ||
296 | CONFIG_OF_DEVICE=y | ||
297 | # CONFIG_PARPORT is not set | ||
298 | CONFIG_BLK_DEV=y | ||
299 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
300 | # CONFIG_BLK_DEV_LOOP is not set | ||
301 | # CONFIG_BLK_DEV_NBD is not set | ||
302 | CONFIG_BLK_DEV_RAM=y | ||
303 | CONFIG_BLK_DEV_RAM_COUNT=16 | ||
304 | CONFIG_BLK_DEV_RAM_SIZE=8192 | ||
305 | # CONFIG_BLK_DEV_XIP is not set | ||
306 | # CONFIG_CDROM_PKTCDVD is not set | ||
307 | # CONFIG_ATA_OVER_ETH is not set | ||
308 | # CONFIG_XILINX_SYSACE is not set | ||
309 | CONFIG_MISC_DEVICES=y | ||
310 | # CONFIG_ENCLOSURE_SERVICES is not set | ||
311 | # CONFIG_C2PORT is not set | ||
312 | |||
313 | # | ||
314 | # EEPROM support | ||
315 | # | ||
316 | # CONFIG_EEPROM_93CX6 is not set | ||
317 | |||
318 | # | ||
319 | # SCSI device support | ||
320 | # | ||
321 | # CONFIG_RAID_ATTRS is not set | ||
322 | # CONFIG_SCSI is not set | ||
323 | # CONFIG_SCSI_DMA is not set | ||
324 | # CONFIG_SCSI_NETLINK is not set | ||
325 | # CONFIG_ATA is not set | ||
326 | # CONFIG_MD is not set | ||
327 | CONFIG_NETDEVICES=y | ||
328 | CONFIG_COMPAT_NET_DEV_OPS=y | ||
329 | # CONFIG_DUMMY is not set | ||
330 | # CONFIG_BONDING is not set | ||
331 | # CONFIG_MACVLAN is not set | ||
332 | # CONFIG_EQUALIZER is not set | ||
333 | # CONFIG_TUN is not set | ||
334 | # CONFIG_VETH is not set | ||
335 | # CONFIG_PHYLIB is not set | ||
336 | CONFIG_NET_ETHERNET=y | ||
337 | # CONFIG_MII is not set | ||
338 | # CONFIG_ETHOC is not set | ||
339 | # CONFIG_DNET is not set | ||
340 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
341 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
342 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
343 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
344 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | ||
345 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | ||
346 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | ||
347 | # CONFIG_B44 is not set | ||
348 | CONFIG_NETDEV_1000=y | ||
349 | CONFIG_NETDEV_10000=y | ||
350 | |||
351 | # | ||
352 | # Wireless LAN | ||
353 | # | ||
354 | # CONFIG_WLAN_PRE80211 is not set | ||
355 | # CONFIG_WLAN_80211 is not set | ||
356 | |||
357 | # | ||
358 | # Enable WiMAX (Networking options) to see the WiMAX drivers | ||
359 | # | ||
360 | # CONFIG_WAN is not set | ||
361 | # CONFIG_PPP is not set | ||
362 | # CONFIG_SLIP is not set | ||
363 | # CONFIG_NETCONSOLE is not set | ||
364 | # CONFIG_NETPOLL is not set | ||
365 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
366 | # CONFIG_ISDN is not set | ||
367 | # CONFIG_PHONE is not set | ||
368 | |||
369 | # | ||
370 | # Input device support | ||
371 | # | ||
372 | # CONFIG_INPUT is not set | ||
373 | |||
374 | # | ||
375 | # Hardware I/O ports | ||
376 | # | ||
377 | # CONFIG_SERIO is not set | ||
378 | # CONFIG_GAMEPORT is not set | ||
379 | |||
380 | # | ||
381 | # Character devices | ||
382 | # | ||
383 | # CONFIG_VT is not set | ||
384 | CONFIG_DEVKMEM=y | ||
385 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
386 | |||
387 | # | ||
388 | # Serial drivers | ||
389 | # | ||
390 | # CONFIG_SERIAL_8250 is not set | ||
391 | |||
392 | # | ||
393 | # Non-8250 serial port support | ||
394 | # | ||
395 | CONFIG_SERIAL_UARTLITE=y | ||
396 | CONFIG_SERIAL_UARTLITE_CONSOLE=y | ||
397 | CONFIG_SERIAL_CORE=y | ||
398 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
399 | CONFIG_UNIX98_PTYS=y | ||
400 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | ||
401 | CONFIG_LEGACY_PTYS=y | ||
402 | CONFIG_LEGACY_PTY_COUNT=256 | ||
403 | # CONFIG_IPMI_HANDLER is not set | ||
404 | # CONFIG_HW_RANDOM is not set | ||
405 | # CONFIG_RTC is not set | ||
406 | # CONFIG_GEN_RTC is not set | ||
407 | # CONFIG_XILINX_HWICAP is not set | ||
408 | # CONFIG_R3964 is not set | ||
409 | # CONFIG_RAW_DRIVER is not set | ||
410 | # CONFIG_TCG_TPM is not set | ||
411 | # CONFIG_I2C is not set | ||
412 | # CONFIG_SPI is not set | ||
413 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | ||
414 | # CONFIG_GPIOLIB is not set | ||
415 | # CONFIG_W1 is not set | ||
416 | # CONFIG_POWER_SUPPLY is not set | ||
417 | # CONFIG_HWMON is not set | ||
418 | # CONFIG_THERMAL is not set | ||
419 | # CONFIG_THERMAL_HWMON is not set | ||
420 | # CONFIG_WATCHDOG is not set | ||
421 | CONFIG_SSB_POSSIBLE=y | ||
422 | |||
423 | # | ||
424 | # Sonics Silicon Backplane | ||
425 | # | ||
426 | # CONFIG_SSB is not set | ||
427 | |||
428 | # | ||
429 | # Multifunction device drivers | ||
430 | # | ||
431 | # CONFIG_MFD_CORE is not set | ||
432 | # CONFIG_MFD_SM501 is not set | ||
433 | # CONFIG_HTC_PASIC3 is not set | ||
434 | # CONFIG_MFD_TMIO is not set | ||
435 | # CONFIG_REGULATOR is not set | ||
436 | |||
437 | # | ||
438 | # Multimedia devices | ||
439 | # | ||
440 | |||
441 | # | ||
442 | # Multimedia core support | ||
443 | # | ||
444 | # CONFIG_VIDEO_DEV is not set | ||
445 | # CONFIG_DVB_CORE is not set | ||
446 | # CONFIG_VIDEO_MEDIA is not set | ||
447 | |||
448 | # | ||
449 | # Multimedia drivers | ||
450 | # | ||
451 | # CONFIG_DAB is not set | ||
452 | |||
453 | # | ||
454 | # Graphics support | ||
455 | # | ||
456 | # CONFIG_VGASTATE is not set | ||
457 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
458 | # CONFIG_FB is not set | ||
459 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
460 | |||
461 | # | ||
462 | # Display device support | ||
463 | # | ||
464 | # CONFIG_DISPLAY_SUPPORT is not set | ||
465 | # CONFIG_SOUND is not set | ||
466 | # CONFIG_USB_SUPPORT is not set | ||
467 | # CONFIG_MMC is not set | ||
468 | # CONFIG_MEMSTICK is not set | ||
469 | # CONFIG_NEW_LEDS is not set | ||
470 | # CONFIG_ACCESSIBILITY is not set | ||
471 | # CONFIG_RTC_CLASS is not set | ||
472 | # CONFIG_DMADEVICES is not set | ||
473 | # CONFIG_AUXDISPLAY is not set | ||
474 | # CONFIG_UIO is not set | ||
475 | # CONFIG_STAGING is not set | ||
476 | |||
477 | # | ||
478 | # File systems | ||
479 | # | ||
480 | CONFIG_EXT2_FS=y | ||
481 | # CONFIG_EXT2_FS_XATTR is not set | ||
482 | # CONFIG_EXT2_FS_XIP is not set | ||
483 | # CONFIG_EXT3_FS is not set | ||
484 | # CONFIG_EXT4_FS is not set | ||
485 | # CONFIG_REISERFS_FS is not set | ||
486 | # CONFIG_JFS_FS is not set | ||
487 | # CONFIG_FS_POSIX_ACL is not set | ||
488 | CONFIG_FILE_LOCKING=y | ||
489 | # CONFIG_XFS_FS is not set | ||
490 | # CONFIG_OCFS2_FS is not set | ||
491 | # CONFIG_BTRFS_FS is not set | ||
492 | # CONFIG_DNOTIFY is not set | ||
493 | # CONFIG_INOTIFY is not set | ||
494 | # CONFIG_QUOTA is not set | ||
495 | # CONFIG_AUTOFS_FS is not set | ||
496 | # CONFIG_AUTOFS4_FS is not set | ||
497 | # CONFIG_FUSE_FS is not set | ||
498 | |||
499 | # | ||
500 | # Caches | ||
501 | # | ||
502 | # CONFIG_FSCACHE is not set | ||
503 | |||
504 | # | ||
505 | # CD-ROM/DVD Filesystems | ||
506 | # | ||
507 | # CONFIG_ISO9660_FS is not set | ||
508 | # CONFIG_UDF_FS is not set | ||
509 | |||
510 | # | ||
511 | # DOS/FAT/NT Filesystems | ||
512 | # | ||
513 | # CONFIG_MSDOS_FS is not set | ||
514 | # CONFIG_VFAT_FS is not set | ||
515 | # CONFIG_NTFS_FS is not set | ||
516 | |||
517 | # | ||
518 | # Pseudo filesystems | ||
519 | # | ||
520 | CONFIG_PROC_FS=y | ||
521 | # CONFIG_PROC_KCORE is not set | ||
522 | CONFIG_PROC_SYSCTL=y | ||
523 | CONFIG_PROC_PAGE_MONITOR=y | ||
524 | CONFIG_SYSFS=y | ||
525 | CONFIG_TMPFS=y | ||
526 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
527 | # CONFIG_HUGETLB_PAGE is not set | ||
528 | # CONFIG_CONFIGFS_FS is not set | ||
529 | CONFIG_MISC_FILESYSTEMS=y | ||
530 | # CONFIG_ADFS_FS is not set | ||
531 | # CONFIG_AFFS_FS is not set | ||
532 | # CONFIG_HFS_FS is not set | ||
533 | # CONFIG_HFSPLUS_FS is not set | ||
534 | # CONFIG_BEFS_FS is not set | ||
535 | # CONFIG_BFS_FS is not set | ||
536 | # CONFIG_EFS_FS is not set | ||
537 | # CONFIG_CRAMFS is not set | ||
538 | # CONFIG_SQUASHFS is not set | ||
539 | # CONFIG_VXFS_FS is not set | ||
540 | # CONFIG_MINIX_FS is not set | ||
541 | # CONFIG_OMFS_FS is not set | ||
542 | # CONFIG_HPFS_FS is not set | ||
543 | # CONFIG_QNX4FS_FS is not set | ||
544 | # CONFIG_ROMFS_FS is not set | ||
545 | # CONFIG_SYSV_FS is not set | ||
546 | # CONFIG_UFS_FS is not set | ||
547 | # CONFIG_NILFS2_FS is not set | ||
548 | CONFIG_NETWORK_FILESYSTEMS=y | ||
549 | CONFIG_NFS_FS=y | ||
550 | CONFIG_NFS_V3=y | ||
551 | # CONFIG_NFS_V3_ACL is not set | ||
552 | # CONFIG_NFS_V4 is not set | ||
553 | # CONFIG_NFSD is not set | ||
554 | CONFIG_LOCKD=y | ||
555 | CONFIG_LOCKD_V4=y | ||
556 | CONFIG_NFS_COMMON=y | ||
557 | CONFIG_SUNRPC=y | ||
558 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
559 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
560 | # CONFIG_SMB_FS is not set | ||
561 | CONFIG_CIFS=y | ||
562 | CONFIG_CIFS_STATS=y | ||
563 | CONFIG_CIFS_STATS2=y | ||
564 | # CONFIG_CIFS_WEAK_PW_HASH is not set | ||
565 | # CONFIG_CIFS_XATTR is not set | ||
566 | # CONFIG_CIFS_DEBUG2 is not set | ||
567 | # CONFIG_CIFS_EXPERIMENTAL is not set | ||
568 | # CONFIG_NCP_FS is not set | ||
569 | # CONFIG_CODA_FS is not set | ||
570 | # CONFIG_AFS_FS is not set | ||
571 | |||
572 | # | ||
573 | # Partition Types | ||
574 | # | ||
575 | CONFIG_PARTITION_ADVANCED=y | ||
576 | # CONFIG_ACORN_PARTITION is not set | ||
577 | # CONFIG_OSF_PARTITION is not set | ||
578 | # CONFIG_AMIGA_PARTITION is not set | ||
579 | # CONFIG_ATARI_PARTITION is not set | ||
580 | # CONFIG_MAC_PARTITION is not set | ||
581 | CONFIG_MSDOS_PARTITION=y | ||
582 | # CONFIG_BSD_DISKLABEL is not set | ||
583 | # CONFIG_MINIX_SUBPARTITION is not set | ||
584 | # CONFIG_SOLARIS_X86_PARTITION is not set | ||
585 | # CONFIG_UNIXWARE_DISKLABEL is not set | ||
586 | # CONFIG_LDM_PARTITION is not set | ||
587 | # CONFIG_SGI_PARTITION is not set | ||
588 | # CONFIG_ULTRIX_PARTITION is not set | ||
589 | # CONFIG_SUN_PARTITION is not set | ||
590 | # CONFIG_KARMA_PARTITION is not set | ||
591 | # CONFIG_EFI_PARTITION is not set | ||
592 | # CONFIG_SYSV68_PARTITION is not set | ||
593 | CONFIG_NLS=y | ||
594 | CONFIG_NLS_DEFAULT="iso8859-1" | ||
595 | # CONFIG_NLS_CODEPAGE_437 is not set | ||
596 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
597 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
598 | # CONFIG_NLS_CODEPAGE_850 is not set | ||
599 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
600 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
601 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
602 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
603 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
604 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
605 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
606 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
607 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
608 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
609 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
610 | # CONFIG_NLS_CODEPAGE_936 is not set | ||
611 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
612 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
613 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
614 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
615 | # CONFIG_NLS_ISO8859_8 is not set | ||
616 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
617 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
618 | # CONFIG_NLS_ASCII is not set | ||
619 | # CONFIG_NLS_ISO8859_1 is not set | ||
620 | # CONFIG_NLS_ISO8859_2 is not set | ||
621 | # CONFIG_NLS_ISO8859_3 is not set | ||
622 | # CONFIG_NLS_ISO8859_4 is not set | ||
623 | # CONFIG_NLS_ISO8859_5 is not set | ||
624 | # CONFIG_NLS_ISO8859_6 is not set | ||
625 | # CONFIG_NLS_ISO8859_7 is not set | ||
626 | # CONFIG_NLS_ISO8859_9 is not set | ||
627 | # CONFIG_NLS_ISO8859_13 is not set | ||
628 | # CONFIG_NLS_ISO8859_14 is not set | ||
629 | # CONFIG_NLS_ISO8859_15 is not set | ||
630 | # CONFIG_NLS_KOI8_R is not set | ||
631 | # CONFIG_NLS_KOI8_U is not set | ||
632 | # CONFIG_NLS_UTF8 is not set | ||
633 | # CONFIG_DLM is not set | ||
634 | |||
635 | # | ||
636 | # Kernel hacking | ||
637 | # | ||
638 | # CONFIG_PRINTK_TIME is not set | ||
639 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
640 | CONFIG_ENABLE_MUST_CHECK=y | ||
641 | CONFIG_FRAME_WARN=1024 | ||
642 | # CONFIG_MAGIC_SYSRQ is not set | ||
643 | # CONFIG_UNUSED_SYMBOLS is not set | ||
644 | # CONFIG_DEBUG_FS is not set | ||
645 | # CONFIG_HEADERS_CHECK is not set | ||
646 | CONFIG_DEBUG_KERNEL=y | ||
647 | # CONFIG_DEBUG_SHIRQ is not set | ||
648 | CONFIG_DETECT_SOFTLOCKUP=y | ||
649 | # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set | ||
650 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 | ||
651 | CONFIG_DETECT_HUNG_TASK=y | ||
652 | # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set | ||
653 | CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 | ||
654 | CONFIG_SCHED_DEBUG=y | ||
655 | # CONFIG_SCHEDSTATS is not set | ||
656 | # CONFIG_TIMER_STATS is not set | ||
657 | # CONFIG_DEBUG_OBJECTS is not set | ||
658 | CONFIG_DEBUG_SLAB=y | ||
659 | # CONFIG_DEBUG_SLAB_LEAK is not set | ||
660 | CONFIG_DEBUG_SPINLOCK=y | ||
661 | # CONFIG_DEBUG_MUTEXES is not set | ||
662 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | ||
663 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
664 | # CONFIG_DEBUG_KOBJECT is not set | ||
665 | CONFIG_DEBUG_INFO=y | ||
666 | # CONFIG_DEBUG_VM is not set | ||
667 | # CONFIG_DEBUG_WRITECOUNT is not set | ||
668 | # CONFIG_DEBUG_MEMORY_INIT is not set | ||
669 | # CONFIG_DEBUG_LIST is not set | ||
670 | # CONFIG_DEBUG_SG is not set | ||
671 | # CONFIG_DEBUG_NOTIFIERS is not set | ||
672 | # CONFIG_BOOT_PRINTK_DELAY is not set | ||
673 | # CONFIG_RCU_TORTURE_TEST is not set | ||
674 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
675 | # CONFIG_BACKTRACE_SELF_TEST is not set | ||
676 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | ||
677 | # CONFIG_FAULT_INJECTION is not set | ||
678 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | ||
679 | # CONFIG_PAGE_POISONING is not set | ||
680 | # CONFIG_SAMPLES is not set | ||
681 | CONFIG_EARLY_PRINTK=y | ||
682 | CONFIG_HEART_BEAT=y | ||
683 | CONFIG_DEBUG_BOOTMEM=y | ||
684 | |||
685 | # | ||
686 | # Security options | ||
687 | # | ||
688 | # CONFIG_KEYS is not set | ||
689 | # CONFIG_SECURITY is not set | ||
690 | # CONFIG_SECURITYFS is not set | ||
691 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
692 | CONFIG_CRYPTO=y | ||
693 | |||
694 | # | ||
695 | # Crypto core or helper | ||
696 | # | ||
697 | # CONFIG_CRYPTO_FIPS is not set | ||
698 | # CONFIG_CRYPTO_MANAGER is not set | ||
699 | # CONFIG_CRYPTO_MANAGER2 is not set | ||
700 | # CONFIG_CRYPTO_GF128MUL is not set | ||
701 | # CONFIG_CRYPTO_NULL is not set | ||
702 | # CONFIG_CRYPTO_CRYPTD is not set | ||
703 | # CONFIG_CRYPTO_AUTHENC is not set | ||
704 | # CONFIG_CRYPTO_TEST is not set | ||
705 | |||
706 | # | ||
707 | # Authenticated Encryption with Associated Data | ||
708 | # | ||
709 | # CONFIG_CRYPTO_CCM is not set | ||
710 | # CONFIG_CRYPTO_GCM is not set | ||
711 | # CONFIG_CRYPTO_SEQIV is not set | ||
712 | |||
713 | # | ||
714 | # Block modes | ||
715 | # | ||
716 | # CONFIG_CRYPTO_CBC is not set | ||
717 | # CONFIG_CRYPTO_CTR is not set | ||
718 | # CONFIG_CRYPTO_CTS is not set | ||
719 | # CONFIG_CRYPTO_ECB is not set | ||
720 | # CONFIG_CRYPTO_LRW is not set | ||
721 | # CONFIG_CRYPTO_PCBC is not set | ||
722 | # CONFIG_CRYPTO_XTS is not set | ||
723 | |||
724 | # | ||
725 | # Hash modes | ||
726 | # | ||
727 | # CONFIG_CRYPTO_HMAC is not set | ||
728 | # CONFIG_CRYPTO_XCBC is not set | ||
729 | |||
730 | # | ||
731 | # Digest | ||
732 | # | ||
733 | # CONFIG_CRYPTO_CRC32C is not set | ||
734 | # CONFIG_CRYPTO_MD4 is not set | ||
735 | # CONFIG_CRYPTO_MD5 is not set | ||
736 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
737 | # CONFIG_CRYPTO_RMD128 is not set | ||
738 | # CONFIG_CRYPTO_RMD160 is not set | ||
739 | # CONFIG_CRYPTO_RMD256 is not set | ||
740 | # CONFIG_CRYPTO_RMD320 is not set | ||
741 | # CONFIG_CRYPTO_SHA1 is not set | ||
742 | # CONFIG_CRYPTO_SHA256 is not set | ||
743 | # CONFIG_CRYPTO_SHA512 is not set | ||
744 | # CONFIG_CRYPTO_TGR192 is not set | ||
745 | # CONFIG_CRYPTO_WP512 is not set | ||
746 | |||
747 | # | ||
748 | # Ciphers | ||
749 | # | ||
750 | # CONFIG_CRYPTO_AES is not set | ||
751 | # CONFIG_CRYPTO_ANUBIS is not set | ||
752 | # CONFIG_CRYPTO_ARC4 is not set | ||
753 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
754 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
755 | # CONFIG_CRYPTO_CAST5 is not set | ||
756 | # CONFIG_CRYPTO_CAST6 is not set | ||
757 | # CONFIG_CRYPTO_DES is not set | ||
758 | # CONFIG_CRYPTO_FCRYPT is not set | ||
759 | # CONFIG_CRYPTO_KHAZAD is not set | ||
760 | # CONFIG_CRYPTO_SALSA20 is not set | ||
761 | # CONFIG_CRYPTO_SEED is not set | ||
762 | # CONFIG_CRYPTO_SERPENT is not set | ||
763 | # CONFIG_CRYPTO_TEA is not set | ||
764 | # CONFIG_CRYPTO_TWOFISH is not set | ||
765 | |||
766 | # | ||
767 | # Compression | ||
768 | # | ||
769 | # CONFIG_CRYPTO_DEFLATE is not set | ||
770 | # CONFIG_CRYPTO_ZLIB is not set | ||
771 | # CONFIG_CRYPTO_LZO is not set | ||
772 | |||
773 | # | ||
774 | # Random Number Generation | ||
775 | # | ||
776 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||
777 | CONFIG_CRYPTO_HW=y | ||
778 | # CONFIG_BINARY_PRINTF is not set | ||
779 | |||
780 | # | ||
781 | # Library routines | ||
782 | # | ||
783 | CONFIG_BITREVERSE=y | ||
784 | CONFIG_GENERIC_FIND_LAST_BIT=y | ||
785 | # CONFIG_CRC_CCITT is not set | ||
786 | # CONFIG_CRC16 is not set | ||
787 | # CONFIG_CRC_T10DIF is not set | ||
788 | # CONFIG_CRC_ITU_T is not set | ||
789 | CONFIG_CRC32=y | ||
790 | # CONFIG_CRC7 is not set | ||
791 | # CONFIG_LIBCRC32C is not set | ||
792 | CONFIG_ZLIB_INFLATE=y | ||
793 | CONFIG_DECOMPRESS_GZIP=y | ||
794 | CONFIG_HAS_IOMEM=y | ||
795 | CONFIG_HAS_IOPORT=y | ||
796 | CONFIG_HAS_DMA=y | ||
797 | CONFIG_HAVE_LMB=y | ||
798 | CONFIG_NLATTR=y | ||
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 31820dfef56b..db5294c30caf 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild | |||
@@ -1,26 +1,3 @@ | |||
1 | include include/asm-generic/Kbuild.asm | 1 | include include/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | header-y += auxvec.h | 3 | header-y += elf.h |
4 | header-y += errno.h | ||
5 | header-y += fcntl.h | ||
6 | header-y += ioctl.h | ||
7 | header-y += ioctls.h | ||
8 | header-y += ipcbuf.h | ||
9 | header-y += linkage.h | ||
10 | header-y += msgbuf.h | ||
11 | header-y += poll.h | ||
12 | header-y += resource.h | ||
13 | header-y += sembuf.h | ||
14 | header-y += shmbuf.h | ||
15 | header-y += sigcontext.h | ||
16 | header-y += siginfo.h | ||
17 | header-y += socket.h | ||
18 | header-y += sockios.h | ||
19 | header-y += statfs.h | ||
20 | header-y += stat.h | ||
21 | header-y += termbits.h | ||
22 | header-y += ucontext.h | ||
23 | |||
24 | unifdef-y += cputable.h | ||
25 | unifdef-y += elf.h | ||
26 | unifdef-y += termios.h | ||
diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h index a448d94ab721..0de612ad7cb2 100644 --- a/arch/microblaze/include/asm/atomic.h +++ b/arch/microblaze/include/asm/atomic.h | |||
@@ -118,6 +118,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
118 | #define smp_mb__before_atomic_inc() barrier() | 118 | #define smp_mb__before_atomic_inc() barrier() |
119 | #define smp_mb__after_atomic_inc() barrier() | 119 | #define smp_mb__after_atomic_inc() barrier() |
120 | 120 | ||
121 | #include <asm-generic/atomic.h> | 121 | #include <asm-generic/atomic-long.h> |
122 | 122 | ||
123 | #endif /* _ASM_MICROBLAZE_ATOMIC_H */ | 123 | #endif /* _ASM_MICROBLAZE_ATOMIC_H */ |
diff --git a/arch/microblaze/include/asm/bitsperlong.h b/arch/microblaze/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/microblaze/include/asm/bitsperlong.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/bitsperlong.h> | |||
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h index 3300b785049b..f989d6aad648 100644 --- a/arch/microblaze/include/asm/cacheflush.h +++ b/arch/microblaze/include/asm/cacheflush.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 PetaLogix | 2 | * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> |
3 | * Copyright (C) 2007-2009 PetaLogix | ||
3 | * Copyright (C) 2007 John Williams <john.williams@petalogix.com> | 4 | * Copyright (C) 2007 John Williams <john.williams@petalogix.com> |
4 | * based on v850 version which was | 5 | * based on v850 version which was |
5 | * Copyright (C) 2001,02,03 NEC Electronics Corporation | 6 | * Copyright (C) 2001,02,03 NEC Electronics Corporation |
@@ -43,6 +44,23 @@ | |||
43 | #define flush_icache_range(start, len) __invalidate_icache_range(start, len) | 44 | #define flush_icache_range(start, len) __invalidate_icache_range(start, len) |
44 | #define flush_icache_page(vma, pg) do { } while (0) | 45 | #define flush_icache_page(vma, pg) do { } while (0) |
45 | 46 | ||
47 | #ifndef CONFIG_MMU | ||
48 | # define flush_icache_user_range(start, len) do { } while (0) | ||
49 | #else | ||
50 | # define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all() | ||
51 | |||
52 | # define flush_page_to_ram(page) do { } while (0) | ||
53 | |||
54 | # define flush_icache() __invalidate_icache_all() | ||
55 | # define flush_cache_sigtramp(vaddr) \ | ||
56 | __invalidate_icache_range(vaddr, vaddr + 8) | ||
57 | |||
58 | # define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
59 | # define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
60 | |||
61 | # define flush_cache_dup_mm(mm) do { } while (0) | ||
62 | #endif | ||
63 | |||
46 | #define flush_cache_vmap(start, end) do { } while (0) | 64 | #define flush_cache_vmap(start, end) do { } while (0) |
47 | #define flush_cache_vunmap(start, end) do { } while (0) | 65 | #define flush_cache_vunmap(start, end) do { } while (0) |
48 | 66 | ||
diff --git a/arch/microblaze/include/asm/checksum.h b/arch/microblaze/include/asm/checksum.h index 92b30762ce59..97ea46b5cf80 100644 --- a/arch/microblaze/include/asm/checksum.h +++ b/arch/microblaze/include/asm/checksum.h | |||
@@ -51,7 +51,8 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); | |||
51 | * here even more important to align src and dst on a 32-bit (or even | 51 | * here even more important to align src and dst on a 32-bit (or even |
52 | * better 64-bit) boundary | 52 | * better 64-bit) boundary |
53 | */ | 53 | */ |
54 | extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum); | 54 | extern __wsum csum_partial_copy(const void *src, void *dst, int len, |
55 | __wsum sum); | ||
55 | 56 | ||
56 | /* | 57 | /* |
57 | * the same as csum_partial_copy, but copies from user space. | 58 | * the same as csum_partial_copy, but copies from user space. |
@@ -59,8 +60,8 @@ extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum); | |||
59 | * here even more important to align src and dst on a 32-bit (or even | 60 | * here even more important to align src and dst on a 32-bit (or even |
60 | * better 64-bit) boundary | 61 | * better 64-bit) boundary |
61 | */ | 62 | */ |
62 | extern __wsum csum_partial_copy_from_user(const char *src, char *dst, | 63 | extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, |
63 | int len, int sum, int *csum_err); | 64 | int len, __wsum sum, int *csum_err); |
64 | 65 | ||
65 | #define csum_partial_copy_nocheck(src, dst, len, sum) \ | 66 | #define csum_partial_copy_nocheck(src, dst, len, sum) \ |
66 | csum_partial_copy((src), (dst), (len), (sum)) | 67 | csum_partial_copy((src), (dst), (len), (sum)) |
@@ -75,11 +76,12 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); | |||
75 | /* | 76 | /* |
76 | * Fold a partial checksum | 77 | * Fold a partial checksum |
77 | */ | 78 | */ |
78 | static inline __sum16 csum_fold(unsigned int sum) | 79 | static inline __sum16 csum_fold(__wsum csum) |
79 | { | 80 | { |
81 | u32 sum = (__force u32)csum; | ||
80 | sum = (sum & 0xffff) + (sum >> 16); | 82 | sum = (sum & 0xffff) + (sum >> 16); |
81 | sum = (sum & 0xffff) + (sum >> 16); | 83 | sum = (sum & 0xffff) + (sum >> 16); |
82 | return ~sum; | 84 | return (__force __sum16)~sum; |
83 | } | 85 | } |
84 | 86 | ||
85 | static inline __sum16 | 87 | static inline __sum16 |
@@ -93,6 +95,6 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, | |||
93 | * this routine is used for miscellaneous IP-like checksums, mainly | 95 | * this routine is used for miscellaneous IP-like checksums, mainly |
94 | * in icmp.c | 96 | * in icmp.c |
95 | */ | 97 | */ |
96 | extern __sum16 ip_compute_csum(const unsigned char *buff, int len); | 98 | extern __sum16 ip_compute_csum(const void *buff, int len); |
97 | 99 | ||
98 | #endif /* _ASM_MICROBLAZE_CHECKSUM_H */ | 100 | #endif /* _ASM_MICROBLAZE_CHECKSUM_H */ |
diff --git a/arch/microblaze/include/asm/current.h b/arch/microblaze/include/asm/current.h index 8375ea991e26..29303ed825cc 100644 --- a/arch/microblaze/include/asm/current.h +++ b/arch/microblaze/include/asm/current.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -9,6 +11,12 @@ | |||
9 | #ifndef _ASM_MICROBLAZE_CURRENT_H | 11 | #ifndef _ASM_MICROBLAZE_CURRENT_H |
10 | #define _ASM_MICROBLAZE_CURRENT_H | 12 | #define _ASM_MICROBLAZE_CURRENT_H |
11 | 13 | ||
14 | /* | ||
15 | * Register used to hold the current task pointer while in the kernel. | ||
16 | * Any `call clobbered' register without a special meaning should be OK, | ||
17 | * but check asm/microblaze/kernel/entry.S to be sure. | ||
18 | */ | ||
19 | #define CURRENT_TASK r31 | ||
12 | # ifndef __ASSEMBLY__ | 20 | # ifndef __ASSEMBLY__ |
13 | /* | 21 | /* |
14 | * Dedicate r31 to keeping the current task pointer | 22 | * Dedicate r31 to keeping the current task pointer |
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 17336252a9b8..d00e40099165 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h | |||
@@ -1,129 +1 @@ | |||
1 | /* | #include <asm-generic/dma-mapping-broken.h> | |
2 | * Copyright (C) 2006 Atmark Techno, Inc. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_MICROBLAZE_DMA_MAPPING_H | ||
10 | #define _ASM_MICROBLAZE_DMA_MAPPING_H | ||
11 | |||
12 | #include <asm/cacheflush.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/bug.h> | ||
15 | |||
16 | struct scatterlist; | ||
17 | |||
18 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
19 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
20 | |||
21 | /* FIXME */ | ||
22 | static inline int | ||
23 | dma_supported(struct device *dev, u64 mask) | ||
24 | { | ||
25 | return 1; | ||
26 | } | ||
27 | |||
28 | static inline dma_addr_t | ||
29 | dma_map_page(struct device *dev, struct page *page, | ||
30 | unsigned long offset, size_t size, | ||
31 | enum dma_data_direction direction) | ||
32 | { | ||
33 | BUG(); | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | static inline void | ||
38 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
39 | enum dma_data_direction direction) | ||
40 | { | ||
41 | BUG(); | ||
42 | } | ||
43 | |||
44 | static inline int | ||
45 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
46 | enum dma_data_direction direction) | ||
47 | { | ||
48 | BUG(); | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static inline void | ||
53 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
54 | enum dma_data_direction direction) | ||
55 | { | ||
56 | BUG(); | ||
57 | } | ||
58 | |||
59 | static inline void | ||
60 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
61 | enum dma_data_direction direction) | ||
62 | { | ||
63 | BUG(); | ||
64 | } | ||
65 | |||
66 | static inline void | ||
67 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
68 | size_t size, enum dma_data_direction direction) | ||
69 | { | ||
70 | BUG(); | ||
71 | } | ||
72 | |||
73 | static inline void | ||
74 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
75 | enum dma_data_direction direction) | ||
76 | { | ||
77 | BUG(); | ||
78 | } | ||
79 | |||
80 | static inline void | ||
81 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
82 | enum dma_data_direction direction) | ||
83 | { | ||
84 | BUG(); | ||
85 | } | ||
86 | |||
87 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
88 | { | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | ||
93 | dma_addr_t *dma_handle, int flag) | ||
94 | { | ||
95 | return NULL; /* consistent_alloc(flag, size, dma_handle); */ | ||
96 | } | ||
97 | |||
98 | static inline void dma_free_coherent(struct device *dev, size_t size, | ||
99 | void *vaddr, dma_addr_t dma_handle) | ||
100 | { | ||
101 | BUG(); | ||
102 | } | ||
103 | |||
104 | static inline dma_addr_t | ||
105 | dma_map_single(struct device *dev, void *ptr, size_t size, | ||
106 | enum dma_data_direction direction) | ||
107 | { | ||
108 | BUG_ON(direction == DMA_NONE); | ||
109 | |||
110 | return virt_to_bus(ptr); | ||
111 | } | ||
112 | |||
113 | static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
114 | size_t size, | ||
115 | enum dma_data_direction direction) | ||
116 | { | ||
117 | switch (direction) { | ||
118 | case DMA_FROM_DEVICE: | ||
119 | flush_dcache_range((unsigned)dma_addr, | ||
120 | (unsigned)dma_addr + size); | ||
121 | /* Fall through */ | ||
122 | case DMA_TO_DEVICE: | ||
123 | break; | ||
124 | default: | ||
125 | BUG(); | ||
126 | } | ||
127 | } | ||
128 | |||
129 | #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */ | ||
diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h index 0967fa04fc5e..08c073badf19 100644 --- a/arch/microblaze/include/asm/dma.h +++ b/arch/microblaze/include/asm/dma.h | |||
@@ -9,8 +9,13 @@ | |||
9 | #ifndef _ASM_MICROBLAZE_DMA_H | 9 | #ifndef _ASM_MICROBLAZE_DMA_H |
10 | #define _ASM_MICROBLAZE_DMA_H | 10 | #define _ASM_MICROBLAZE_DMA_H |
11 | 11 | ||
12 | #ifndef CONFIG_MMU | ||
12 | /* we don't have dma address limit. define it as zero to be | 13 | /* we don't have dma address limit. define it as zero to be |
13 | * unlimited. */ | 14 | * unlimited. */ |
14 | #define MAX_DMA_ADDRESS (0) | 15 | #define MAX_DMA_ADDRESS (0) |
16 | #else | ||
17 | /* Virtual address corresponding to last available physical memory address. */ | ||
18 | #define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1) | ||
19 | #endif | ||
15 | 20 | ||
16 | #endif /* _ASM_MICROBLAZE_DMA_H */ | 21 | #endif /* _ASM_MICROBLAZE_DMA_H */ |
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h index 81337f241347..f92fc0dda006 100644 --- a/arch/microblaze/include/asm/elf.h +++ b/arch/microblaze/include/asm/elf.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -27,4 +29,95 @@ | |||
27 | */ | 29 | */ |
28 | #define ELF_CLASS ELFCLASS32 | 30 | #define ELF_CLASS ELFCLASS32 |
29 | 31 | ||
32 | #ifndef __uClinux__ | ||
33 | |||
34 | /* | ||
35 | * ELF register definitions.. | ||
36 | */ | ||
37 | |||
38 | #include <asm/ptrace.h> | ||
39 | #include <asm/byteorder.h> | ||
40 | |||
41 | #ifndef ELF_GREG_T | ||
42 | #define ELF_GREG_T | ||
43 | typedef unsigned long elf_greg_t; | ||
44 | #endif | ||
45 | |||
46 | #ifndef ELF_NGREG | ||
47 | #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) | ||
48 | #endif | ||
49 | |||
50 | #ifndef ELF_GREGSET_T | ||
51 | #define ELF_GREGSET_T | ||
52 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
53 | #endif | ||
54 | |||
55 | #ifndef ELF_FPREGSET_T | ||
56 | #define ELF_FPREGSET_T | ||
57 | |||
58 | /* TBD */ | ||
59 | #define ELF_NFPREG 33 /* includes fsr */ | ||
60 | typedef unsigned long elf_fpreg_t; | ||
61 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
62 | |||
63 | /* typedef struct user_fpu_struct elf_fpregset_t; */ | ||
64 | #endif | ||
65 | |||
66 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
67 | * use of this is to invoke "./ld.so someprog" to test out a new version of | ||
68 | * the loader. We need to make sure that it is out of the way of the program | ||
69 | * that it will "exec", and that there is sufficient room for the brk. | ||
70 | */ | ||
71 | |||
72 | #define ELF_ET_DYN_BASE (0x08000000) | ||
73 | |||
74 | #ifdef __LITTLE_ENDIAN__ | ||
75 | #define ELF_DATA ELFDATA2LSB | ||
76 | #else | ||
77 | #define ELF_DATA ELFDATA2MSB | ||
78 | #endif | ||
79 | |||
80 | #define USE_ELF_CORE_DUMP | ||
81 | #define ELF_EXEC_PAGESIZE 4096 | ||
82 | |||
83 | |||
84 | #define ELF_CORE_COPY_REGS(_dest, _regs) \ | ||
85 | memcpy((char *) &_dest, (char *) _regs, \ | ||
86 | sizeof(struct pt_regs)); | ||
87 | |||
88 | /* This yields a mask that user programs can use to figure out what | ||
89 | * instruction set this CPU supports. This could be done in user space, | ||
90 | * but it's not easy, and we've already done it here. | ||
91 | */ | ||
92 | #define ELF_HWCAP (0) | ||
93 | |||
94 | /* This yields a string that ld.so will use to load implementation | ||
95 | * specific libraries for optimization. This is more specific in | ||
96 | * intent than poking at uname or /proc/cpuinfo. | ||
97 | |||
98 | * For the moment, we have only optimizations for the Intel generations, | ||
99 | * but that could change... | ||
100 | */ | ||
101 | #define ELF_PLATFORM (NULL) | ||
102 | |||
103 | /* Added _f parameter. Is this definition correct: TBD */ | ||
104 | #define ELF_PLAT_INIT(_r, _f) \ | ||
105 | do { \ | ||
106 | _r->r1 = _r->r1 = _r->r2 = _r->r3 = \ | ||
107 | _r->r4 = _r->r5 = _r->r6 = _r->r7 = \ | ||
108 | _r->r8 = _r->r9 = _r->r10 = _r->r11 = \ | ||
109 | _r->r12 = _r->r13 = _r->r14 = _r->r15 = \ | ||
110 | _r->r16 = _r->r17 = _r->r18 = _r->r19 = \ | ||
111 | _r->r20 = _r->r21 = _r->r22 = _r->r23 = \ | ||
112 | _r->r24 = _r->r25 = _r->r26 = _r->r27 = \ | ||
113 | _r->r28 = _r->r29 = _r->r30 = _r->r31 = \ | ||
114 | 0; \ | ||
115 | } while (0) | ||
116 | |||
117 | #ifdef __KERNEL__ | ||
118 | #define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT) | ||
119 | #endif | ||
120 | |||
121 | #endif /* __uClinux__ */ | ||
122 | |||
30 | #endif /* _ASM_MICROBLAZE_ELF_H */ | 123 | #endif /* _ASM_MICROBLAZE_ELF_H */ |
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h index e4c3aef884df..61abbd232640 100644 --- a/arch/microblaze/include/asm/entry.h +++ b/arch/microblaze/include/asm/entry.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Definitions used by low-level trap handlers | 2 | * Definitions used by low-level trap handlers |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Michal Simek | 4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> |
5 | * Copyright (C) 2007 - 2008 PetaLogix | 5 | * Copyright (C) 2007-2009 PetaLogix |
6 | * Copyright (C) 2007 John Williams <john.williams@petalogix.com> | 6 | * Copyright (C) 2007 John Williams <john.williams@petalogix.com> |
7 | * | 7 | * |
8 | * This file is subject to the terms and conditions of the GNU General | 8 | * This file is subject to the terms and conditions of the GNU General |
@@ -31,7 +31,40 @@ DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */ | |||
31 | DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ | 31 | DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */ |
32 | # endif /* __ASSEMBLY__ */ | 32 | # endif /* __ASSEMBLY__ */ |
33 | 33 | ||
34 | #ifndef CONFIG_MMU | ||
35 | |||
34 | /* noMMU hasn't any space for args */ | 36 | /* noMMU hasn't any space for args */ |
35 | # define STATE_SAVE_ARG_SPACE (0) | 37 | # define STATE_SAVE_ARG_SPACE (0) |
36 | 38 | ||
39 | #else /* CONFIG_MMU */ | ||
40 | |||
41 | /* If true, system calls save and restore all registers (except result | ||
42 | * registers, of course). If false, then `call clobbered' registers | ||
43 | * will not be preserved, on the theory that system calls are basically | ||
44 | * function calls anyway, and the caller should be able to deal with it. | ||
45 | * This is a security risk, of course, as `internal' values may leak out | ||
46 | * after a system call, but that certainly doesn't matter very much for | ||
47 | * a processor with no MMU protection! For a protected-mode kernel, it | ||
48 | * would be faster to just zero those registers before returning. | ||
49 | * | ||
50 | * I can not rely on the glibc implementation. If you turn it off make | ||
51 | * sure that r11/r12 is saved in user-space. --KAA | ||
52 | * | ||
53 | * These are special variables using by the kernel trap/interrupt code | ||
54 | * to save registers in, at a time when there are no spare registers we | ||
55 | * can use to do so, and we can't depend on the value of the stack | ||
56 | * pointer. This means that they must be within a signed 16-bit | ||
57 | * displacement of 0x00000000. | ||
58 | */ | ||
59 | |||
60 | /* A `state save frame' is a struct pt_regs preceded by some extra space | ||
61 | * suitable for a function call stack frame. */ | ||
62 | |||
63 | /* Amount of room on the stack reserved for arguments and to satisfy the | ||
64 | * C calling conventions, in addition to the space used by the struct | ||
65 | * pt_regs that actually holds saved values. */ | ||
66 | #define STATE_SAVE_ARG_SPACE (6*4) /* Up to six arguments */ | ||
67 | |||
68 | #endif /* CONFIG_MMU */ | ||
69 | |||
37 | #endif /* _ASM_MICROBLAZE_ENTRY_H */ | 70 | #endif /* _ASM_MICROBLAZE_ENTRY_H */ |
diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h index 24ca540e77c0..90731df9e574 100644 --- a/arch/microblaze/include/asm/exceptions.h +++ b/arch/microblaze/include/asm/exceptions.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Preliminary support for HW exception handing for Microblaze | 2 | * Preliminary support for HW exception handing for Microblaze |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Michal Simek | 4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> |
5 | * Copyright (C) 2008 PetaLogix | 5 | * Copyright (C) 2008-2009 PetaLogix |
6 | * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> | 6 | * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> |
7 | * | 7 | * |
8 | * This file is subject to the terms and conditions of the GNU General | 8 | * This file is subject to the terms and conditions of the GNU General |
@@ -64,21 +64,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
64 | void die(const char *str, struct pt_regs *fp, long err); | 64 | void die(const char *str, struct pt_regs *fp, long err); |
65 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); | 65 | void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); |
66 | 66 | ||
67 | #if defined(CONFIG_XMON) | 67 | #ifdef CONFIG_MMU |
68 | extern void xmon(struct pt_regs *regs); | 68 | void __bug(const char *file, int line, void *data); |
69 | extern int xmon_bpt(struct pt_regs *regs); | 69 | int bad_trap(int trap_num, struct pt_regs *regs); |
70 | extern int xmon_sstep(struct pt_regs *regs); | 70 | int debug_trap(struct pt_regs *regs); |
71 | extern int xmon_iabr_match(struct pt_regs *regs); | 71 | #endif /* CONFIG_MMU */ |
72 | extern int xmon_dabr_match(struct pt_regs *regs); | ||
73 | extern void (*xmon_fault_handler)(struct pt_regs *regs); | ||
74 | 72 | ||
75 | void (*debugger)(struct pt_regs *regs) = xmon; | 73 | #if defined(CONFIG_KGDB) |
76 | int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt; | ||
77 | int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep; | ||
78 | int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match; | ||
79 | int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match; | ||
80 | void (*debugger_fault_handler)(struct pt_regs *regs); | ||
81 | #elif defined(CONFIG_KGDB) | ||
82 | void (*debugger)(struct pt_regs *regs); | 74 | void (*debugger)(struct pt_regs *regs); |
83 | int (*debugger_bpt)(struct pt_regs *regs); | 75 | int (*debugger_bpt)(struct pt_regs *regs); |
84 | int (*debugger_sstep)(struct pt_regs *regs); | 76 | int (*debugger_sstep)(struct pt_regs *regs); |
diff --git a/arch/microblaze/include/asm/flat.h b/arch/microblaze/include/asm/flat.h index acf0da543ef1..6847c1512c7b 100644 --- a/arch/microblaze/include/asm/flat.h +++ b/arch/microblaze/include/asm/flat.h | |||
@@ -13,7 +13,6 @@ | |||
13 | 13 | ||
14 | #include <asm/unaligned.h> | 14 | #include <asm/unaligned.h> |
15 | 15 | ||
16 | #define flat_stack_align(sp) /* nothing needed */ | ||
17 | #define flat_argvp_envp_on_stack() 0 | 16 | #define flat_argvp_envp_on_stack() 0 |
18 | #define flat_old_ram_flag(flags) (flags) | 17 | #define flat_old_ram_flag(flags) (flags) |
19 | #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) | 18 | #define flat_reloc_valid(reloc, size) ((reloc) <= (size)) |
diff --git a/arch/microblaze/include/asm/gpio.h b/arch/microblaze/include/asm/gpio.h index ea04632399d8..2345ac354d9b 100644 --- a/arch/microblaze/include/asm/gpio.h +++ b/arch/microblaze/include/asm/gpio.h | |||
@@ -11,8 +11,8 @@ | |||
11 | * (at your option) any later version. | 11 | * (at your option) any later version. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #ifndef __ASM_POWERPC_GPIO_H | 14 | #ifndef _ASM_MICROBLAZE_GPIO_H |
15 | #define __ASM_POWERPC_GPIO_H | 15 | #define _ASM_MICROBLAZE_GPIO_H |
16 | 16 | ||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <asm-generic/gpio.h> | 18 | #include <asm-generic/gpio.h> |
@@ -53,4 +53,4 @@ static inline int irq_to_gpio(unsigned int irq) | |||
53 | 53 | ||
54 | #endif /* CONFIG_GPIOLIB */ | 54 | #endif /* CONFIG_GPIOLIB */ |
55 | 55 | ||
56 | #endif /* __ASM_POWERPC_GPIO_H */ | 56 | #endif /* _ASM_MICROBLAZE_GPIO_H */ |
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 8b5853ee6b5c..5c173424d074 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2007-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -12,6 +14,9 @@ | |||
12 | #include <asm/byteorder.h> | 14 | #include <asm/byteorder.h> |
13 | #include <asm/page.h> | 15 | #include <asm/page.h> |
14 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <asm/byteorder.h> | ||
18 | #include <linux/mm.h> /* Get struct page {...} */ | ||
19 | |||
15 | 20 | ||
16 | #define IO_SPACE_LIMIT (0xFFFFFFFF) | 21 | #define IO_SPACE_LIMIT (0xFFFFFFFF) |
17 | 22 | ||
@@ -112,6 +117,30 @@ static inline void writel(unsigned int v, volatile void __iomem *addr) | |||
112 | #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) | 117 | #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) |
113 | #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) | 118 | #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) |
114 | 119 | ||
120 | #ifdef CONFIG_MMU | ||
121 | |||
122 | #define mm_ptov(addr) ((void *)__phys_to_virt(addr)) | ||
123 | #define mm_vtop(addr) ((unsigned long)__virt_to_phys(addr)) | ||
124 | #define phys_to_virt(addr) ((void *)__phys_to_virt(addr)) | ||
125 | #define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) | ||
126 | #define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) | ||
127 | |||
128 | #define __page_address(page) \ | ||
129 | (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) | ||
130 | #define page_to_phys(page) virt_to_phys((void *)__page_address(page)) | ||
131 | #define page_to_bus(page) (page_to_phys(page)) | ||
132 | #define bus_to_virt(addr) (phys_to_virt(addr)) | ||
133 | |||
134 | extern void iounmap(void *addr); | ||
135 | /*extern void *__ioremap(phys_addr_t address, unsigned long size, | ||
136 | unsigned long flags);*/ | ||
137 | extern void __iomem *ioremap(phys_addr_t address, unsigned long size); | ||
138 | #define ioremap_writethrough(addr, size) ioremap((addr), (size)) | ||
139 | #define ioremap_nocache(addr, size) ioremap((addr), (size)) | ||
140 | #define ioremap_fullcache(addr, size) ioremap((addr), (size)) | ||
141 | |||
142 | #else /* CONFIG_MMU */ | ||
143 | |||
115 | /** | 144 | /** |
116 | * virt_to_phys - map virtual addresses to physical | 145 | * virt_to_phys - map virtual addresses to physical |
117 | * @address: address to remap | 146 | * @address: address to remap |
@@ -160,6 +189,8 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size, | |||
160 | #define iounmap(addr) ((void)0) | 189 | #define iounmap(addr) ((void)0) |
161 | #define ioremap_nocache(physaddr, size) ioremap(physaddr, size) | 190 | #define ioremap_nocache(physaddr, size) ioremap(physaddr, size) |
162 | 191 | ||
192 | #endif /* CONFIG_MMU */ | ||
193 | |||
163 | /* | 194 | /* |
164 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | 195 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem |
165 | * access | 196 | * access |
diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h index 0e0431d61635..66cad6a99d77 100644 --- a/arch/microblaze/include/asm/mmu.h +++ b/arch/microblaze/include/asm/mmu.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -9,11 +11,109 @@ | |||
9 | #ifndef _ASM_MICROBLAZE_MMU_H | 11 | #ifndef _ASM_MICROBLAZE_MMU_H |
10 | #define _ASM_MICROBLAZE_MMU_H | 12 | #define _ASM_MICROBLAZE_MMU_H |
11 | 13 | ||
12 | #ifndef __ASSEMBLY__ | 14 | # ifndef CONFIG_MMU |
15 | # ifndef __ASSEMBLY__ | ||
13 | typedef struct { | 16 | typedef struct { |
14 | struct vm_list_struct *vmlist; | 17 | struct vm_list_struct *vmlist; |
15 | unsigned long end_brk; | 18 | unsigned long end_brk; |
16 | } mm_context_t; | 19 | } mm_context_t; |
17 | #endif /* __ASSEMBLY__ */ | 20 | # endif /* __ASSEMBLY__ */ |
21 | # else /* CONFIG_MMU */ | ||
22 | # ifdef __KERNEL__ | ||
23 | # ifndef __ASSEMBLY__ | ||
18 | 24 | ||
25 | /* Default "unsigned long" context */ | ||
26 | typedef unsigned long mm_context_t; | ||
27 | |||
28 | /* Hardware Page Table Entry */ | ||
29 | typedef struct _PTE { | ||
30 | unsigned long v:1; /* Entry is valid */ | ||
31 | unsigned long vsid:24; /* Virtual segment identifier */ | ||
32 | unsigned long h:1; /* Hash algorithm indicator */ | ||
33 | unsigned long api:6; /* Abbreviated page index */ | ||
34 | unsigned long rpn:20; /* Real (physical) page number */ | ||
35 | unsigned long :3; /* Unused */ | ||
36 | unsigned long r:1; /* Referenced */ | ||
37 | unsigned long c:1; /* Changed */ | ||
38 | unsigned long w:1; /* Write-thru cache mode */ | ||
39 | unsigned long i:1; /* Cache inhibited */ | ||
40 | unsigned long m:1; /* Memory coherence */ | ||
41 | unsigned long g:1; /* Guarded */ | ||
42 | unsigned long :1; /* Unused */ | ||
43 | unsigned long pp:2; /* Page protection */ | ||
44 | } PTE; | ||
45 | |||
46 | /* Values for PP (assumes Ks=0, Kp=1) */ | ||
47 | # define PP_RWXX 0 /* Supervisor read/write, User none */ | ||
48 | # define PP_RWRX 1 /* Supervisor read/write, User read */ | ||
49 | # define PP_RWRW 2 /* Supervisor read/write, User read/write */ | ||
50 | # define PP_RXRX 3 /* Supervisor read, User read */ | ||
51 | |||
52 | /* Segment Register */ | ||
53 | typedef struct _SEGREG { | ||
54 | unsigned long t:1; /* Normal or I/O type */ | ||
55 | unsigned long ks:1; /* Supervisor 'key' (normally 0) */ | ||
56 | unsigned long kp:1; /* User 'key' (normally 1) */ | ||
57 | unsigned long n:1; /* No-execute */ | ||
58 | unsigned long :4; /* Unused */ | ||
59 | unsigned long vsid:24; /* Virtual Segment Identifier */ | ||
60 | } SEGREG; | ||
61 | |||
62 | extern void _tlbie(unsigned long va); /* invalidate a TLB entry */ | ||
63 | extern void _tlbia(void); /* invalidate all TLB entries */ | ||
64 | # endif /* __ASSEMBLY__ */ | ||
65 | |||
66 | /* | ||
67 | * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The | ||
68 | * instruction and data sides share a unified, 64-entry, semi-associative | ||
69 | * TLB which is maintained totally under software control. In addition, the | ||
70 | * instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative | ||
71 | * TLB which serves as a first level to the shared TLB. These two TLBs are | ||
72 | * known as the UTLB and ITLB, respectively. | ||
73 | */ | ||
74 | |||
75 | # define MICROBLAZE_TLB_SIZE 64 | ||
76 | |||
77 | /* | ||
78 | * TLB entries are defined by a "high" tag portion and a "low" data | ||
79 | * portion. The data portion is 32-bits. | ||
80 | * | ||
81 | * TLB entries are managed entirely under software control by reading, | ||
82 | * writing, and searching using the MTS and MFS instructions. | ||
83 | */ | ||
84 | |||
85 | # define TLB_LO 1 | ||
86 | # define TLB_HI 0 | ||
87 | # define TLB_DATA TLB_LO | ||
88 | # define TLB_TAG TLB_HI | ||
89 | |||
90 | /* Tag portion */ | ||
91 | # define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */ | ||
92 | # define TLB_PAGESZ_MASK 0x00000380 | ||
93 | # define TLB_PAGESZ(x) (((x) & 0x7) << 7) | ||
94 | # define PAGESZ_1K 0 | ||
95 | # define PAGESZ_4K 1 | ||
96 | # define PAGESZ_16K 2 | ||
97 | # define PAGESZ_64K 3 | ||
98 | # define PAGESZ_256K 4 | ||
99 | # define PAGESZ_1M 5 | ||
100 | # define PAGESZ_4M 6 | ||
101 | # define PAGESZ_16M 7 | ||
102 | # define TLB_VALID 0x00000040 /* Entry is valid */ | ||
103 | |||
104 | /* Data portion */ | ||
105 | # define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */ | ||
106 | # define TLB_PERM_MASK 0x00000300 | ||
107 | # define TLB_EX 0x00000200 /* Instruction execution allowed */ | ||
108 | # define TLB_WR 0x00000100 /* Writes permitted */ | ||
109 | # define TLB_ZSEL_MASK 0x000000F0 | ||
110 | # define TLB_ZSEL(x) (((x) & 0xF) << 4) | ||
111 | # define TLB_ATTR_MASK 0x0000000F | ||
112 | # define TLB_W 0x00000008 /* Caching is write-through */ | ||
113 | # define TLB_I 0x00000004 /* Caching is inhibited */ | ||
114 | # define TLB_M 0x00000002 /* Memory is coherent */ | ||
115 | # define TLB_G 0x00000001 /* Memory is guarded from prefetch */ | ||
116 | |||
117 | # endif /* __KERNEL__ */ | ||
118 | # endif /* CONFIG_MMU */ | ||
19 | #endif /* _ASM_MICROBLAZE_MMU_H */ | 119 | #endif /* _ASM_MICROBLAZE_MMU_H */ |
diff --git a/arch/microblaze/include/asm/mmu_context.h b/arch/microblaze/include/asm/mmu_context.h index 150ca01b74ba..385fed16bbfb 100644 --- a/arch/microblaze/include/asm/mmu_context.h +++ b/arch/microblaze/include/asm/mmu_context.h | |||
@@ -1,21 +1,5 @@ | |||
1 | /* | 1 | #ifdef CONFIG_MMU |
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 2 | # include "mmu_context_mm.h" |
3 | * | 3 | #else |
4 | * This file is subject to the terms and conditions of the GNU General Public | 4 | # include "mmu_context_no.h" |
5 | * License. See the file "COPYING" in the main directory of this archive | 5 | #endif |
6 | * for more details. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H | ||
10 | #define _ASM_MICROBLAZE_MMU_CONTEXT_H | ||
11 | |||
12 | # define init_new_context(tsk, mm) ({ 0; }) | ||
13 | |||
14 | # define enter_lazy_tlb(mm, tsk) do {} while (0) | ||
15 | # define change_mm_context(old, ctx, _pml4) do {} while (0) | ||
16 | # define destroy_context(mm) do {} while (0) | ||
17 | # define deactivate_mm(tsk, mm) do {} while (0) | ||
18 | # define switch_mm(prev, next, tsk) do {} while (0) | ||
19 | # define activate_mm(prev, next) do {} while (0) | ||
20 | |||
21 | #endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */ | ||
diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h new file mode 100644 index 000000000000..3e5c254e8d1c --- /dev/null +++ b/arch/microblaze/include/asm/mmu_context_mm.h | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
4 | * Copyright (C) 2006 Atmark Techno, Inc. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H | ||
12 | #define _ASM_MICROBLAZE_MMU_CONTEXT_H | ||
13 | |||
14 | #include <asm/atomic.h> | ||
15 | #include <asm/bitops.h> | ||
16 | #include <asm/mmu.h> | ||
17 | #include <asm-generic/mm_hooks.h> | ||
18 | |||
19 | # ifdef __KERNEL__ | ||
20 | /* | ||
21 | * This function defines the mapping from contexts to VSIDs (virtual | ||
22 | * segment IDs). We use a skew on both the context and the high 4 bits | ||
23 | * of the 32-bit virtual address (the "effective segment ID") in order | ||
24 | * to spread out the entries in the MMU hash table. | ||
25 | */ | ||
26 | # define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \ | ||
27 | & 0xffffff) | ||
28 | |||
29 | /* | ||
30 | MicroBlaze has 256 contexts, so we can just rotate through these | ||
31 | as a way of "switching" contexts. If the TID of the TLB is zero, | ||
32 | the PID/TID comparison is disabled, so we can use a TID of zero | ||
33 | to represent all kernel pages as shared among all contexts. | ||
34 | */ | ||
35 | |||
36 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | ||
37 | { | ||
38 | } | ||
39 | |||
40 | # define NO_CONTEXT 256 | ||
41 | # define LAST_CONTEXT 255 | ||
42 | # define FIRST_CONTEXT 1 | ||
43 | |||
44 | /* | ||
45 | * Set the current MMU context. | ||
46 | * This is done byloading up the segment registers for the user part of the | ||
47 | * address space. | ||
48 | * | ||
49 | * Since the PGD is immediately available, it is much faster to simply | ||
50 | * pass this along as a second parameter, which is required for 8xx and | ||
51 | * can be used for debugging on all processors (if you happen to have | ||
52 | * an Abatron). | ||
53 | */ | ||
54 | extern void set_context(mm_context_t context, pgd_t *pgd); | ||
55 | |||
56 | /* | ||
57 | * Bitmap of contexts in use. | ||
58 | * The size of this bitmap is LAST_CONTEXT + 1 bits. | ||
59 | */ | ||
60 | extern unsigned long context_map[]; | ||
61 | |||
62 | /* | ||
63 | * This caches the next context number that we expect to be free. | ||
64 | * Its use is an optimization only, we can't rely on this context | ||
65 | * number to be free, but it usually will be. | ||
66 | */ | ||
67 | extern mm_context_t next_mmu_context; | ||
68 | |||
69 | /* | ||
70 | * Since we don't have sufficient contexts to give one to every task | ||
71 | * that could be in the system, we need to be able to steal contexts. | ||
72 | * These variables support that. | ||
73 | */ | ||
74 | extern atomic_t nr_free_contexts; | ||
75 | extern struct mm_struct *context_mm[LAST_CONTEXT+1]; | ||
76 | extern void steal_context(void); | ||
77 | |||
78 | /* | ||
79 | * Get a new mmu context for the address space described by `mm'. | ||
80 | */ | ||
81 | static inline void get_mmu_context(struct mm_struct *mm) | ||
82 | { | ||
83 | mm_context_t ctx; | ||
84 | |||
85 | if (mm->context != NO_CONTEXT) | ||
86 | return; | ||
87 | while (atomic_dec_if_positive(&nr_free_contexts) < 0) | ||
88 | steal_context(); | ||
89 | ctx = next_mmu_context; | ||
90 | while (test_and_set_bit(ctx, context_map)) { | ||
91 | ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx); | ||
92 | if (ctx > LAST_CONTEXT) | ||
93 | ctx = 0; | ||
94 | } | ||
95 | next_mmu_context = (ctx + 1) & LAST_CONTEXT; | ||
96 | mm->context = ctx; | ||
97 | context_mm[ctx] = mm; | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * Set up the context for a new address space. | ||
102 | */ | ||
103 | # define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) | ||
104 | |||
105 | /* | ||
106 | * We're finished using the context for an address space. | ||
107 | */ | ||
108 | static inline void destroy_context(struct mm_struct *mm) | ||
109 | { | ||
110 | if (mm->context != NO_CONTEXT) { | ||
111 | clear_bit(mm->context, context_map); | ||
112 | mm->context = NO_CONTEXT; | ||
113 | atomic_inc(&nr_free_contexts); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
118 | struct task_struct *tsk) | ||
119 | { | ||
120 | tsk->thread.pgdir = next->pgd; | ||
121 | get_mmu_context(next); | ||
122 | set_context(next->context, next->pgd); | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * After we have set current->mm to a new value, this activates | ||
127 | * the context for the new mm so we see the new mappings. | ||
128 | */ | ||
129 | static inline void activate_mm(struct mm_struct *active_mm, | ||
130 | struct mm_struct *mm) | ||
131 | { | ||
132 | current->thread.pgdir = mm->pgd; | ||
133 | get_mmu_context(mm); | ||
134 | set_context(mm->context, mm->pgd); | ||
135 | } | ||
136 | |||
137 | extern void mmu_context_init(void); | ||
138 | |||
139 | # endif /* __KERNEL__ */ | ||
140 | #endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */ | ||
diff --git a/arch/microblaze/include/asm/mmu_context_no.h b/arch/microblaze/include/asm/mmu_context_no.h new file mode 100644 index 000000000000..ba5567190154 --- /dev/null +++ b/arch/microblaze/include/asm/mmu_context_no.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
4 | * Copyright (C) 2006 Atmark Techno, Inc. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H | ||
12 | #define _ASM_MICROBLAZE_MMU_CONTEXT_H | ||
13 | |||
14 | # define init_new_context(tsk, mm) ({ 0; }) | ||
15 | |||
16 | # define enter_lazy_tlb(mm, tsk) do {} while (0) | ||
17 | # define change_mm_context(old, ctx, _pml4) do {} while (0) | ||
18 | # define destroy_context(mm) do {} while (0) | ||
19 | # define deactivate_mm(tsk, mm) do {} while (0) | ||
20 | # define switch_mm(prev, next, tsk) do {} while (0) | ||
21 | # define activate_mm(prev, next) do {} while (0) | ||
22 | |||
23 | #endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */ | ||
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index 7238dcfcc517..72aceae88680 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h | |||
@@ -1,6 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Michal Simek | 2 | * VM ops |
3 | * Copyright (C) 2008 PetaLogix | 3 | * |
4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
5 | * Copyright (C) 2008-2009 PetaLogix | ||
4 | * Copyright (C) 2006 Atmark Techno, Inc. | 6 | * Copyright (C) 2006 Atmark Techno, Inc. |
5 | * Changes for MMU support: | 7 | * Changes for MMU support: |
6 | * Copyright (C) 2007 Xilinx, Inc. All rights reserved. | 8 | * Copyright (C) 2007 Xilinx, Inc. All rights reserved. |
@@ -15,14 +17,15 @@ | |||
15 | 17 | ||
16 | #include <linux/pfn.h> | 18 | #include <linux/pfn.h> |
17 | #include <asm/setup.h> | 19 | #include <asm/setup.h> |
20 | #include <linux/const.h> | ||
21 | |||
22 | #ifdef __KERNEL__ | ||
18 | 23 | ||
19 | /* PAGE_SHIFT determines the page size */ | 24 | /* PAGE_SHIFT determines the page size */ |
20 | #define PAGE_SHIFT (12) | 25 | #define PAGE_SHIFT (12) |
21 | #define PAGE_SIZE (1UL << PAGE_SHIFT) | 26 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) |
22 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 27 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
23 | 28 | ||
24 | #ifdef __KERNEL__ | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | 29 | #ifndef __ASSEMBLY__ |
27 | 30 | ||
28 | #define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) | 31 | #define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) |
@@ -35,6 +38,7 @@ | |||
35 | /* align addr on a size boundary - adjust address up if needed */ | 38 | /* align addr on a size boundary - adjust address up if needed */ |
36 | #define _ALIGN(addr, size) _ALIGN_UP(addr, size) | 39 | #define _ALIGN(addr, size) _ALIGN_UP(addr, size) |
37 | 40 | ||
41 | #ifndef CONFIG_MMU | ||
38 | /* | 42 | /* |
39 | * PAGE_OFFSET -- the first address of the first page of memory. When not | 43 | * PAGE_OFFSET -- the first address of the first page of memory. When not |
40 | * using MMU this corresponds to the first free page in physical memory (aligned | 44 | * using MMU this corresponds to the first free page in physical memory (aligned |
@@ -43,15 +47,44 @@ | |||
43 | extern unsigned int __page_offset; | 47 | extern unsigned int __page_offset; |
44 | #define PAGE_OFFSET __page_offset | 48 | #define PAGE_OFFSET __page_offset |
45 | 49 | ||
46 | #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) | 50 | #else /* CONFIG_MMU */ |
47 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) | ||
48 | #define free_user_page(page, addr) free_page(addr) | ||
49 | 51 | ||
50 | #define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) | 52 | /* |
53 | * PAGE_OFFSET -- the first address of the first page of memory. With MMU | ||
54 | * it is set to the kernel start address (aligned on a page boundary). | ||
55 | * | ||
56 | * CONFIG_KERNEL_START is defined in arch/microblaze/config.in and used | ||
57 | * in arch/microblaze/Makefile. | ||
58 | */ | ||
59 | #define PAGE_OFFSET CONFIG_KERNEL_START | ||
51 | 60 | ||
61 | /* | ||
62 | * MAP_NR -- given an address, calculate the index of the page struct which | ||
63 | * points to the address's page. | ||
64 | */ | ||
65 | #define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT) | ||
52 | 66 | ||
53 | #define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) | 67 | /* |
54 | #define copy_user_page(vto, vfrom, vaddr, topg) \ | 68 | * The basic type of a PTE - 32 bit physical addressing. |
69 | */ | ||
70 | typedef unsigned long pte_basic_t; | ||
71 | #define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */ | ||
72 | #define PTE_FMT "%.8lx" | ||
73 | |||
74 | #endif /* CONFIG_MMU */ | ||
75 | |||
76 | # ifndef CONFIG_MMU | ||
77 | # define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) | ||
78 | # define get_user_page(vaddr) __get_free_page(GFP_KERNEL) | ||
79 | # define free_user_page(page, addr) free_page(addr) | ||
80 | # else /* CONFIG_MMU */ | ||
81 | extern void copy_page(void *to, void *from); | ||
82 | # endif /* CONFIG_MMU */ | ||
83 | |||
84 | # define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) | ||
85 | |||
86 | # define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) | ||
87 | # define copy_user_page(vto, vfrom, vaddr, topg) \ | ||
55 | memcpy((vto), (vfrom), PAGE_SIZE) | 88 | memcpy((vto), (vfrom), PAGE_SIZE) |
56 | 89 | ||
57 | /* | 90 | /* |
@@ -60,21 +93,32 @@ extern unsigned int __page_offset; | |||
60 | typedef struct page *pgtable_t; | 93 | typedef struct page *pgtable_t; |
61 | typedef struct { unsigned long pte; } pte_t; | 94 | typedef struct { unsigned long pte; } pte_t; |
62 | typedef struct { unsigned long pgprot; } pgprot_t; | 95 | typedef struct { unsigned long pgprot; } pgprot_t; |
96 | /* FIXME this can depend on linux kernel version */ | ||
97 | # ifdef CONFIG_MMU | ||
98 | typedef struct { unsigned long pmd; } pmd_t; | ||
99 | typedef struct { unsigned long pgd; } pgd_t; | ||
100 | # else /* CONFIG_MMU */ | ||
63 | typedef struct { unsigned long ste[64]; } pmd_t; | 101 | typedef struct { unsigned long ste[64]; } pmd_t; |
64 | typedef struct { pmd_t pue[1]; } pud_t; | 102 | typedef struct { pmd_t pue[1]; } pud_t; |
65 | typedef struct { pud_t pge[1]; } pgd_t; | 103 | typedef struct { pud_t pge[1]; } pgd_t; |
104 | # endif /* CONFIG_MMU */ | ||
66 | 105 | ||
106 | # define pte_val(x) ((x).pte) | ||
107 | # define pgprot_val(x) ((x).pgprot) | ||
67 | 108 | ||
68 | #define pte_val(x) ((x).pte) | 109 | # ifdef CONFIG_MMU |
69 | #define pgprot_val(x) ((x).pgprot) | 110 | # define pmd_val(x) ((x).pmd) |
70 | #define pmd_val(x) ((x).ste[0]) | 111 | # define pgd_val(x) ((x).pgd) |
71 | #define pud_val(x) ((x).pue[0]) | 112 | # else /* CONFIG_MMU */ |
72 | #define pgd_val(x) ((x).pge[0]) | 113 | # define pmd_val(x) ((x).ste[0]) |
114 | # define pud_val(x) ((x).pue[0]) | ||
115 | # define pgd_val(x) ((x).pge[0]) | ||
116 | # endif /* CONFIG_MMU */ | ||
73 | 117 | ||
74 | #define __pte(x) ((pte_t) { (x) }) | 118 | # define __pte(x) ((pte_t) { (x) }) |
75 | #define __pmd(x) ((pmd_t) { (x) }) | 119 | # define __pmd(x) ((pmd_t) { (x) }) |
76 | #define __pgd(x) ((pgd_t) { (x) }) | 120 | # define __pgd(x) ((pgd_t) { (x) }) |
77 | #define __pgprot(x) ((pgprot_t) { (x) }) | 121 | # define __pgprot(x) ((pgprot_t) { (x) }) |
78 | 122 | ||
79 | /** | 123 | /** |
80 | * Conversions for virtual address, physical address, pfn, and struct | 124 | * Conversions for virtual address, physical address, pfn, and struct |
@@ -94,47 +138,83 @@ extern unsigned long max_low_pfn; | |||
94 | extern unsigned long min_low_pfn; | 138 | extern unsigned long min_low_pfn; |
95 | extern unsigned long max_pfn; | 139 | extern unsigned long max_pfn; |
96 | 140 | ||
97 | #define __pa(vaddr) ((unsigned long) (vaddr)) | 141 | extern unsigned long memory_start; |
98 | #define __va(paddr) ((void *) (paddr)) | 142 | extern unsigned long memory_end; |
143 | extern unsigned long memory_size; | ||
99 | 144 | ||
100 | #define phys_to_pfn(phys) (PFN_DOWN(phys)) | 145 | extern int page_is_ram(unsigned long pfn); |
101 | #define pfn_to_phys(pfn) (PFN_PHYS(pfn)) | ||
102 | 146 | ||
103 | #define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) | 147 | # define phys_to_pfn(phys) (PFN_DOWN(phys)) |
104 | #define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) | 148 | # define pfn_to_phys(pfn) (PFN_PHYS(pfn)) |
105 | 149 | ||
106 | #define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) | 150 | # define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) |
107 | #define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) | 151 | # define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) |
108 | 152 | ||
109 | #define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) | 153 | # ifdef CONFIG_MMU |
110 | #define page_to_bus(page) (page_to_phys(page)) | 154 | # define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr)) |
111 | #define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) | 155 | # else /* CONFIG_MMU */ |
156 | # define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) | ||
157 | # define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) | ||
158 | # define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) | ||
159 | # define page_to_bus(page) (page_to_phys(page)) | ||
160 | # define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) | ||
161 | # endif /* CONFIG_MMU */ | ||
112 | 162 | ||
113 | extern unsigned int memory_start; | 163 | # ifndef CONFIG_MMU |
114 | extern unsigned int memory_end; | 164 | # define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr) |
115 | extern unsigned int memory_size; | 165 | # define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) |
166 | # else /* CONFIG_MMU */ | ||
167 | # define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT) | ||
168 | # define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET)) | ||
169 | # define VALID_PAGE(page) ((page - mem_map) < max_mapnr) | ||
170 | # endif /* CONFIG_MMU */ | ||
116 | 171 | ||
117 | #define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr) | 172 | # endif /* __ASSEMBLY__ */ |
118 | 173 | ||
119 | #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) | 174 | #define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) |
120 | 175 | ||
121 | #else | ||
122 | #define tophys(rd, rs) (addik rd, rs, 0) | ||
123 | #define tovirt(rd, rs) (addik rd, rs, 0) | ||
124 | #endif /* __ASSEMBLY__ */ | ||
125 | 176 | ||
126 | #define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) | 177 | # ifndef CONFIG_MMU |
178 | # define __pa(vaddr) ((unsigned long) (vaddr)) | ||
179 | # define __va(paddr) ((void *) (paddr)) | ||
180 | # else /* CONFIG_MMU */ | ||
181 | # define __pa(x) __virt_to_phys((unsigned long)(x)) | ||
182 | # define __va(x) ((void *)__phys_to_virt((unsigned long)(x))) | ||
183 | # endif /* CONFIG_MMU */ | ||
184 | |||
127 | 185 | ||
128 | /* Convert between virtual and physical address for MMU. */ | 186 | /* Convert between virtual and physical address for MMU. */ |
129 | /* Handle MicroBlaze processor with virtual memory. */ | 187 | /* Handle MicroBlaze processor with virtual memory. */ |
188 | #ifndef CONFIG_MMU | ||
130 | #define __virt_to_phys(addr) addr | 189 | #define __virt_to_phys(addr) addr |
131 | #define __phys_to_virt(addr) addr | 190 | #define __phys_to_virt(addr) addr |
191 | #define tophys(rd, rs) addik rd, rs, 0 | ||
192 | #define tovirt(rd, rs) addik rd, rs, 0 | ||
193 | #else | ||
194 | #define __virt_to_phys(addr) \ | ||
195 | ((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START) | ||
196 | #define __phys_to_virt(addr) \ | ||
197 | ((addr) + CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR) | ||
198 | #define tophys(rd, rs) \ | ||
199 | addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START) | ||
200 | #define tovirt(rd, rs) \ | ||
201 | addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR) | ||
202 | #endif /* CONFIG_MMU */ | ||
132 | 203 | ||
133 | #define TOPHYS(addr) __virt_to_phys(addr) | 204 | #define TOPHYS(addr) __virt_to_phys(addr) |
134 | 205 | ||
206 | #ifdef CONFIG_MMU | ||
207 | #ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC | ||
208 | #define WANT_PAGE_VIRTUAL 1 /* page alloc 2 relies on this */ | ||
209 | #endif | ||
210 | |||
211 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | ||
212 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
213 | #endif /* CONFIG_MMU */ | ||
214 | |||
135 | #endif /* __KERNEL__ */ | 215 | #endif /* __KERNEL__ */ |
136 | 216 | ||
137 | #include <asm-generic/memory_model.h> | 217 | #include <asm-generic/memory_model.h> |
138 | #include <asm-generic/page.h> | 218 | #include <asm-generic/getorder.h> |
139 | 219 | ||
140 | #endif /* _ASM_MICROBLAZE_PAGE_H */ | 220 | #endif /* _ASM_MICROBLAZE_PAGE_H */ |
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index 2a4b35484010..59a757e46ba5 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -9,6 +11,195 @@ | |||
9 | #ifndef _ASM_MICROBLAZE_PGALLOC_H | 11 | #ifndef _ASM_MICROBLAZE_PGALLOC_H |
10 | #define _ASM_MICROBLAZE_PGALLOC_H | 12 | #define _ASM_MICROBLAZE_PGALLOC_H |
11 | 13 | ||
14 | #ifdef CONFIG_MMU | ||
15 | |||
16 | #include <linux/kernel.h> /* For min/max macros */ | ||
17 | #include <linux/highmem.h> | ||
18 | #include <asm/setup.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/cache.h> | ||
22 | |||
23 | #define PGDIR_ORDER 0 | ||
24 | |||
25 | /* | ||
26 | * This is handled very differently on MicroBlaze since out page tables | ||
27 | * are all 0's and I want to be able to use these zero'd pages elsewhere | ||
28 | * as well - it gives us quite a speedup. | ||
29 | * -- Cort | ||
30 | */ | ||
31 | extern struct pgtable_cache_struct { | ||
32 | unsigned long *pgd_cache; | ||
33 | unsigned long *pte_cache; | ||
34 | unsigned long pgtable_cache_sz; | ||
35 | } quicklists; | ||
36 | |||
37 | #define pgd_quicklist (quicklists.pgd_cache) | ||
38 | #define pmd_quicklist ((unsigned long *)0) | ||
39 | #define pte_quicklist (quicklists.pte_cache) | ||
40 | #define pgtable_cache_size (quicklists.pgtable_cache_sz) | ||
41 | |||
42 | extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */ | ||
43 | extern atomic_t zero_sz; /* # currently pre-zero'd pages */ | ||
44 | extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */ | ||
45 | extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */ | ||
46 | extern atomic_t zerototal; /* # pages zero'd over time */ | ||
47 | |||
48 | #define zero_quicklist (zero_cache) | ||
49 | #define zero_cache_sz (zero_sz) | ||
50 | #define zero_cache_calls (zeropage_calls) | ||
51 | #define zero_cache_hits (zeropage_hits) | ||
52 | #define zero_cache_total (zerototal) | ||
53 | |||
54 | /* | ||
55 | * return a pre-zero'd page from the list, | ||
56 | * return NULL if none available -- Cort | ||
57 | */ | ||
58 | extern unsigned long get_zero_page_fast(void); | ||
59 | |||
60 | extern void __bad_pte(pmd_t *pmd); | ||
61 | |||
62 | extern inline pgd_t *get_pgd_slow(void) | ||
63 | { | ||
64 | pgd_t *ret; | ||
65 | |||
66 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER); | ||
67 | if (ret != NULL) | ||
68 | clear_page(ret); | ||
69 | return ret; | ||
70 | } | ||
71 | |||
72 | extern inline pgd_t *get_pgd_fast(void) | ||
73 | { | ||
74 | unsigned long *ret; | ||
75 | |||
76 | ret = pgd_quicklist; | ||
77 | if (ret != NULL) { | ||
78 | pgd_quicklist = (unsigned long *)(*ret); | ||
79 | ret[0] = 0; | ||
80 | pgtable_cache_size--; | ||
81 | } else | ||
82 | ret = (unsigned long *)get_pgd_slow(); | ||
83 | return (pgd_t *)ret; | ||
84 | } | ||
85 | |||
86 | extern inline void free_pgd_fast(pgd_t *pgd) | ||
87 | { | ||
88 | *(unsigned long **)pgd = pgd_quicklist; | ||
89 | pgd_quicklist = (unsigned long *) pgd; | ||
90 | pgtable_cache_size++; | ||
91 | } | ||
92 | |||
93 | extern inline void free_pgd_slow(pgd_t *pgd) | ||
94 | { | ||
95 | free_page((unsigned long)pgd); | ||
96 | } | ||
97 | |||
98 | #define pgd_free(mm, pgd) free_pgd_fast(pgd) | ||
99 | #define pgd_alloc(mm) get_pgd_fast() | ||
100 | |||
101 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
102 | |||
103 | /* | ||
104 | * We don't have any real pmd's, and this code never triggers because | ||
105 | * the pgd will always be present.. | ||
106 | */ | ||
107 | #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) | ||
108 | #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) | ||
109 | /* FIXME two definition - look below */ | ||
110 | #define pmd_free(mm, x) do { } while (0) | ||
111 | #define pgd_populate(mm, pmd, pte) BUG() | ||
112 | |||
113 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | ||
114 | unsigned long address) | ||
115 | { | ||
116 | pte_t *pte; | ||
117 | extern int mem_init_done; | ||
118 | extern void *early_get_page(void); | ||
119 | if (mem_init_done) { | ||
120 | pte = (pte_t *)__get_free_page(GFP_KERNEL | | ||
121 | __GFP_REPEAT | __GFP_ZERO); | ||
122 | } else { | ||
123 | pte = (pte_t *)early_get_page(); | ||
124 | if (pte) | ||
125 | clear_page(pte); | ||
126 | } | ||
127 | return pte; | ||
128 | } | ||
129 | |||
130 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | ||
131 | unsigned long address) | ||
132 | { | ||
133 | struct page *ptepage; | ||
134 | |||
135 | #ifdef CONFIG_HIGHPTE | ||
136 | int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT; | ||
137 | #else | ||
138 | int flags = GFP_KERNEL | __GFP_REPEAT; | ||
139 | #endif | ||
140 | |||
141 | ptepage = alloc_pages(flags, 0); | ||
142 | if (ptepage) | ||
143 | clear_highpage(ptepage); | ||
144 | return ptepage; | ||
145 | } | ||
146 | |||
147 | static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, | ||
148 | unsigned long address) | ||
149 | { | ||
150 | unsigned long *ret; | ||
151 | |||
152 | ret = pte_quicklist; | ||
153 | if (ret != NULL) { | ||
154 | pte_quicklist = (unsigned long *)(*ret); | ||
155 | ret[0] = 0; | ||
156 | pgtable_cache_size--; | ||
157 | } | ||
158 | return (pte_t *)ret; | ||
159 | } | ||
160 | |||
161 | extern inline void pte_free_fast(pte_t *pte) | ||
162 | { | ||
163 | *(unsigned long **)pte = pte_quicklist; | ||
164 | pte_quicklist = (unsigned long *) pte; | ||
165 | pgtable_cache_size++; | ||
166 | } | ||
167 | |||
168 | extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
169 | { | ||
170 | free_page((unsigned long)pte); | ||
171 | } | ||
172 | |||
173 | extern inline void pte_free_slow(struct page *ptepage) | ||
174 | { | ||
175 | __free_page(ptepage); | ||
176 | } | ||
177 | |||
178 | extern inline void pte_free(struct mm_struct *mm, struct page *ptepage) | ||
179 | { | ||
180 | __free_page(ptepage); | ||
181 | } | ||
182 | |||
183 | #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) | ||
184 | |||
185 | #define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte)) | ||
186 | |||
187 | #define pmd_populate_kernel(mm, pmd, pte) \ | ||
188 | (pmd_val(*(pmd)) = (unsigned long) (pte)) | ||
189 | |||
190 | /* | ||
191 | * We don't have any real pmd's, and this code never triggers because | ||
192 | * the pgd will always be present.. | ||
193 | */ | ||
194 | #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) | ||
195 | /*#define pmd_free(mm, x) do { } while (0)*/ | ||
196 | #define __pmd_free_tlb(tlb, x) do { } while (0) | ||
197 | #define pgd_populate(mm, pmd, pte) BUG() | ||
198 | |||
199 | extern int do_check_pgt_cache(int, int); | ||
200 | |||
201 | #endif /* CONFIG_MMU */ | ||
202 | |||
12 | #define check_pgt_cache() do {} while (0) | 203 | #define check_pgt_cache() do {} while (0) |
13 | 204 | ||
14 | #endif /* _ASM_MICROBLAZE_PGALLOC_H */ | 205 | #endif /* _ASM_MICROBLAZE_PGALLOC_H */ |
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 4df31e46568e..4c57a586a989 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -14,6 +16,8 @@ | |||
14 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 16 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
15 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 17 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
16 | 18 | ||
19 | #ifndef CONFIG_MMU | ||
20 | |||
17 | #define pgd_present(pgd) (1) /* pages are always present on non MMU */ | 21 | #define pgd_present(pgd) (1) /* pages are always present on non MMU */ |
18 | #define pgd_none(pgd) (0) | 22 | #define pgd_none(pgd) (0) |
19 | #define pgd_bad(pgd) (0) | 23 | #define pgd_bad(pgd) (0) |
@@ -27,6 +31,8 @@ | |||
27 | #define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ | 31 | #define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ |
28 | #define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ | 32 | #define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ |
29 | 33 | ||
34 | #define pgprot_noncached(x) (x) | ||
35 | |||
30 | #define __swp_type(x) (0) | 36 | #define __swp_type(x) (0) |
31 | #define __swp_offset(x) (0) | 37 | #define __swp_offset(x) (0) |
32 | #define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) | 38 | #define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) |
@@ -45,6 +51,538 @@ static inline int pte_file(pte_t pte) { return 0; } | |||
45 | 51 | ||
46 | #define arch_enter_lazy_cpu_mode() do {} while (0) | 52 | #define arch_enter_lazy_cpu_mode() do {} while (0) |
47 | 53 | ||
54 | #else /* CONFIG_MMU */ | ||
55 | |||
56 | #include <asm-generic/4level-fixup.h> | ||
57 | |||
58 | #ifdef __KERNEL__ | ||
59 | #ifndef __ASSEMBLY__ | ||
60 | |||
61 | #include <linux/sched.h> | ||
62 | #include <linux/threads.h> | ||
63 | #include <asm/processor.h> /* For TASK_SIZE */ | ||
64 | #include <asm/mmu.h> | ||
65 | #include <asm/page.h> | ||
66 | |||
67 | #define FIRST_USER_ADDRESS 0 | ||
68 | |||
69 | extern unsigned long va_to_phys(unsigned long address); | ||
70 | extern pte_t *va_to_pte(unsigned long address); | ||
71 | extern unsigned long ioremap_bot, ioremap_base; | ||
72 | |||
73 | /* | ||
74 | * The following only work if pte_present() is true. | ||
75 | * Undefined behaviour if not.. | ||
76 | */ | ||
77 | |||
78 | static inline int pte_special(pte_t pte) { return 0; } | ||
79 | |||
80 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | ||
81 | |||
82 | /* Start and end of the vmalloc area. */ | ||
83 | /* Make sure to map the vmalloc area above the pinned kernel memory area | ||
84 | of 32Mb. */ | ||
85 | #define VMALLOC_START (CONFIG_KERNEL_START + \ | ||
86 | max(32 * 1024 * 1024UL, memory_size)) | ||
87 | #define VMALLOC_END ioremap_bot | ||
88 | #define VMALLOC_VMADDR(x) ((unsigned long)(x)) | ||
89 | |||
90 | #endif /* __ASSEMBLY__ */ | ||
91 | |||
92 | /* | ||
93 | * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash | ||
94 | * table containing PTEs, together with a set of 16 segment registers, to | ||
95 | * define the virtual to physical address mapping. | ||
96 | * | ||
97 | * We use the hash table as an extended TLB, i.e. a cache of currently | ||
98 | * active mappings. We maintain a two-level page table tree, much | ||
99 | * like that used by the i386, for the sake of the Linux memory | ||
100 | * management code. Low-level assembler code in hashtable.S | ||
101 | * (procedure hash_page) is responsible for extracting ptes from the | ||
102 | * tree and putting them into the hash table when necessary, and | ||
103 | * updating the accessed and modified bits in the page table tree. | ||
104 | */ | ||
105 | |||
106 | /* | ||
107 | * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The | ||
108 | * instruction and data sides share a unified, 64-entry, semi-associative | ||
109 | * TLB which is maintained totally under software control. In addition, the | ||
110 | * instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative | ||
111 | * TLB which serves as a first level to the shared TLB. These two TLBs are | ||
112 | * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions). | ||
113 | */ | ||
114 | |||
115 | /* | ||
116 | * The normal case is that PTEs are 32-bits and we have a 1-page | ||
117 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus | ||
118 | * | ||
119 | */ | ||
120 | |||
121 | /* PMD_SHIFT determines the size of the area mapped by the PTE pages */ | ||
122 | #define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT) | ||
123 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
124 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
125 | |||
126 | /* PGDIR_SHIFT determines what a top-level page table entry can map */ | ||
127 | #define PGDIR_SHIFT PMD_SHIFT | ||
128 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
129 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
130 | |||
131 | /* | ||
132 | * entries per page directory level: our page-table tree is two-level, so | ||
133 | * we don't really have any PMD directory. | ||
134 | */ | ||
135 | #define PTRS_PER_PTE (1 << PTE_SHIFT) | ||
136 | #define PTRS_PER_PMD 1 | ||
137 | #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) | ||
138 | |||
139 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) | ||
140 | #define FIRST_USER_PGD_NR 0 | ||
141 | |||
142 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) | ||
143 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | ||
144 | |||
145 | #define pte_ERROR(e) \ | ||
146 | printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \ | ||
147 | __FILE__, __LINE__, pte_val(e)) | ||
148 | #define pmd_ERROR(e) \ | ||
149 | printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \ | ||
150 | __FILE__, __LINE__, pmd_val(e)) | ||
151 | #define pgd_ERROR(e) \ | ||
152 | printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ | ||
153 | __FILE__, __LINE__, pgd_val(e)) | ||
154 | |||
155 | /* | ||
156 | * Bits in a linux-style PTE. These match the bits in the | ||
157 | * (hardware-defined) PTE as closely as possible. | ||
158 | */ | ||
159 | |||
160 | /* There are several potential gotchas here. The hardware TLBLO | ||
161 | * field looks like this: | ||
162 | * | ||
163 | * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | ||
164 | * RPN..................... 0 0 EX WR ZSEL....... W I M G | ||
165 | * | ||
166 | * Where possible we make the Linux PTE bits match up with this | ||
167 | * | ||
168 | * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can | ||
169 | * support down to 1k pages), this is done in the TLBMiss exception | ||
170 | * handler. | ||
171 | * - We use only zones 0 (for kernel pages) and 1 (for user pages) | ||
172 | * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB | ||
173 | * miss handler. Bit 27 is PAGE_USER, thus selecting the correct | ||
174 | * zone. | ||
175 | * - PRESENT *must* be in the bottom two bits because swap cache | ||
176 | * entries use the top 30 bits. Because 4xx doesn't support SMP | ||
177 | * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30 | ||
178 | * is cleared in the TLB miss handler before the TLB entry is loaded. | ||
179 | * - All other bits of the PTE are loaded into TLBLO without | ||
180 | * * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for | ||
181 | * software PTE bits. We actually use use bits 21, 24, 25, and | ||
182 | * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and | ||
183 | * PRESENT. | ||
184 | */ | ||
185 | |||
186 | /* Definitions for MicroBlaze. */ | ||
187 | #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ | ||
188 | #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ | ||
189 | #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ | ||
190 | #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ | ||
191 | #define _PAGE_USER 0x010 /* matches one of the zone permission bits */ | ||
192 | #define _PAGE_RW 0x040 /* software: Writes permitted */ | ||
193 | #define _PAGE_DIRTY 0x080 /* software: dirty page */ | ||
194 | #define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */ | ||
195 | #define _PAGE_HWEXEC 0x200 /* hardware: EX permission */ | ||
196 | #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ | ||
197 | #define _PMD_PRESENT PAGE_MASK | ||
198 | |||
199 | /* | ||
200 | * Some bits are unused... | ||
201 | */ | ||
202 | #ifndef _PAGE_HASHPTE | ||
203 | #define _PAGE_HASHPTE 0 | ||
204 | #endif | ||
205 | #ifndef _PTE_NONE_MASK | ||
206 | #define _PTE_NONE_MASK 0 | ||
207 | #endif | ||
208 | #ifndef _PAGE_SHARED | ||
209 | #define _PAGE_SHARED 0 | ||
210 | #endif | ||
211 | #ifndef _PAGE_HWWRITE | ||
212 | #define _PAGE_HWWRITE 0 | ||
213 | #endif | ||
214 | #ifndef _PAGE_HWEXEC | ||
215 | #define _PAGE_HWEXEC 0 | ||
216 | #endif | ||
217 | #ifndef _PAGE_EXEC | ||
218 | #define _PAGE_EXEC 0 | ||
219 | #endif | ||
220 | |||
221 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | ||
222 | |||
223 | /* | ||
224 | * Note: the _PAGE_COHERENT bit automatically gets set in the hardware | ||
225 | * PTE if CONFIG_SMP is defined (hash_page does this); there is no need | ||
226 | * to have it in the Linux PTE, and in fact the bit could be reused for | ||
227 | * another purpose. -- paulus. | ||
228 | */ | ||
229 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) | ||
230 | #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) | ||
231 | |||
232 | #define _PAGE_KERNEL \ | ||
233 | (_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC) | ||
234 | |||
235 | #define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED) | ||
236 | |||
237 | #define PAGE_NONE __pgprot(_PAGE_BASE) | ||
238 | #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) | ||
239 | #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
240 | #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) | ||
241 | #define PAGE_SHARED_X \ | ||
242 | __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) | ||
243 | #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) | ||
244 | #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
245 | |||
246 | #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) | ||
247 | #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED) | ||
248 | #define PAGE_KERNEL_CI __pgprot(_PAGE_IO) | ||
249 | |||
250 | /* | ||
251 | * We consider execute permission the same as read. | ||
252 | * Also, write permissions imply read permissions. | ||
253 | */ | ||
254 | #define __P000 PAGE_NONE | ||
255 | #define __P001 PAGE_READONLY_X | ||
256 | #define __P010 PAGE_COPY | ||
257 | #define __P011 PAGE_COPY_X | ||
258 | #define __P100 PAGE_READONLY | ||
259 | #define __P101 PAGE_READONLY_X | ||
260 | #define __P110 PAGE_COPY | ||
261 | #define __P111 PAGE_COPY_X | ||
262 | |||
263 | #define __S000 PAGE_NONE | ||
264 | #define __S001 PAGE_READONLY_X | ||
265 | #define __S010 PAGE_SHARED | ||
266 | #define __S011 PAGE_SHARED_X | ||
267 | #define __S100 PAGE_READONLY | ||
268 | #define __S101 PAGE_READONLY_X | ||
269 | #define __S110 PAGE_SHARED | ||
270 | #define __S111 PAGE_SHARED_X | ||
271 | |||
272 | #ifndef __ASSEMBLY__ | ||
273 | /* | ||
274 | * ZERO_PAGE is a global shared page that is always zero: used | ||
275 | * for zero-mapped memory areas etc.. | ||
276 | */ | ||
277 | extern unsigned long empty_zero_page[1024]; | ||
278 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
279 | |||
280 | #endif /* __ASSEMBLY__ */ | ||
281 | |||
282 | #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0) | ||
283 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | ||
284 | #define pte_clear(mm, addr, ptep) \ | ||
285 | do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0) | ||
286 | |||
287 | #define pmd_none(pmd) (!pmd_val(pmd)) | ||
288 | #define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0) | ||
289 | #define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0) | ||
290 | #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) | ||
291 | |||
292 | #define pte_page(x) (mem_map + (unsigned long) \ | ||
293 | ((pte_val(x) - memory_start) >> PAGE_SHIFT)) | ||
294 | #define PFN_SHIFT_OFFSET (PAGE_SHIFT) | ||
295 | |||
296 | #define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET) | ||
297 | |||
298 | #define pfn_pte(pfn, prot) \ | ||
299 | __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot)) | ||
300 | |||
301 | #ifndef __ASSEMBLY__ | ||
302 | /* | ||
303 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
304 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
305 | * into the pgd entry) | ||
306 | */ | ||
307 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
308 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
309 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
310 | #define pgd_clear(xp) do { } while (0) | ||
311 | #define pgd_page(pgd) \ | ||
312 | ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) | ||
313 | |||
314 | /* | ||
315 | * The following only work if pte_present() is true. | ||
316 | * Undefined behaviour if not.. | ||
317 | */ | ||
318 | static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } | ||
319 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | ||
320 | static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } | ||
321 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
322 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
323 | /* FIXME */ | ||
324 | static inline int pte_file(pte_t pte) { return 0; } | ||
325 | |||
326 | static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } | ||
327 | static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } | ||
328 | |||
329 | static inline pte_t pte_rdprotect(pte_t pte) \ | ||
330 | { pte_val(pte) &= ~_PAGE_USER; return pte; } | ||
331 | static inline pte_t pte_wrprotect(pte_t pte) \ | ||
332 | { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } | ||
333 | static inline pte_t pte_exprotect(pte_t pte) \ | ||
334 | { pte_val(pte) &= ~_PAGE_EXEC; return pte; } | ||
335 | static inline pte_t pte_mkclean(pte_t pte) \ | ||
336 | { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } | ||
337 | static inline pte_t pte_mkold(pte_t pte) \ | ||
338 | { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | ||
339 | |||
340 | static inline pte_t pte_mkread(pte_t pte) \ | ||
341 | { pte_val(pte) |= _PAGE_USER; return pte; } | ||
342 | static inline pte_t pte_mkexec(pte_t pte) \ | ||
343 | { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; } | ||
344 | static inline pte_t pte_mkwrite(pte_t pte) \ | ||
345 | { pte_val(pte) |= _PAGE_RW; return pte; } | ||
346 | static inline pte_t pte_mkdirty(pte_t pte) \ | ||
347 | { pte_val(pte) |= _PAGE_DIRTY; return pte; } | ||
348 | static inline pte_t pte_mkyoung(pte_t pte) \ | ||
349 | { pte_val(pte) |= _PAGE_ACCESSED; return pte; } | ||
350 | |||
351 | /* | ||
352 | * Conversion functions: convert a page and protection to a page entry, | ||
353 | * and a page entry and page directory to the page they refer to. | ||
354 | */ | ||
355 | |||
356 | static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot) | ||
357 | { | ||
358 | pte_t pte; | ||
359 | pte_val(pte) = physpage | pgprot_val(pgprot); | ||
360 | return pte; | ||
361 | } | ||
362 | |||
363 | #define mk_pte(page, pgprot) \ | ||
364 | ({ \ | ||
365 | pte_t pte; \ | ||
366 | pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \ | ||
367 | pgprot_val(pgprot); \ | ||
368 | pte; \ | ||
369 | }) | ||
370 | |||
371 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
372 | { | ||
373 | pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); | ||
374 | return pte; | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * Atomic PTE updates. | ||
379 | * | ||
380 | * pte_update clears and sets bit atomically, and returns | ||
381 | * the old pte value. | ||
382 | * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant | ||
383 | * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits. | ||
384 | */ | ||
385 | static inline unsigned long pte_update(pte_t *p, unsigned long clr, | ||
386 | unsigned long set) | ||
387 | { | ||
388 | unsigned long old, tmp, msr; | ||
389 | |||
390 | __asm__ __volatile__("\ | ||
391 | msrclr %2, 0x2\n\ | ||
392 | nop\n\ | ||
393 | lw %0, %4, r0\n\ | ||
394 | andn %1, %0, %5\n\ | ||
395 | or %1, %1, %6\n\ | ||
396 | sw %1, %4, r0\n\ | ||
397 | mts rmsr, %2\n\ | ||
398 | nop" | ||
399 | : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p) | ||
400 | : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p) | ||
401 | : "cc"); | ||
402 | |||
403 | return old; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * set_pte stores a linux PTE into the linux page table. | ||
408 | */ | ||
409 | static inline void set_pte(struct mm_struct *mm, unsigned long addr, | ||
410 | pte_t *ptep, pte_t pte) | ||
411 | { | ||
412 | *ptep = pte; | ||
413 | } | ||
414 | |||
415 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
416 | pte_t *ptep, pte_t pte) | ||
417 | { | ||
418 | *ptep = pte; | ||
419 | } | ||
420 | |||
421 | static inline int ptep_test_and_clear_young(struct mm_struct *mm, | ||
422 | unsigned long addr, pte_t *ptep) | ||
423 | { | ||
424 | return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0; | ||
425 | } | ||
426 | |||
427 | static inline int ptep_test_and_clear_dirty(struct mm_struct *mm, | ||
428 | unsigned long addr, pte_t *ptep) | ||
429 | { | ||
430 | return (pte_update(ptep, \ | ||
431 | (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0; | ||
432 | } | ||
433 | |||
434 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | ||
435 | unsigned long addr, pte_t *ptep) | ||
436 | { | ||
437 | return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); | ||
438 | } | ||
439 | |||
440 | /*static inline void ptep_set_wrprotect(struct mm_struct *mm, | ||
441 | unsigned long addr, pte_t *ptep) | ||
442 | { | ||
443 | pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0); | ||
444 | }*/ | ||
445 | |||
446 | static inline void ptep_mkdirty(struct mm_struct *mm, | ||
447 | unsigned long addr, pte_t *ptep) | ||
448 | { | ||
449 | pte_update(ptep, 0, _PAGE_DIRTY); | ||
450 | } | ||
451 | |||
452 | /*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/ | ||
453 | |||
454 | /* Convert pmd entry to page */ | ||
455 | /* our pmd entry is an effective address of pte table*/ | ||
456 | /* returns effective address of the pmd entry*/ | ||
457 | #define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) | ||
458 | |||
459 | /* returns struct *page of the pmd entry*/ | ||
460 | #define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT)) | ||
461 | |||
462 | /* to find an entry in a kernel page-table-directory */ | ||
463 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
464 | |||
465 | /* to find an entry in a page-table-directory */ | ||
466 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) | ||
467 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | ||
468 | |||
469 | /* Find an entry in the second-level page table.. */ | ||
470 | static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address) | ||
471 | { | ||
472 | return (pmd_t *) dir; | ||
473 | } | ||
474 | |||
475 | /* Find an entry in the third-level page table.. */ | ||
476 | #define pte_index(address) \ | ||
477 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
478 | #define pte_offset_kernel(dir, addr) \ | ||
479 | ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr)) | ||
480 | #define pte_offset_map(dir, addr) \ | ||
481 | ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr)) | ||
482 | #define pte_offset_map_nested(dir, addr) \ | ||
483 | ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr)) | ||
484 | |||
485 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | ||
486 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | ||
487 | |||
488 | /* Encode and decode a nonlinear file mapping entry */ | ||
489 | #define PTE_FILE_MAX_BITS 29 | ||
490 | #define pte_to_pgoff(pte) (pte_val(pte) >> 3) | ||
491 | #define pgoff_to_pte(off) ((pte_t) { ((off) << 3) }) | ||
492 | |||
493 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
494 | |||
495 | /* | ||
496 | * When flushing the tlb entry for a page, we also need to flush the hash | ||
497 | * table entry. flush_hash_page is assembler (for speed) in hashtable.S. | ||
498 | */ | ||
499 | extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep); | ||
500 | |||
501 | /* Add an HPTE to the hash table */ | ||
502 | extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep); | ||
503 | |||
504 | /* | ||
505 | * Encode and decode a swap entry. | ||
506 | * Note that the bits we use in a PTE for representing a swap entry | ||
507 | * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit | ||
508 | * (if used). -- paulus | ||
509 | */ | ||
510 | #define __swp_type(entry) ((entry).val & 0x3f) | ||
511 | #define __swp_offset(entry) ((entry).val >> 6) | ||
512 | #define __swp_entry(type, offset) \ | ||
513 | ((swp_entry_t) { (type) | ((offset) << 6) }) | ||
514 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 }) | ||
515 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 }) | ||
516 | |||
517 | |||
518 | /* CONFIG_APUS */ | ||
519 | /* For virtual address to physical address conversion */ | ||
520 | extern void cache_clear(__u32 addr, int length); | ||
521 | extern void cache_push(__u32 addr, int length); | ||
522 | extern int mm_end_of_chunk(unsigned long addr, int len); | ||
523 | extern unsigned long iopa(unsigned long addr); | ||
524 | /* extern unsigned long mm_ptov(unsigned long addr) \ | ||
525 | __attribute__ ((const)); TBD */ | ||
526 | |||
527 | /* Values for nocacheflag and cmode */ | ||
528 | /* These are not used by the APUS kernel_map, but prevents | ||
529 | * compilation errors. | ||
530 | */ | ||
531 | #define IOMAP_FULL_CACHING 0 | ||
532 | #define IOMAP_NOCACHE_SER 1 | ||
533 | #define IOMAP_NOCACHE_NONSER 2 | ||
534 | #define IOMAP_NO_COPYBACK 3 | ||
535 | |||
536 | /* | ||
537 | * Map some physical address range into the kernel address space. | ||
538 | */ | ||
539 | extern unsigned long kernel_map(unsigned long paddr, unsigned long size, | ||
540 | int nocacheflag, unsigned long *memavailp); | ||
541 | |||
542 | /* | ||
543 | * Set cache mode of (kernel space) address range. | ||
544 | */ | ||
545 | extern void kernel_set_cachemode(unsigned long address, unsigned long size, | ||
546 | unsigned int cmode); | ||
547 | |||
548 | /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ | ||
549 | #define kern_addr_valid(addr) (1) | ||
550 | |||
551 | #define io_remap_page_range remap_page_range | ||
552 | |||
553 | /* | ||
554 | * No page table caches to initialise | ||
555 | */ | ||
556 | #define pgtable_cache_init() do { } while (0) | ||
557 | |||
558 | void do_page_fault(struct pt_regs *regs, unsigned long address, | ||
559 | unsigned long error_code); | ||
560 | |||
561 | void __init io_block_mapping(unsigned long virt, phys_addr_t phys, | ||
562 | unsigned int size, int flags); | ||
563 | |||
564 | void __init adjust_total_lowmem(void); | ||
565 | void mapin_ram(void); | ||
566 | int map_page(unsigned long va, phys_addr_t pa, int flags); | ||
567 | |||
568 | extern int mem_init_done; | ||
569 | extern unsigned long ioremap_base; | ||
570 | extern unsigned long ioremap_bot; | ||
571 | |||
572 | asmlinkage void __init mmu_init(void); | ||
573 | |||
574 | void __init *early_get_page(void); | ||
575 | |||
576 | void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); | ||
577 | void consistent_free(void *vaddr); | ||
578 | void consistent_sync(void *vaddr, size_t size, int direction); | ||
579 | void consistent_sync_page(struct page *page, unsigned long offset, | ||
580 | size_t size, int direction); | ||
581 | #endif /* __ASSEMBLY__ */ | ||
582 | #endif /* __KERNEL__ */ | ||
583 | |||
584 | #endif /* CONFIG_MMU */ | ||
585 | |||
48 | #ifndef __ASSEMBLY__ | 586 | #ifndef __ASSEMBLY__ |
49 | #include <asm-generic/pgtable.h> | 587 | #include <asm-generic/pgtable.h> |
50 | 588 | ||
diff --git a/arch/microblaze/include/asm/posix_types.h b/arch/microblaze/include/asm/posix_types.h index b4df41c5dde2..8c758b231f37 100644 --- a/arch/microblaze/include/asm/posix_types.h +++ b/arch/microblaze/include/asm/posix_types.h | |||
@@ -16,7 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | typedef unsigned long __kernel_ino_t; | 18 | typedef unsigned long __kernel_ino_t; |
19 | typedef unsigned int __kernel_mode_t; | 19 | typedef unsigned short __kernel_mode_t; |
20 | typedef unsigned int __kernel_nlink_t; | 20 | typedef unsigned int __kernel_nlink_t; |
21 | typedef long __kernel_off_t; | 21 | typedef long __kernel_off_t; |
22 | typedef int __kernel_pid_t; | 22 | typedef int __kernel_pid_t; |
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 9329029d2614..563c6b9453f0 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Michal Simek | 2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> |
3 | * Copyright (C) 2008 PetaLogix | 3 | * Copyright (C) 2008-2009 PetaLogix |
4 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
5 | * | 5 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -26,14 +26,15 @@ extern const struct seq_operations cpuinfo_op; | |||
26 | # define cpu_sleep() do {} while (0) | 26 | # define cpu_sleep() do {} while (0) |
27 | # define prepare_to_copy(tsk) do {} while (0) | 27 | # define prepare_to_copy(tsk) do {} while (0) |
28 | 28 | ||
29 | # endif /* __ASSEMBLY__ */ | ||
30 | |||
31 | #define task_pt_regs(tsk) \ | 29 | #define task_pt_regs(tsk) \ |
32 | (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) | 30 | (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) |
33 | 31 | ||
34 | /* Do necessary setup to start up a newly executed thread. */ | 32 | /* Do necessary setup to start up a newly executed thread. */ |
35 | void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp); | 33 | void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp); |
36 | 34 | ||
35 | # endif /* __ASSEMBLY__ */ | ||
36 | |||
37 | # ifndef CONFIG_MMU | ||
37 | /* | 38 | /* |
38 | * User space process size: memory size | 39 | * User space process size: memory size |
39 | * | 40 | * |
@@ -85,4 +86,90 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | |||
85 | # define KSTK_EIP(tsk) (0) | 86 | # define KSTK_EIP(tsk) (0) |
86 | # define KSTK_ESP(tsk) (0) | 87 | # define KSTK_ESP(tsk) (0) |
87 | 88 | ||
89 | # else /* CONFIG_MMU */ | ||
90 | |||
91 | /* | ||
92 | * This is used to define STACK_TOP, and with MMU it must be below | ||
93 | * kernel base to select the correct PGD when handling MMU exceptions. | ||
94 | */ | ||
95 | # define TASK_SIZE (CONFIG_KERNEL_START) | ||
96 | |||
97 | /* | ||
98 | * This decides where the kernel will search for a free chunk of vm | ||
99 | * space during mmap's. | ||
100 | */ | ||
101 | # define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3) | ||
102 | |||
103 | # define THREAD_KSP 0 | ||
104 | |||
105 | # ifndef __ASSEMBLY__ | ||
106 | |||
107 | /* | ||
108 | * Default implementation of macro that returns current | ||
109 | * instruction pointer ("program counter"). | ||
110 | */ | ||
111 | # define current_text_addr() ({ __label__ _l; _l: &&_l; }) | ||
112 | |||
113 | /* If you change this, you must change the associated assembly-languages | ||
114 | * constants defined below, THREAD_*. | ||
115 | */ | ||
116 | struct thread_struct { | ||
117 | /* kernel stack pointer (must be first field in structure) */ | ||
118 | unsigned long ksp; | ||
119 | unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ | ||
120 | void *pgdir; /* root of page-table tree */ | ||
121 | struct pt_regs *regs; /* Pointer to saved register state */ | ||
122 | }; | ||
123 | |||
124 | # define INIT_THREAD { \ | ||
125 | .ksp = sizeof init_stack + (unsigned long)init_stack, \ | ||
126 | .pgdir = swapper_pg_dir, \ | ||
127 | } | ||
128 | |||
129 | /* Do necessary setup to start up a newly executed thread. */ | ||
130 | void start_thread(struct pt_regs *regs, | ||
131 | unsigned long pc, unsigned long usp); | ||
132 | |||
133 | /* Free all resources held by a thread. */ | ||
134 | extern inline void release_thread(struct task_struct *dead_task) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
139 | |||
140 | /* Free current thread data structures etc. */ | ||
141 | static inline void exit_thread(void) | ||
142 | { | ||
143 | } | ||
144 | |||
145 | /* Return saved (kernel) PC of a blocked thread. */ | ||
146 | # define thread_saved_pc(tsk) \ | ||
147 | ((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0) | ||
148 | |||
149 | unsigned long get_wchan(struct task_struct *p); | ||
150 | |||
151 | /* The size allocated for kernel stacks. This _must_ be a power of two! */ | ||
152 | # define KERNEL_STACK_SIZE 0x2000 | ||
153 | |||
154 | /* Return some info about the user process TASK. */ | ||
155 | # define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE) | ||
156 | # define task_regs(task) ((struct pt_regs *)task_tos(task) - 1) | ||
157 | |||
158 | # define task_pt_regs_plus_args(tsk) \ | ||
159 | (((void *)task_pt_regs(tsk)) - STATE_SAVE_ARG_SPACE) | ||
160 | |||
161 | # define task_sp(task) (task_regs(task)->r1) | ||
162 | # define task_pc(task) (task_regs(task)->pc) | ||
163 | /* Grotty old names for some. */ | ||
164 | # define KSTK_EIP(task) (task_pc(task)) | ||
165 | # define KSTK_ESP(task) (task_sp(task)) | ||
166 | |||
167 | /* FIXME */ | ||
168 | # define deactivate_mm(tsk, mm) do { } while (0) | ||
169 | |||
170 | # define STACK_TOP TASK_SIZE | ||
171 | # define STACK_TOP_MAX STACK_TOP | ||
172 | |||
173 | # endif /* __ASSEMBLY__ */ | ||
174 | # endif /* CONFIG_MMU */ | ||
88 | #endif /* _ASM_MICROBLAZE_PROCESSOR_H */ | 175 | #endif /* _ASM_MICROBLAZE_PROCESSOR_H */ |
diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h index 55015bce5e47..a917dc517736 100644 --- a/arch/microblaze/include/asm/ptrace.h +++ b/arch/microblaze/include/asm/ptrace.h | |||
@@ -10,7 +10,6 @@ | |||
10 | #define _ASM_MICROBLAZE_PTRACE_H | 10 | #define _ASM_MICROBLAZE_PTRACE_H |
11 | 11 | ||
12 | #ifndef __ASSEMBLY__ | 12 | #ifndef __ASSEMBLY__ |
13 | #include <linux/types.h> | ||
14 | 13 | ||
15 | typedef unsigned long microblaze_reg_t; | 14 | typedef unsigned long microblaze_reg_t; |
16 | 15 | ||
diff --git a/arch/microblaze/include/asm/registers.h b/arch/microblaze/include/asm/registers.h index 834142d9356f..68c3afb73877 100644 --- a/arch/microblaze/include/asm/registers.h +++ b/arch/microblaze/include/asm/registers.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Michal Simek | 2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> |
3 | * Copyright (C) 2008 PetaLogix | 3 | * Copyright (C) 2008-2009 PetaLogix |
4 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
5 | * | 5 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -30,4 +30,21 @@ | |||
30 | #define FSR_UF (1<<1) /* Underflow */ | 30 | #define FSR_UF (1<<1) /* Underflow */ |
31 | #define FSR_DO (1<<0) /* Denormalized operand error */ | 31 | #define FSR_DO (1<<0) /* Denormalized operand error */ |
32 | 32 | ||
33 | # ifdef CONFIG_MMU | ||
34 | /* Machine State Register (MSR) Fields */ | ||
35 | # define MSR_UM (1<<11) /* User Mode */ | ||
36 | # define MSR_UMS (1<<12) /* User Mode Save */ | ||
37 | # define MSR_VM (1<<13) /* Virtual Mode */ | ||
38 | # define MSR_VMS (1<<14) /* Virtual Mode Save */ | ||
39 | |||
40 | # define MSR_KERNEL (MSR_EE | MSR_VM) | ||
41 | /* # define MSR_USER (MSR_KERNEL | MSR_UM | MSR_IE) */ | ||
42 | # define MSR_KERNEL_VMS (MSR_EE | MSR_VMS) | ||
43 | /* # define MSR_USER_VMS (MSR_KERNEL_VMS | MSR_UMS | MSR_IE) */ | ||
44 | |||
45 | /* Exception State Register (ESR) Fields */ | ||
46 | # define ESR_DIZ (1<<11) /* Zone Protection */ | ||
47 | # define ESR_S (1<<10) /* Store instruction */ | ||
48 | |||
49 | # endif /* CONFIG_MMU */ | ||
33 | #endif /* _ASM_MICROBLAZE_REGISTERS_H */ | 50 | #endif /* _ASM_MICROBLAZE_REGISTERS_H */ |
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h index 8434a43e5421..4487e150b455 100644 --- a/arch/microblaze/include/asm/sections.h +++ b/arch/microblaze/include/asm/sections.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -14,6 +16,7 @@ | |||
14 | # ifndef __ASSEMBLY__ | 16 | # ifndef __ASSEMBLY__ |
15 | extern char _ssbss[], _esbss[]; | 17 | extern char _ssbss[], _esbss[]; |
16 | extern unsigned long __ivt_start[], __ivt_end[]; | 18 | extern unsigned long __ivt_start[], __ivt_end[]; |
19 | extern char _etext[], _stext[]; | ||
17 | 20 | ||
18 | # ifdef CONFIG_MTD_UCLINUX | 21 | # ifdef CONFIG_MTD_UCLINUX |
19 | extern char *_ebss; | 22 | extern char *_ebss; |
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h index 7f5dcc56eea1..0e7102c3fb11 100644 --- a/arch/microblaze/include/asm/segment.h +++ b/arch/microblaze/include/asm/segment.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Michal Simek | 2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> |
3 | * Copyright (C) 2008 PetaLogix | 3 | * Copyright (C) 2008-2009 PetaLogix |
4 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
5 | * | 5 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -11,7 +11,7 @@ | |||
11 | #ifndef _ASM_MICROBLAZE_SEGMENT_H | 11 | #ifndef _ASM_MICROBLAZE_SEGMENT_H |
12 | #define _ASM_MICROBLAZE_SEGMENT_H | 12 | #define _ASM_MICROBLAZE_SEGMENT_H |
13 | 13 | ||
14 | #ifndef __ASSEMBLY__ | 14 | # ifndef __ASSEMBLY__ |
15 | 15 | ||
16 | typedef struct { | 16 | typedef struct { |
17 | unsigned long seg; | 17 | unsigned long seg; |
@@ -29,15 +29,21 @@ typedef struct { | |||
29 | * | 29 | * |
30 | * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal. | 30 | * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal. |
31 | */ | 31 | */ |
32 | # define KERNEL_DS ((mm_segment_t){0}) | 32 | # define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) |
33 | |||
34 | # ifndef CONFIG_MMU | ||
35 | # define KERNEL_DS MAKE_MM_SEG(0) | ||
33 | # define USER_DS KERNEL_DS | 36 | # define USER_DS KERNEL_DS |
37 | # else | ||
38 | # define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) | ||
39 | # define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) | ||
40 | # endif | ||
34 | 41 | ||
35 | # define get_ds() (KERNEL_DS) | 42 | # define get_ds() (KERNEL_DS) |
36 | # define get_fs() (current_thread_info()->addr_limit) | 43 | # define get_fs() (current_thread_info()->addr_limit) |
37 | # define set_fs(x) \ | 44 | # define set_fs(val) (current_thread_info()->addr_limit = (val)) |
38 | do { current_thread_info()->addr_limit = (x); } while (0) | ||
39 | 45 | ||
40 | # define segment_eq(a, b) ((a).seg == (b).seg) | 46 | # define segment_eq(a, b) ((a).seg == (b).seg) |
41 | 47 | ||
42 | # endif /* __ASSEMBLY__ */ | 48 | # endif /* __ASSEMBLY__ */ |
43 | #endif /* _ASM_MICROBLAZE_SEGMENT_H */ | 49 | #endif /* _ASM_MICROBLAZE_SEGMENT_H */ |
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index 9b98e8e6abae..27f8dafd8c34 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu> | 2 | * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> |
3 | * Copyright (C) 2007-2009 PetaLogix | ||
3 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
4 | * | 5 | * |
5 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -18,7 +19,6 @@ | |||
18 | extern unsigned int boot_cpuid; /* move to smp.h */ | 19 | extern unsigned int boot_cpuid; /* move to smp.h */ |
19 | 20 | ||
20 | extern char cmd_line[COMMAND_LINE_SIZE]; | 21 | extern char cmd_line[COMMAND_LINE_SIZE]; |
21 | # endif/* __KERNEL__ */ | ||
22 | 22 | ||
23 | void early_printk(const char *fmt, ...); | 23 | void early_printk(const char *fmt, ...); |
24 | 24 | ||
@@ -30,6 +30,11 @@ void setup_heartbeat(void); | |||
30 | 30 | ||
31 | unsigned long long sched_clock(void); | 31 | unsigned long long sched_clock(void); |
32 | 32 | ||
33 | # ifdef CONFIG_MMU | ||
34 | extern void mmu_reset(void); | ||
35 | extern void early_console_reg_tlb_alloc(unsigned int addr); | ||
36 | # endif /* CONFIG_MMU */ | ||
37 | |||
33 | void time_init(void); | 38 | void time_init(void); |
34 | void init_IRQ(void); | 39 | void init_IRQ(void); |
35 | void machine_early_init(const char *cmdline, unsigned int ram, | 40 | void machine_early_init(const char *cmdline, unsigned int ram, |
@@ -40,5 +45,6 @@ void machine_shutdown(void); | |||
40 | void machine_halt(void); | 45 | void machine_halt(void); |
41 | void machine_power_off(void); | 46 | void machine_power_off(void); |
42 | 47 | ||
48 | # endif/* __KERNEL__ */ | ||
43 | # endif /* __ASSEMBLY__ */ | 49 | # endif /* __ASSEMBLY__ */ |
44 | #endif /* _ASM_MICROBLAZE_SETUP_H */ | 50 | #endif /* _ASM_MICROBLAZE_SETUP_H */ |
diff --git a/arch/microblaze/include/asm/signal.h b/arch/microblaze/include/asm/signal.h index 9676fad3486c..46bc2267d949 100644 --- a/arch/microblaze/include/asm/signal.h +++ b/arch/microblaze/include/asm/signal.h | |||
@@ -90,7 +90,7 @@ | |||
90 | 90 | ||
91 | # ifndef __ASSEMBLY__ | 91 | # ifndef __ASSEMBLY__ |
92 | # include <linux/types.h> | 92 | # include <linux/types.h> |
93 | # include <asm-generic/signal.h> | 93 | # include <asm-generic/signal-defs.h> |
94 | 94 | ||
95 | /* Avoid too many header ordering problems. */ | 95 | /* Avoid too many header ordering problems. */ |
96 | struct siginfo; | 96 | struct siginfo; |
diff --git a/arch/microblaze/include/asm/stat.h b/arch/microblaze/include/asm/stat.h index 5f18b8aed220..a15f77520bfd 100644 --- a/arch/microblaze/include/asm/stat.h +++ b/arch/microblaze/include/asm/stat.h | |||
@@ -16,58 +16,53 @@ | |||
16 | 16 | ||
17 | #include <linux/posix_types.h> | 17 | #include <linux/posix_types.h> |
18 | 18 | ||
19 | #define STAT_HAVE_NSEC 1 | ||
20 | |||
19 | struct stat { | 21 | struct stat { |
20 | unsigned int st_dev; | 22 | unsigned long st_dev; |
21 | unsigned long st_ino; | 23 | unsigned long st_ino; |
22 | unsigned int st_mode; | 24 | unsigned int st_mode; |
23 | unsigned int st_nlink; | 25 | unsigned int st_nlink; |
24 | unsigned int st_uid; | 26 | unsigned int st_uid; |
25 | unsigned int st_gid; | 27 | unsigned int st_gid; |
26 | unsigned int st_rdev; | 28 | unsigned long st_rdev; |
27 | unsigned long st_size; | 29 | unsigned long __pad1; |
28 | unsigned long st_blksize; | 30 | long st_size; |
29 | unsigned long st_blocks; | 31 | int st_blksize; |
30 | unsigned long st_atime; | 32 | int __pad2; |
31 | unsigned long __unused1; /* unsigned long st_atime_nsec */ | 33 | long st_blocks; |
32 | unsigned long st_mtime; | 34 | int st_atime; |
33 | unsigned long __unused2; /* unsigned long st_mtime_nsec */ | 35 | unsigned int st_atime_nsec; |
34 | unsigned long st_ctime; | 36 | int st_mtime; |
35 | unsigned long __unused3; /* unsigned long st_ctime_nsec */ | 37 | unsigned int st_mtime_nsec; |
38 | int st_ctime; | ||
39 | unsigned int st_ctime_nsec; | ||
36 | unsigned long __unused4; | 40 | unsigned long __unused4; |
37 | unsigned long __unused5; | 41 | unsigned long __unused5; |
38 | }; | 42 | }; |
39 | 43 | ||
40 | struct stat64 { | 44 | struct stat64 { |
41 | unsigned long long st_dev; | 45 | unsigned long long st_dev; /* Device. */ |
42 | unsigned long __unused1; | 46 | unsigned long long st_ino; /* File serial number. */ |
43 | 47 | unsigned int st_mode; /* File mode. */ | |
44 | unsigned long long st_ino; | 48 | unsigned int st_nlink; /* Link count. */ |
45 | 49 | unsigned int st_uid; /* User ID of the file's owner. */ | |
46 | unsigned int st_mode; | 50 | unsigned int st_gid; /* Group ID of the file's group. */ |
47 | unsigned int st_nlink; | 51 | unsigned long long st_rdev; /* Device number, if device. */ |
48 | 52 | unsigned long long __pad1; | |
49 | unsigned int st_uid; | 53 | long long st_size; /* Size of file, in bytes. */ |
50 | unsigned int st_gid; | 54 | int st_blksize; /* Optimal block size for I/O. */ |
51 | 55 | int __pad2; | |
52 | unsigned long long st_rdev; | 56 | long long st_blocks; /* Number 512-byte blocks allocated. */ |
53 | unsigned long __unused3; | 57 | int st_atime; /* Time of last access. */ |
54 | 58 | unsigned int st_atime_nsec; | |
55 | long long st_size; | 59 | int st_mtime; /* Time of last modification. */ |
56 | unsigned long st_blksize; | 60 | unsigned int st_mtime_nsec; |
57 | 61 | int st_ctime; /* Time of last status change. */ | |
58 | unsigned long st_blocks; /* No. of 512-byte blocks allocated */ | 62 | unsigned int st_ctime_nsec; |
59 | unsigned long __unused4; /* future possible st_blocks high bits */ | 63 | unsigned int __unused4; |
60 | 64 | unsigned int __unused5; | |
61 | unsigned long st_atime; | ||
62 | unsigned long st_atime_nsec; | ||
63 | |||
64 | unsigned long st_mtime; | ||
65 | unsigned long st_mtime_nsec; | ||
66 | |||
67 | unsigned long st_ctime; | ||
68 | unsigned long st_ctime_nsec; | ||
69 | |||
70 | unsigned long __unused8; | ||
71 | }; | 65 | }; |
72 | 66 | ||
73 | #endif /* _ASM_MICROBLAZE_STAT_H */ | 67 | #endif /* _ASM_MICROBLAZE_STAT_H */ |
68 | |||
diff --git a/arch/microblaze/include/asm/string.h b/arch/microblaze/include/asm/string.h index f7728c90fc18..aec2f59298b8 100644 --- a/arch/microblaze/include/asm/string.h +++ b/arch/microblaze/include/asm/string.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #ifndef _ASM_MICROBLAZE_STRING_H | 9 | #ifndef _ASM_MICROBLAZE_STRING_H |
10 | #define _ASM_MICROBLAZE_STRING_H | 10 | #define _ASM_MICROBLAZE_STRING_H |
11 | 11 | ||
12 | #ifndef __KERNEL__ | 12 | #ifdef __KERNEL__ |
13 | 13 | ||
14 | #define __HAVE_ARCH_MEMSET | 14 | #define __HAVE_ARCH_MEMSET |
15 | #define __HAVE_ARCH_MEMCPY | 15 | #define __HAVE_ARCH_MEMCPY |
diff --git a/arch/microblaze/include/asm/syscalls.h b/arch/microblaze/include/asm/syscalls.h index 9cb4ff0edeb2..ddea9eb31f8d 100644 --- a/arch/microblaze/include/asm/syscalls.h +++ b/arch/microblaze/include/asm/syscalls.h | |||
@@ -34,6 +34,9 @@ asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, | |||
34 | asmlinkage int sys_sigaction(int sig, const struct old_sigaction *act, | 34 | asmlinkage int sys_sigaction(int sig, const struct old_sigaction *act, |
35 | struct old_sigaction *oact); | 35 | struct old_sigaction *oact); |
36 | 36 | ||
37 | asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act, | ||
38 | struct sigaction __user *oact, size_t sigsetsize); | ||
39 | |||
37 | asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 40 | asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
38 | struct pt_regs *regs); | 41 | struct pt_regs *regs); |
39 | 42 | ||
diff --git a/arch/microblaze/include/asm/termios.h b/arch/microblaze/include/asm/termios.h index 102d77258668..47a46d1fbe26 100644 --- a/arch/microblaze/include/asm/termios.h +++ b/arch/microblaze/include/asm/termios.h | |||
@@ -81,7 +81,7 @@ struct termio { | |||
81 | 81 | ||
82 | #ifdef __KERNEL__ | 82 | #ifdef __KERNEL__ |
83 | 83 | ||
84 | #include <asm-generic/termios.h> | 84 | #include <asm-generic/termios-base.h> |
85 | 85 | ||
86 | #endif /* __KERNEL__ */ | 86 | #endif /* __KERNEL__ */ |
87 | 87 | ||
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index 4c3943e3f403..7fac44498445 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h | |||
@@ -122,6 +122,8 @@ static inline struct thread_info *current_thread_info(void) | |||
122 | #define TIF_SINGLESTEP 4 | 122 | #define TIF_SINGLESTEP 4 |
123 | #define TIF_IRET 5 /* return with iret */ | 123 | #define TIF_IRET 5 /* return with iret */ |
124 | #define TIF_MEMDIE 6 | 124 | #define TIF_MEMDIE 6 |
125 | #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ | ||
126 | #define TIF_SECCOMP 10 /* secure computing */ | ||
125 | #define TIF_FREEZE 14 /* Freezing for suspend */ | 127 | #define TIF_FREEZE 14 /* Freezing for suspend */ |
126 | 128 | ||
127 | /* FIXME change in entry.S */ | 129 | /* FIXME change in entry.S */ |
@@ -138,10 +140,17 @@ static inline struct thread_info *current_thread_info(void) | |||
138 | #define _TIF_IRET (1<<TIF_IRET) | 140 | #define _TIF_IRET (1<<TIF_IRET) |
139 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | 141 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) |
140 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 142 | #define _TIF_FREEZE (1<<TIF_FREEZE) |
143 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | ||
144 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | ||
141 | #define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE) | 145 | #define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE) |
142 | 146 | ||
147 | /* work to do in syscall trace */ | ||
148 | #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ | ||
149 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP) | ||
150 | |||
143 | /* work to do on interrupt/exception return */ | 151 | /* work to do on interrupt/exception return */ |
144 | #define _TIF_WORK_MASK 0x0000FFFE | 152 | #define _TIF_WORK_MASK 0x0000FFFE |
153 | |||
145 | /* work to do on any return to u-space */ | 154 | /* work to do on any return to u-space */ |
146 | #define _TIF_ALLWORK_MASK 0x0000FFFF | 155 | #define _TIF_ALLWORK_MASK 0x0000FFFF |
147 | 156 | ||
@@ -154,6 +163,17 @@ static inline struct thread_info *current_thread_info(void) | |||
154 | */ | 163 | */ |
155 | /* FPU was used by this task this quantum (SMP) */ | 164 | /* FPU was used by this task this quantum (SMP) */ |
156 | #define TS_USEDFPU 0x0001 | 165 | #define TS_USEDFPU 0x0001 |
166 | #define TS_RESTORE_SIGMASK 0x0002 | ||
167 | |||
168 | #ifndef __ASSEMBLY__ | ||
169 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
170 | static inline void set_restore_sigmask(void) | ||
171 | { | ||
172 | struct thread_info *ti = current_thread_info(); | ||
173 | ti->status |= TS_RESTORE_SIGMASK; | ||
174 | set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); | ||
175 | } | ||
176 | #endif | ||
157 | 177 | ||
158 | #endif /* __KERNEL__ */ | 178 | #endif /* __KERNEL__ */ |
159 | #endif /* _ASM_MICROBLAZE_THREAD_INFO_H */ | 179 | #endif /* _ASM_MICROBLAZE_THREAD_INFO_H */ |
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h index d1dfe3791127..c472d2801132 100644 --- a/arch/microblaze/include/asm/tlb.h +++ b/arch/microblaze/include/asm/tlb.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -13,4 +15,10 @@ | |||
13 | 15 | ||
14 | #include <asm-generic/tlb.h> | 16 | #include <asm-generic/tlb.h> |
15 | 17 | ||
18 | #ifdef CONFIG_MMU | ||
19 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
20 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
21 | #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) | ||
22 | #endif | ||
23 | |||
16 | #endif /* _ASM_MICROBLAZE_TLB_H */ | 24 | #endif /* _ASM_MICROBLAZE_TLB_H */ |
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h index d7fe7629001b..eb31a0e8a772 100644 --- a/arch/microblaze/include/asm/tlbflush.h +++ b/arch/microblaze/include/asm/tlbflush.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -9,6 +11,50 @@ | |||
9 | #ifndef _ASM_MICROBLAZE_TLBFLUSH_H | 11 | #ifndef _ASM_MICROBLAZE_TLBFLUSH_H |
10 | #define _ASM_MICROBLAZE_TLBFLUSH_H | 12 | #define _ASM_MICROBLAZE_TLBFLUSH_H |
11 | 13 | ||
14 | #ifdef CONFIG_MMU | ||
15 | |||
16 | #include <linux/sched.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <asm/processor.h> /* For TASK_SIZE */ | ||
19 | #include <asm/mmu.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | |||
23 | extern void _tlbie(unsigned long address); | ||
24 | extern void _tlbia(void); | ||
25 | |||
26 | #define __tlbia() _tlbia() | ||
27 | |||
28 | static inline void local_flush_tlb_all(void) | ||
29 | { __tlbia(); } | ||
30 | static inline void local_flush_tlb_mm(struct mm_struct *mm) | ||
31 | { __tlbia(); } | ||
32 | static inline void local_flush_tlb_page(struct vm_area_struct *vma, | ||
33 | unsigned long vmaddr) | ||
34 | { _tlbie(vmaddr); } | ||
35 | static inline void local_flush_tlb_range(struct vm_area_struct *vma, | ||
36 | unsigned long start, unsigned long end) | ||
37 | { __tlbia(); } | ||
38 | |||
39 | #define flush_tlb_kernel_range(start, end) do { } while (0) | ||
40 | |||
41 | #define update_mmu_cache(vma, addr, pte) do { } while (0) | ||
42 | |||
43 | #define flush_tlb_all local_flush_tlb_all | ||
44 | #define flush_tlb_mm local_flush_tlb_mm | ||
45 | #define flush_tlb_page local_flush_tlb_page | ||
46 | #define flush_tlb_range local_flush_tlb_range | ||
47 | |||
48 | /* | ||
49 | * This is called in munmap when we have freed up some page-table | ||
50 | * pages. We don't need to do anything here, there's nothing special | ||
51 | * about our page-table pages. -- paulus | ||
52 | */ | ||
53 | static inline void flush_tlb_pgtables(struct mm_struct *mm, | ||
54 | unsigned long start, unsigned long end) { } | ||
55 | |||
56 | #else /* CONFIG_MMU */ | ||
57 | |||
12 | #define flush_tlb() BUG() | 58 | #define flush_tlb() BUG() |
13 | #define flush_tlb_all() BUG() | 59 | #define flush_tlb_all() BUG() |
14 | #define flush_tlb_mm(mm) BUG() | 60 | #define flush_tlb_mm(mm) BUG() |
@@ -17,4 +63,6 @@ | |||
17 | #define flush_tlb_pgtables(mm, start, end) BUG() | 63 | #define flush_tlb_pgtables(mm, start, end) BUG() |
18 | #define flush_tlb_kernel_range(start, end) BUG() | 64 | #define flush_tlb_kernel_range(start, end) BUG() |
19 | 65 | ||
66 | #endif /* CONFIG_MMU */ | ||
67 | |||
20 | #endif /* _ASM_MICROBLAZE_TLBFLUSH_H */ | 68 | #endif /* _ASM_MICROBLAZE_TLBFLUSH_H */ |
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 5a3ffc308e12..65adad61e7e9 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h | |||
@@ -1,4 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2008-2009 PetaLogix | ||
2 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
3 | * | 5 | * |
4 | * This file is subject to the terms and conditions of the GNU General Public | 6 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -26,6 +28,10 @@ | |||
26 | #define VERIFY_READ 0 | 28 | #define VERIFY_READ 0 |
27 | #define VERIFY_WRITE 1 | 29 | #define VERIFY_WRITE 1 |
28 | 30 | ||
31 | #define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0) | ||
32 | |||
33 | #ifndef CONFIG_MMU | ||
34 | |||
29 | extern int ___range_ok(unsigned long addr, unsigned long size); | 35 | extern int ___range_ok(unsigned long addr, unsigned long size); |
30 | 36 | ||
31 | #define __range_ok(addr, size) \ | 37 | #define __range_ok(addr, size) \ |
@@ -34,68 +40,68 @@ extern int ___range_ok(unsigned long addr, unsigned long size); | |||
34 | #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) | 40 | #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) |
35 | #define __access_ok(add, size) (__range_ok((addr), (size)) == 0) | 41 | #define __access_ok(add, size) (__range_ok((addr), (size)) == 0) |
36 | 42 | ||
37 | extern inline int bad_user_access_length(void) | 43 | /* Undefined function to trigger linker error */ |
38 | { | 44 | extern int bad_user_access_length(void); |
39 | return 0; | 45 | |
40 | } | ||
41 | /* FIXME this is function for optimalization -> memcpy */ | 46 | /* FIXME this is function for optimalization -> memcpy */ |
42 | #define __get_user(var, ptr) \ | 47 | #define __get_user(var, ptr) \ |
43 | ({ \ | 48 | ({ \ |
44 | int __gu_err = 0; \ | 49 | int __gu_err = 0; \ |
45 | switch (sizeof(*(ptr))) { \ | 50 | switch (sizeof(*(ptr))) { \ |
46 | case 1: \ | 51 | case 1: \ |
47 | case 2: \ | 52 | case 2: \ |
48 | case 4: \ | 53 | case 4: \ |
49 | (var) = *(ptr); \ | 54 | (var) = *(ptr); \ |
50 | break; \ | 55 | break; \ |
51 | case 8: \ | 56 | case 8: \ |
52 | memcpy((void *) &(var), (ptr), 8); \ | 57 | memcpy((void *) &(var), (ptr), 8); \ |
53 | break; \ | 58 | break; \ |
54 | default: \ | 59 | default: \ |
55 | (var) = 0; \ | 60 | (var) = 0; \ |
56 | __gu_err = __get_user_bad(); \ | 61 | __gu_err = __get_user_bad(); \ |
57 | break; \ | 62 | break; \ |
58 | } \ | 63 | } \ |
59 | __gu_err; \ | 64 | __gu_err; \ |
60 | }) | 65 | }) |
61 | 66 | ||
62 | #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) | 67 | #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) |
63 | 68 | ||
69 | /* FIXME is not there defined __pu_val */ | ||
64 | #define __put_user(var, ptr) \ | 70 | #define __put_user(var, ptr) \ |
65 | ({ \ | 71 | ({ \ |
66 | int __pu_err = 0; \ | 72 | int __pu_err = 0; \ |
67 | switch (sizeof(*(ptr))) { \ | 73 | switch (sizeof(*(ptr))) { \ |
68 | case 1: \ | 74 | case 1: \ |
69 | case 2: \ | 75 | case 2: \ |
70 | case 4: \ | 76 | case 4: \ |
71 | *(ptr) = (var); \ | 77 | *(ptr) = (var); \ |
72 | break; \ | 78 | break; \ |
73 | case 8: { \ | 79 | case 8: { \ |
74 | typeof(*(ptr)) __pu_val = var; \ | 80 | typeof(*(ptr)) __pu_val = (var); \ |
75 | memcpy(ptr, &__pu_val, sizeof(__pu_val));\ | 81 | memcpy(ptr, &__pu_val, sizeof(__pu_val)); \ |
76 | } \ | 82 | } \ |
77 | break; \ | 83 | break; \ |
78 | default: \ | 84 | default: \ |
79 | __pu_err = __put_user_bad(); \ | 85 | __pu_err = __put_user_bad(); \ |
80 | break; \ | 86 | break; \ |
81 | } \ | 87 | } \ |
82 | __pu_err; \ | 88 | __pu_err; \ |
83 | }) | 89 | }) |
84 | 90 | ||
85 | #define __put_user_bad() (bad_user_access_length(), (-EFAULT)) | 91 | #define __put_user_bad() (bad_user_access_length(), (-EFAULT)) |
86 | 92 | ||
87 | #define put_user(x, ptr) __put_user(x, ptr) | 93 | #define put_user(x, ptr) __put_user((x), (ptr)) |
88 | #define get_user(x, ptr) __get_user(x, ptr) | 94 | #define get_user(x, ptr) __get_user((x), (ptr)) |
89 | 95 | ||
90 | #define copy_to_user(to, from, n) (memcpy(to, from, n), 0) | 96 | #define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0) |
91 | #define copy_from_user(to, from, n) (memcpy(to, from, n), 0) | 97 | #define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0) |
92 | 98 | ||
93 | #define __copy_to_user(to, from, n) (copy_to_user(to, from, n)) | 99 | #define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n))) |
94 | #define __copy_from_user(to, from, n) (copy_from_user(to, from, n)) | 100 | #define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n))) |
95 | #define __copy_to_user_inatomic(to, from, n) (__copy_to_user(to, from, n)) | 101 | #define __copy_to_user_inatomic(to, from, n) \ |
96 | #define __copy_from_user_inatomic(to, from, n) (__copy_from_user(to, from, n)) | 102 | (__copy_to_user((to), (from), (n))) |
97 | 103 | #define __copy_from_user_inatomic(to, from, n) \ | |
98 | #define __clear_user(addr, n) (memset((void *)addr, 0, n), 0) | 104 | (__copy_from_user((to), (from), (n))) |
99 | 105 | ||
100 | static inline unsigned long clear_user(void *addr, unsigned long size) | 106 | static inline unsigned long clear_user(void *addr, unsigned long size) |
101 | { | 107 | { |
@@ -104,13 +110,200 @@ static inline unsigned long clear_user(void *addr, unsigned long size) | |||
104 | return size; | 110 | return size; |
105 | } | 111 | } |
106 | 112 | ||
107 | /* Returns 0 if exception not found and fixup otherwise. */ | 113 | /* Returns 0 if exception not found and fixup otherwise. */ |
108 | extern unsigned long search_exception_table(unsigned long); | 114 | extern unsigned long search_exception_table(unsigned long); |
109 | 115 | ||
116 | extern long strncpy_from_user(char *dst, const char *src, long count); | ||
117 | extern long strnlen_user(const char *src, long count); | ||
118 | |||
119 | #else /* CONFIG_MMU */ | ||
120 | |||
121 | /* | ||
122 | * Address is valid if: | ||
123 | * - "addr", "addr + size" and "size" are all below the limit | ||
124 | */ | ||
125 | #define access_ok(type, addr, size) \ | ||
126 | (get_fs().seg > (((unsigned long)(addr)) | \ | ||
127 | (size) | ((unsigned long)(addr) + (size)))) | ||
128 | |||
129 | /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", | ||
130 | type?"WRITE":"READ",addr,size,get_fs().seg)) */ | ||
131 | |||
132 | /* | ||
133 | * All the __XXX versions macros/functions below do not perform | ||
134 | * access checking. It is assumed that the necessary checks have been | ||
135 | * already performed before the finction (macro) is called. | ||
136 | */ | ||
137 | |||
138 | #define get_user(x, ptr) \ | ||
139 | ({ \ | ||
140 | access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \ | ||
141 | ? __get_user((x), (ptr)) : -EFAULT; \ | ||
142 | }) | ||
143 | |||
144 | #define put_user(x, ptr) \ | ||
145 | ({ \ | ||
146 | access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \ | ||
147 | ? __put_user((x), (ptr)) : -EFAULT; \ | ||
148 | }) | ||
149 | |||
150 | #define __get_user(x, ptr) \ | ||
151 | ({ \ | ||
152 | unsigned long __gu_val; \ | ||
153 | /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \ | ||
154 | long __gu_err; \ | ||
155 | switch (sizeof(*(ptr))) { \ | ||
156 | case 1: \ | ||
157 | __get_user_asm("lbu", (ptr), __gu_val, __gu_err); \ | ||
158 | break; \ | ||
159 | case 2: \ | ||
160 | __get_user_asm("lhu", (ptr), __gu_val, __gu_err); \ | ||
161 | break; \ | ||
162 | case 4: \ | ||
163 | __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ | ||
164 | break; \ | ||
165 | default: \ | ||
166 | __gu_val = 0; __gu_err = -EINVAL; \ | ||
167 | } \ | ||
168 | x = (__typeof__(*(ptr))) __gu_val; \ | ||
169 | __gu_err; \ | ||
170 | }) | ||
171 | |||
172 | #define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ | ||
173 | ({ \ | ||
174 | __asm__ __volatile__ ( \ | ||
175 | "1:" insn " %1, %2, r0; \ | ||
176 | addk %0, r0, r0; \ | ||
177 | 2: \ | ||
178 | .section .fixup,\"ax\"; \ | ||
179 | 3: brid 2b; \ | ||
180 | addik %0, r0, %3; \ | ||
181 | .previous; \ | ||
182 | .section __ex_table,\"a\"; \ | ||
183 | .word 1b,3b; \ | ||
184 | .previous;" \ | ||
185 | : "=r"(__gu_err), "=r"(__gu_val) \ | ||
186 | : "r"(__gu_ptr), "i"(-EFAULT) \ | ||
187 | ); \ | ||
188 | }) | ||
189 | |||
190 | #define __put_user(x, ptr) \ | ||
191 | ({ \ | ||
192 | __typeof__(*(ptr)) __gu_val = x; \ | ||
193 | long __gu_err = 0; \ | ||
194 | switch (sizeof(__gu_val)) { \ | ||
195 | case 1: \ | ||
196 | __put_user_asm("sb", (ptr), __gu_val, __gu_err); \ | ||
197 | break; \ | ||
198 | case 2: \ | ||
199 | __put_user_asm("sh", (ptr), __gu_val, __gu_err); \ | ||
200 | break; \ | ||
201 | case 4: \ | ||
202 | __put_user_asm("sw", (ptr), __gu_val, __gu_err); \ | ||
203 | break; \ | ||
204 | case 8: \ | ||
205 | __put_user_asm_8((ptr), __gu_val, __gu_err); \ | ||
206 | break; \ | ||
207 | default: \ | ||
208 | __gu_err = -EINVAL; \ | ||
209 | } \ | ||
210 | __gu_err; \ | ||
211 | }) | ||
212 | |||
213 | #define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \ | ||
214 | ({ \ | ||
215 | __asm__ __volatile__ (" lwi %0, %1, 0; \ | ||
216 | 1: swi %0, %2, 0; \ | ||
217 | lwi %0, %1, 4; \ | ||
218 | 2: swi %0, %2, 4; \ | ||
219 | addk %0,r0,r0; \ | ||
220 | 3: \ | ||
221 | .section .fixup,\"ax\"; \ | ||
222 | 4: brid 3b; \ | ||
223 | addik %0, r0, %3; \ | ||
224 | .previous; \ | ||
225 | .section __ex_table,\"a\"; \ | ||
226 | .word 1b,4b,2b,4b; \ | ||
227 | .previous;" \ | ||
228 | : "=&r"(__gu_err) \ | ||
229 | : "r"(&__gu_val), \ | ||
230 | "r"(__gu_ptr), "i"(-EFAULT) \ | ||
231 | ); \ | ||
232 | }) | ||
233 | |||
234 | #define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ | ||
235 | ({ \ | ||
236 | __asm__ __volatile__ ( \ | ||
237 | "1:" insn " %1, %2, r0; \ | ||
238 | addk %0, r0, r0; \ | ||
239 | 2: \ | ||
240 | .section .fixup,\"ax\"; \ | ||
241 | 3: brid 2b; \ | ||
242 | addik %0, r0, %3; \ | ||
243 | .previous; \ | ||
244 | .section __ex_table,\"a\"; \ | ||
245 | .word 1b,3b; \ | ||
246 | .previous;" \ | ||
247 | : "=r"(__gu_err) \ | ||
248 | : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \ | ||
249 | ); \ | ||
250 | }) | ||
251 | |||
252 | /* | ||
253 | * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. | ||
254 | */ | ||
255 | static inline int clear_user(char *to, int size) | ||
256 | { | ||
257 | if (size && access_ok(VERIFY_WRITE, to, size)) { | ||
258 | __asm__ __volatile__ (" \ | ||
259 | 1: \ | ||
260 | sb r0, %2, r0; \ | ||
261 | addik %0, %0, -1; \ | ||
262 | bneid %0, 1b; \ | ||
263 | addik %2, %2, 1; \ | ||
264 | 2: \ | ||
265 | .section __ex_table,\"a\"; \ | ||
266 | .word 1b,2b; \ | ||
267 | .section .text;" \ | ||
268 | : "=r"(size) \ | ||
269 | : "0"(size), "r"(to) | ||
270 | ); | ||
271 | } | ||
272 | return size; | ||
273 | } | ||
274 | |||
275 | extern unsigned long __copy_tofrom_user(void __user *to, | ||
276 | const void __user *from, unsigned long size); | ||
277 | |||
278 | #define copy_to_user(to, from, n) \ | ||
279 | (access_ok(VERIFY_WRITE, (to), (n)) ? \ | ||
280 | __copy_tofrom_user((void __user *)(to), \ | ||
281 | (__force const void __user *)(from), (n)) \ | ||
282 | : -EFAULT) | ||
283 | |||
284 | #define __copy_to_user(to, from, n) copy_to_user((to), (from), (n)) | ||
285 | #define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) | ||
286 | |||
287 | #define copy_from_user(to, from, n) \ | ||
288 | (access_ok(VERIFY_READ, (from), (n)) ? \ | ||
289 | __copy_tofrom_user((__force void __user *)(to), \ | ||
290 | (void __user *)(from), (n)) \ | ||
291 | : -EFAULT) | ||
292 | |||
293 | #define __copy_from_user(to, from, n) copy_from_user((to), (from), (n)) | ||
294 | #define __copy_from_user_inatomic(to, from, n) \ | ||
295 | copy_from_user((to), (from), (n)) | ||
296 | |||
297 | extern int __strncpy_user(char *to, const char __user *from, int len); | ||
298 | extern int __strnlen_user(const char __user *sstr, int len); | ||
299 | |||
300 | #define strncpy_from_user(to, from, len) \ | ||
301 | (access_ok(VERIFY_READ, from, 1) ? \ | ||
302 | __strncpy_user(to, from, len) : -EFAULT) | ||
303 | #define strnlen_user(str, len) \ | ||
304 | (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0) | ||
110 | 305 | ||
111 | extern long strncpy_from_user(char *dst, const char __user *src, long count); | 306 | #endif /* CONFIG_MMU */ |
112 | extern long strnlen_user(const char __user *src, long count); | ||
113 | extern long __strncpy_from_user(char *dst, const char __user *src, long count); | ||
114 | 307 | ||
115 | /* | 308 | /* |
116 | * The exception table consists of pairs of addresses: the first is the | 309 | * The exception table consists of pairs of addresses: the first is the |
diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h index 9d66b640c910..3658d91ac0fb 100644 --- a/arch/microblaze/include/asm/unaligned.h +++ b/arch/microblaze/include/asm/unaligned.h | |||
@@ -12,7 +12,8 @@ | |||
12 | 12 | ||
13 | # ifdef __KERNEL__ | 13 | # ifdef __KERNEL__ |
14 | 14 | ||
15 | # include <linux/unaligned/access_ok.h> | 15 | # include <linux/unaligned/be_struct.h> |
16 | # include <linux/unaligned/le_byteshift.h> | ||
16 | # include <linux/unaligned/generic.h> | 17 | # include <linux/unaligned/generic.h> |
17 | 18 | ||
18 | # define get_unaligned __get_unaligned_be | 19 | # define get_unaligned __get_unaligned_be |
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile index da94bec4ecba..f4a5e19a20eb 100644 --- a/arch/microblaze/kernel/Makefile +++ b/arch/microblaze/kernel/Makefile | |||
@@ -15,5 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | |||
15 | obj-$(CONFIG_SELFMOD) += selfmod.o | 15 | obj-$(CONFIG_SELFMOD) += selfmod.o |
16 | obj-$(CONFIG_HEART_BEAT) += heartbeat.o | 16 | obj-$(CONFIG_HEART_BEAT) += heartbeat.o |
17 | obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o | 17 | obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o |
18 | obj-$(CONFIG_MMU) += misc.o | ||
18 | 19 | ||
19 | obj-y += entry$(MMUEXT).o | 20 | obj-y += entry$(MMUEXT).o |
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c index aabd9e9423a6..7bc7b68f97db 100644 --- a/arch/microblaze/kernel/asm-offsets.c +++ b/arch/microblaze/kernel/asm-offsets.c | |||
@@ -1,4 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> | ||
2 | * Copyright (C) 2007-2009 PetaLogix | 3 | * Copyright (C) 2007-2009 PetaLogix |
3 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
4 | * | 5 | * |
@@ -68,16 +69,26 @@ int main(int argc, char *argv[]) | |||
68 | 69 | ||
69 | /* struct task_struct */ | 70 | /* struct task_struct */ |
70 | DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack)); | 71 | DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack)); |
72 | #ifdef CONFIG_MMU | ||
73 | DEFINE(TASK_STATE, offsetof(struct task_struct, state)); | ||
74 | DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); | ||
75 | DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); | ||
76 | DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); | ||
77 | DEFINE(TASK_MM, offsetof(struct task_struct, mm)); | ||
78 | DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); | ||
79 | DEFINE(TASK_PID, offsetof(struct task_struct, pid)); | ||
80 | DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); | ||
81 | DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); | ||
82 | BLANK(); | ||
83 | |||
84 | DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); | ||
85 | BLANK(); | ||
86 | #endif | ||
71 | 87 | ||
72 | /* struct thread_info */ | 88 | /* struct thread_info */ |
73 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); | 89 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); |
74 | DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); | ||
75 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | 90 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); |
76 | DEFINE(TI_STATUS, offsetof(struct thread_info, status)); | ||
77 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); | ||
78 | DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); | ||
79 | DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); | 91 | DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); |
80 | DEFINE(TI_RESTART_BLOCK, offsetof(struct thread_info, restart_block)); | ||
81 | DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); | 92 | DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); |
82 | BLANK(); | 93 | BLANK(); |
83 | 94 | ||
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c index 4b0f0fdb9ca0..7de84923ba07 100644 --- a/arch/microblaze/kernel/early_printk.c +++ b/arch/microblaze/kernel/early_printk.c | |||
@@ -87,6 +87,9 @@ int __init setup_early_printk(char *opt) | |||
87 | base_addr = early_uartlite_console(); | 87 | base_addr = early_uartlite_console(); |
88 | if (base_addr) { | 88 | if (base_addr) { |
89 | early_console_initialized = 1; | 89 | early_console_initialized = 1; |
90 | #ifdef CONFIG_MMU | ||
91 | early_console_reg_tlb_alloc(base_addr); | ||
92 | #endif | ||
90 | early_printk("early_printk_console is enabled at 0x%08x\n", | 93 | early_printk("early_printk_console is enabled at 0x%08x\n", |
91 | base_addr); | 94 | base_addr); |
92 | 95 | ||
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S index f24b1268baaf..1fce6b803f54 100644 --- a/arch/microblaze/kernel/entry-nommu.S +++ b/arch/microblaze/kernel/entry-nommu.S | |||
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
12 | #include <asm/thread_info.h> | 12 | #include <asm/thread_info.h> |
13 | #include <asm/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <asm/entry.h> | 14 | #include <asm/entry.h> |
15 | #include <asm/asm-offsets.h> | 15 | #include <asm/asm-offsets.h> |
16 | #include <asm/registers.h> | 16 | #include <asm/registers.h> |
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S new file mode 100644 index 000000000000..91a0e7b185dd --- /dev/null +++ b/arch/microblaze/kernel/entry.S | |||
@@ -0,0 +1,1116 @@ | |||
1 | /* | ||
2 | * Low-level system-call handling, trap handlers and context-switching | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
5 | * Copyright (C) 2008-2009 PetaLogix | ||
6 | * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au> | ||
7 | * Copyright (C) 2001,2002 NEC Corporation | ||
8 | * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org> | ||
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General | ||
11 | * Public License. See the file COPYING in the main directory of this | ||
12 | * archive for more details. | ||
13 | * | ||
14 | * Written by Miles Bader <miles@gnu.org> | ||
15 | * Heavily modified by John Williams for Microblaze | ||
16 | */ | ||
17 | |||
18 | #include <linux/sys.h> | ||
19 | #include <linux/linkage.h> | ||
20 | |||
21 | #include <asm/entry.h> | ||
22 | #include <asm/current.h> | ||
23 | #include <asm/processor.h> | ||
24 | #include <asm/exceptions.h> | ||
25 | #include <asm/asm-offsets.h> | ||
26 | #include <asm/thread_info.h> | ||
27 | |||
28 | #include <asm/page.h> | ||
29 | #include <asm/unistd.h> | ||
30 | |||
31 | #include <linux/errno.h> | ||
32 | #include <asm/signal.h> | ||
33 | |||
34 | /* The size of a state save frame. */ | ||
35 | #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE) | ||
36 | |||
37 | /* The offset of the struct pt_regs in a `state save frame' on the stack. */ | ||
38 | #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */ | ||
39 | |||
40 | #define C_ENTRY(name) .globl name; .align 4; name | ||
41 | |||
42 | /* | ||
43 | * Various ways of setting and clearing BIP in flags reg. | ||
44 | * This is mucky, but necessary using microblaze version that | ||
45 | * allows msr ops to write to BIP | ||
46 | */ | ||
47 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | ||
48 | .macro clear_bip | ||
49 | msrclr r11, MSR_BIP | ||
50 | nop | ||
51 | .endm | ||
52 | |||
53 | .macro set_bip | ||
54 | msrset r11, MSR_BIP | ||
55 | nop | ||
56 | .endm | ||
57 | |||
58 | .macro clear_eip | ||
59 | msrclr r11, MSR_EIP | ||
60 | nop | ||
61 | .endm | ||
62 | |||
63 | .macro set_ee | ||
64 | msrset r11, MSR_EE | ||
65 | nop | ||
66 | .endm | ||
67 | |||
68 | .macro disable_irq | ||
69 | msrclr r11, MSR_IE | ||
70 | nop | ||
71 | .endm | ||
72 | |||
73 | .macro enable_irq | ||
74 | msrset r11, MSR_IE | ||
75 | nop | ||
76 | .endm | ||
77 | |||
78 | .macro set_ums | ||
79 | msrset r11, MSR_UMS | ||
80 | nop | ||
81 | msrclr r11, MSR_VMS | ||
82 | nop | ||
83 | .endm | ||
84 | |||
85 | .macro set_vms | ||
86 | msrclr r11, MSR_UMS | ||
87 | nop | ||
88 | msrset r11, MSR_VMS | ||
89 | nop | ||
90 | .endm | ||
91 | |||
92 | .macro clear_vms_ums | ||
93 | msrclr r11, MSR_VMS | ||
94 | nop | ||
95 | msrclr r11, MSR_UMS | ||
96 | nop | ||
97 | .endm | ||
98 | #else | ||
99 | .macro clear_bip | ||
100 | mfs r11, rmsr | ||
101 | nop | ||
102 | andi r11, r11, ~MSR_BIP | ||
103 | mts rmsr, r11 | ||
104 | nop | ||
105 | .endm | ||
106 | |||
107 | .macro set_bip | ||
108 | mfs r11, rmsr | ||
109 | nop | ||
110 | ori r11, r11, MSR_BIP | ||
111 | mts rmsr, r11 | ||
112 | nop | ||
113 | .endm | ||
114 | |||
115 | .macro clear_eip | ||
116 | mfs r11, rmsr | ||
117 | nop | ||
118 | andi r11, r11, ~MSR_EIP | ||
119 | mts rmsr, r11 | ||
120 | nop | ||
121 | .endm | ||
122 | |||
123 | .macro set_ee | ||
124 | mfs r11, rmsr | ||
125 | nop | ||
126 | ori r11, r11, MSR_EE | ||
127 | mts rmsr, r11 | ||
128 | nop | ||
129 | .endm | ||
130 | |||
131 | .macro disable_irq | ||
132 | mfs r11, rmsr | ||
133 | nop | ||
134 | andi r11, r11, ~MSR_IE | ||
135 | mts rmsr, r11 | ||
136 | nop | ||
137 | .endm | ||
138 | |||
139 | .macro enable_irq | ||
140 | mfs r11, rmsr | ||
141 | nop | ||
142 | ori r11, r11, MSR_IE | ||
143 | mts rmsr, r11 | ||
144 | nop | ||
145 | .endm | ||
146 | |||
147 | .macro set_ums | ||
148 | mfs r11, rmsr | ||
149 | nop | ||
150 | ori r11, r11, MSR_VMS | ||
151 | andni r11, r11, MSR_UMS | ||
152 | mts rmsr, r11 | ||
153 | nop | ||
154 | .endm | ||
155 | |||
156 | .macro set_vms | ||
157 | mfs r11, rmsr | ||
158 | nop | ||
159 | ori r11, r11, MSR_VMS | ||
160 | andni r11, r11, MSR_UMS | ||
161 | mts rmsr, r11 | ||
162 | nop | ||
163 | .endm | ||
164 | |||
165 | .macro clear_vms_ums | ||
166 | mfs r11, rmsr | ||
167 | nop | ||
168 | andni r11, r11, (MSR_VMS|MSR_UMS) | ||
169 | mts rmsr,r11 | ||
170 | nop | ||
171 | .endm | ||
172 | #endif | ||
173 | |||
174 | /* Define how to call high-level functions. With MMU, virtual mode must be | ||
175 | * enabled when calling the high-level function. Clobbers R11. | ||
176 | * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL | ||
177 | */ | ||
178 | |||
179 | /* turn on virtual protected mode save */ | ||
180 | #define VM_ON \ | ||
181 | set_ums; \ | ||
182 | rted r0, 2f; \ | ||
183 | 2: nop; | ||
184 | |||
185 | /* turn off virtual protected mode save and user mode save*/ | ||
186 | #define VM_OFF \ | ||
187 | clear_vms_ums; \ | ||
188 | rted r0, TOPHYS(1f); \ | ||
189 | 1: nop; | ||
190 | |||
191 | #define SAVE_REGS \ | ||
192 | swi r2, r1, PTO+PT_R2; /* Save SDA */ \ | ||
193 | swi r5, r1, PTO+PT_R5; \ | ||
194 | swi r6, r1, PTO+PT_R6; \ | ||
195 | swi r7, r1, PTO+PT_R7; \ | ||
196 | swi r8, r1, PTO+PT_R8; \ | ||
197 | swi r9, r1, PTO+PT_R9; \ | ||
198 | swi r10, r1, PTO+PT_R10; \ | ||
199 | swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\ | ||
200 | swi r12, r1, PTO+PT_R12; \ | ||
201 | swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \ | ||
202 | swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \ | ||
203 | swi r15, r1, PTO+PT_R15; /* Save LP */ \ | ||
204 | swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \ | ||
205 | swi r19, r1, PTO+PT_R19; \ | ||
206 | swi r20, r1, PTO+PT_R20; \ | ||
207 | swi r21, r1, PTO+PT_R21; \ | ||
208 | swi r22, r1, PTO+PT_R22; \ | ||
209 | swi r23, r1, PTO+PT_R23; \ | ||
210 | swi r24, r1, PTO+PT_R24; \ | ||
211 | swi r25, r1, PTO+PT_R25; \ | ||
212 | swi r26, r1, PTO+PT_R26; \ | ||
213 | swi r27, r1, PTO+PT_R27; \ | ||
214 | swi r28, r1, PTO+PT_R28; \ | ||
215 | swi r29, r1, PTO+PT_R29; \ | ||
216 | swi r30, r1, PTO+PT_R30; \ | ||
217 | swi r31, r1, PTO+PT_R31; /* Save current task reg */ \ | ||
218 | mfs r11, rmsr; /* save MSR */ \ | ||
219 | nop; \ | ||
220 | swi r11, r1, PTO+PT_MSR; | ||
221 | |||
222 | #define RESTORE_REGS \ | ||
223 | lwi r11, r1, PTO+PT_MSR; \ | ||
224 | mts rmsr , r11; \ | ||
225 | nop; \ | ||
226 | lwi r2, r1, PTO+PT_R2; /* restore SDA */ \ | ||
227 | lwi r5, r1, PTO+PT_R5; \ | ||
228 | lwi r6, r1, PTO+PT_R6; \ | ||
229 | lwi r7, r1, PTO+PT_R7; \ | ||
230 | lwi r8, r1, PTO+PT_R8; \ | ||
231 | lwi r9, r1, PTO+PT_R9; \ | ||
232 | lwi r10, r1, PTO+PT_R10; \ | ||
233 | lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\ | ||
234 | lwi r12, r1, PTO+PT_R12; \ | ||
235 | lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \ | ||
236 | lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ | ||
237 | lwi r15, r1, PTO+PT_R15; /* restore LP */ \ | ||
238 | lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \ | ||
239 | lwi r19, r1, PTO+PT_R19; \ | ||
240 | lwi r20, r1, PTO+PT_R20; \ | ||
241 | lwi r21, r1, PTO+PT_R21; \ | ||
242 | lwi r22, r1, PTO+PT_R22; \ | ||
243 | lwi r23, r1, PTO+PT_R23; \ | ||
244 | lwi r24, r1, PTO+PT_R24; \ | ||
245 | lwi r25, r1, PTO+PT_R25; \ | ||
246 | lwi r26, r1, PTO+PT_R26; \ | ||
247 | lwi r27, r1, PTO+PT_R27; \ | ||
248 | lwi r28, r1, PTO+PT_R28; \ | ||
249 | lwi r29, r1, PTO+PT_R29; \ | ||
250 | lwi r30, r1, PTO+PT_R30; \ | ||
251 | lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */ | ||
252 | |||
253 | .text | ||
254 | |||
255 | /* | ||
256 | * User trap. | ||
257 | * | ||
258 | * System calls are handled here. | ||
259 | * | ||
260 | * Syscall protocol: | ||
261 | * Syscall number in r12, args in r5-r10 | ||
262 | * Return value in r3 | ||
263 | * | ||
264 | * Trap entered via brki instruction, so BIP bit is set, and interrupts | ||
265 | * are masked. This is nice, means we don't have to CLI before state save | ||
266 | */ | ||
267 | C_ENTRY(_user_exception): | ||
268 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
269 | addi r14, r14, 4 /* return address is 4 byte after call */ | ||
270 | swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ | ||
271 | |||
272 | lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/ | ||
273 | beqi r11, 1f; /* Jump ahead if coming from user */ | ||
274 | /* Kernel-mode state save. */ | ||
275 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ | ||
276 | tophys(r1,r11); | ||
277 | swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ | ||
278 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ | ||
279 | |||
280 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | ||
281 | SAVE_REGS | ||
282 | |||
283 | addi r11, r0, 1; /* Was in kernel-mode. */ | ||
284 | swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */ | ||
285 | brid 2f; | ||
286 | nop; /* Fill delay slot */ | ||
287 | |||
288 | /* User-mode state save. */ | ||
289 | 1: | ||
290 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ | ||
291 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ | ||
292 | tophys(r1,r1); | ||
293 | lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */ | ||
294 | /* calculate kernel stack pointer from task struct 8k */ | ||
295 | addik r1, r1, THREAD_SIZE; | ||
296 | tophys(r1,r1); | ||
297 | |||
298 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | ||
299 | SAVE_REGS | ||
300 | |||
301 | swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ | ||
302 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | ||
303 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | ||
304 | addi r11, r0, 1; | ||
305 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ | ||
306 | 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ | ||
307 | /* Save away the syscall number. */ | ||
308 | swi r12, r1, PTO+PT_R0; | ||
309 | tovirt(r1,r1) | ||
310 | |||
311 | la r15, r0, ret_from_trap-8 | ||
312 | /* where the trap should return need -8 to adjust for rtsd r15, 8*/ | ||
313 | /* Jump to the appropriate function for the system call number in r12 | ||
314 | * (r12 is not preserved), or return an error if r12 is not valid. The LP | ||
315 | * register should point to the location where | ||
316 | * the called function should return. [note that MAKE_SYS_CALL uses label 1] */ | ||
317 | /* See if the system call number is valid. */ | ||
318 | addi r11, r12, -__NR_syscalls; | ||
319 | bgei r11,1f; | ||
320 | /* Figure out which function to use for this system call. */ | ||
321 | /* Note Microblaze barrel shift is optional, so don't rely on it */ | ||
322 | add r12, r12, r12; /* convert num -> ptr */ | ||
323 | add r12, r12, r12; | ||
324 | |||
325 | /* Trac syscalls and stored them to r0_ram */ | ||
326 | lwi r3, r12, 0x400 + TOPHYS(r0_ram) | ||
327 | addi r3, r3, 1 | ||
328 | swi r3, r12, 0x400 + TOPHYS(r0_ram) | ||
329 | |||
330 | lwi r12, r12, TOPHYS(sys_call_table); /* Function ptr */ | ||
331 | /* Make the system call. to r12*/ | ||
332 | set_vms; | ||
333 | rtid r12, 0; | ||
334 | nop; | ||
335 | /* The syscall number is invalid, return an error. */ | ||
336 | 1: VM_ON; /* RETURN() expects virtual mode*/ | ||
337 | addi r3, r0, -ENOSYS; | ||
338 | rtsd r15,8; /* looks like a normal subroutine return */ | ||
339 | or r0, r0, r0 | ||
340 | |||
341 | |||
342 | /* Entry point used to return from a syscall/trap. */ | ||
343 | /* We re-enable BIP bit before state restore */ | ||
344 | C_ENTRY(ret_from_trap): | ||
345 | set_bip; /* Ints masked for state restore*/ | ||
346 | lwi r11, r1, PTO+PT_MODE; | ||
347 | /* See if returning to kernel mode, if so, skip resched &c. */ | ||
348 | bnei r11, 2f; | ||
349 | |||
350 | /* We're returning to user mode, so check for various conditions that | ||
351 | * trigger rescheduling. */ | ||
352 | /* Get current task ptr into r11 */ | ||
353 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
354 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
355 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | ||
356 | andi r11, r11, _TIF_NEED_RESCHED; | ||
357 | beqi r11, 5f; | ||
358 | |||
359 | swi r3, r1, PTO + PT_R3; /* store syscall result */ | ||
360 | swi r4, r1, PTO + PT_R4; | ||
361 | bralid r15, schedule; /* Call scheduler */ | ||
362 | nop; /* delay slot */ | ||
363 | lwi r3, r1, PTO + PT_R3; /* restore syscall result */ | ||
364 | lwi r4, r1, PTO + PT_R4; | ||
365 | |||
366 | /* Maybe handle a signal */ | ||
367 | 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
368 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
369 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | ||
370 | andi r11, r11, _TIF_SIGPENDING; | ||
371 | beqi r11, 1f; /* Signals to handle, handle them */ | ||
372 | |||
373 | swi r3, r1, PTO + PT_R3; /* store syscall result */ | ||
374 | swi r4, r1, PTO + PT_R4; | ||
375 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | ||
376 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | ||
377 | addi r7, r0, 1; /* Arg 3: int in_syscall */ | ||
378 | bralid r15, do_signal; /* Handle any signals */ | ||
379 | nop; | ||
380 | lwi r3, r1, PTO + PT_R3; /* restore syscall result */ | ||
381 | lwi r4, r1, PTO + PT_R4; | ||
382 | |||
383 | /* Finally, return to user state. */ | ||
384 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | ||
385 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
386 | swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ | ||
387 | VM_OFF; | ||
388 | tophys(r1,r1); | ||
389 | RESTORE_REGS; | ||
390 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | ||
391 | lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ | ||
392 | bri 6f; | ||
393 | |||
394 | /* Return to kernel state. */ | ||
395 | 2: VM_OFF; | ||
396 | tophys(r1,r1); | ||
397 | RESTORE_REGS; | ||
398 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | ||
399 | tovirt(r1,r1); | ||
400 | 6: | ||
401 | TRAP_return: /* Make global symbol for debugging */ | ||
402 | rtbd r14, 0; /* Instructions to return from an IRQ */ | ||
403 | nop; | ||
404 | |||
405 | |||
406 | /* These syscalls need access to the struct pt_regs on the stack, so we | ||
407 | implement them in assembly (they're basically all wrappers anyway). */ | ||
408 | |||
409 | C_ENTRY(sys_fork_wrapper): | ||
410 | addi r5, r0, SIGCHLD /* Arg 0: flags */ | ||
411 | lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */ | ||
412 | la r7, r1, PTO /* Arg 2: parent context */ | ||
413 | add r8. r0, r0 /* Arg 3: (unused) */ | ||
414 | add r9, r0, r0; /* Arg 4: (unused) */ | ||
415 | add r10, r0, r0; /* Arg 5: (unused) */ | ||
416 | brid do_fork /* Do real work (tail-call) */ | ||
417 | nop; | ||
418 | |||
419 | /* This the initial entry point for a new child thread, with an appropriate | ||
420 | stack in place that makes it look the the child is in the middle of an | ||
421 | syscall. This function is actually `returned to' from switch_thread | ||
422 | (copy_thread makes ret_from_fork the return address in each new thread's | ||
423 | saved context). */ | ||
424 | C_ENTRY(ret_from_fork): | ||
425 | bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ | ||
426 | add r3, r5, r0; /* switch_thread returns the prev task */ | ||
427 | /* ( in the delay slot ) */ | ||
428 | add r3, r0, r0; /* Child's fork call should return 0. */ | ||
429 | brid ret_from_trap; /* Do normal trap return */ | ||
430 | nop; | ||
431 | |||
432 | C_ENTRY(sys_vfork_wrapper): | ||
433 | la r5, r1, PTO | ||
434 | brid sys_vfork /* Do real work (tail-call) */ | ||
435 | nop | ||
436 | |||
437 | C_ENTRY(sys_clone_wrapper): | ||
438 | bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ | ||
439 | lwi r6, r1, PTO+PT_R1; /* If so, use paret's stack ptr */ | ||
440 | 1: la r7, r1, PTO; /* Arg 2: parent context */ | ||
441 | add r8, r0, r0; /* Arg 3: (unused) */ | ||
442 | add r9, r0, r0; /* Arg 4: (unused) */ | ||
443 | add r10, r0, r0; /* Arg 5: (unused) */ | ||
444 | brid do_fork /* Do real work (tail-call) */ | ||
445 | nop; | ||
446 | |||
447 | C_ENTRY(sys_execve_wrapper): | ||
448 | la r8, r1, PTO; /* add user context as 4th arg */ | ||
449 | brid sys_execve; /* Do real work (tail-call).*/ | ||
450 | nop; | ||
451 | |||
452 | C_ENTRY(sys_sigsuspend_wrapper): | ||
453 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
454 | swi r4, r1, PTO+PT_R4; | ||
455 | la r6, r1, PTO; /* add user context as 2nd arg */ | ||
456 | bralid r15, sys_sigsuspend; /* Do real work.*/ | ||
457 | nop; | ||
458 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
459 | lwi r4, r1, PTO+PT_R4; | ||
460 | bri ret_from_trap /* fall through will not work here due to align */ | ||
461 | nop; | ||
462 | |||
463 | C_ENTRY(sys_rt_sigsuspend_wrapper): | ||
464 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
465 | swi r4, r1, PTO+PT_R4; | ||
466 | la r7, r1, PTO; /* add user context as 3rd arg */ | ||
467 | brlid r15, sys_rt_sigsuspend; /* Do real work.*/ | ||
468 | nop; | ||
469 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
470 | lwi r4, r1, PTO+PT_R4; | ||
471 | bri ret_from_trap /* fall through will not work here due to align */ | ||
472 | nop; | ||
473 | |||
474 | |||
475 | C_ENTRY(sys_sigreturn_wrapper): | ||
476 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
477 | swi r4, r1, PTO+PT_R4; | ||
478 | la r5, r1, PTO; /* add user context as 1st arg */ | ||
479 | brlid r15, sys_sigreturn; /* Do real work.*/ | ||
480 | nop; | ||
481 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
482 | lwi r4, r1, PTO+PT_R4; | ||
483 | bri ret_from_trap /* fall through will not work here due to align */ | ||
484 | nop; | ||
485 | |||
486 | C_ENTRY(sys_rt_sigreturn_wrapper): | ||
487 | swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
488 | swi r4, r1, PTO+PT_R4; | ||
489 | la r5, r1, PTO; /* add user context as 1st arg */ | ||
490 | brlid r15, sys_rt_sigreturn /* Do real work */ | ||
491 | nop; | ||
492 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
493 | lwi r4, r1, PTO+PT_R4; | ||
494 | bri ret_from_trap /* fall through will not work here due to align */ | ||
495 | nop; | ||
496 | |||
497 | /* | ||
498 | * HW EXCEPTION rutine start | ||
499 | */ | ||
500 | |||
501 | #define SAVE_STATE \ | ||
502 | swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \ | ||
503 | set_bip; /*equalize initial state for all possible entries*/\ | ||
504 | clear_eip; \ | ||
505 | enable_irq; \ | ||
506 | set_ee; \ | ||
507 | /* See if already in kernel mode.*/ \ | ||
508 | lwi r11, r0, TOPHYS(PER_CPU(KM)); \ | ||
509 | beqi r11, 1f; /* Jump ahead if coming from user */\ | ||
510 | /* Kernel-mode state save. */ \ | ||
511 | /* Reload kernel stack-ptr. */ \ | ||
512 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | ||
513 | tophys(r1,r11); \ | ||
514 | swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \ | ||
515 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\ | ||
516 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\ | ||
517 | /* store return registers separately because \ | ||
518 | * this macros is use for others exceptions */ \ | ||
519 | swi r3, r1, PTO + PT_R3; \ | ||
520 | swi r4, r1, PTO + PT_R4; \ | ||
521 | SAVE_REGS \ | ||
522 | /* PC, before IRQ/trap - this is one instruction above */ \ | ||
523 | swi r17, r1, PTO+PT_PC; \ | ||
524 | \ | ||
525 | addi r11, r0, 1; /* Was in kernel-mode. */ \ | ||
526 | swi r11, r1, PTO+PT_MODE; \ | ||
527 | brid 2f; \ | ||
528 | nop; /* Fill delay slot */ \ | ||
529 | 1: /* User-mode state save. */ \ | ||
530 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\ | ||
531 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ | ||
532 | tophys(r1,r1); \ | ||
533 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \ | ||
534 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\ | ||
535 | tophys(r1,r1); \ | ||
536 | \ | ||
537 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\ | ||
538 | /* store return registers separately because this macros \ | ||
539 | * is use for others exceptions */ \ | ||
540 | swi r3, r1, PTO + PT_R3; \ | ||
541 | swi r4, r1, PTO + PT_R4; \ | ||
542 | SAVE_REGS \ | ||
543 | /* PC, before IRQ/trap - this is one instruction above FIXME*/ \ | ||
544 | swi r17, r1, PTO+PT_PC; \ | ||
545 | \ | ||
546 | swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \ | ||
547 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | ||
548 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ | ||
549 | addi r11, r0, 1; \ | ||
550 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ | ||
551 | 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ | ||
552 | /* Save away the syscall number. */ \ | ||
553 | swi r0, r1, PTO+PT_R0; \ | ||
554 | tovirt(r1,r1) | ||
555 | |||
556 | C_ENTRY(full_exception_trap): | ||
557 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
558 | /* adjust exception address for privileged instruction | ||
559 | * for finding where is it */ | ||
560 | addik r17, r17, -4 | ||
561 | SAVE_STATE /* Save registers */ | ||
562 | /* FIXME this can be store directly in PT_ESR reg. | ||
563 | * I tested it but there is a fault */ | ||
564 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | ||
565 | la r15, r0, ret_from_exc - 8 | ||
566 | la r5, r1, PTO /* parameter struct pt_regs * regs */ | ||
567 | mfs r6, resr | ||
568 | nop | ||
569 | mfs r7, rfsr; /* save FSR */ | ||
570 | nop | ||
571 | la r12, r0, full_exception | ||
572 | set_vms; | ||
573 | rtbd r12, 0; | ||
574 | nop; | ||
575 | |||
576 | /* | ||
577 | * Unaligned data trap. | ||
578 | * | ||
579 | * Unaligned data trap last on 4k page is handled here. | ||
580 | * | ||
581 | * Trap entered via exception, so EE bit is set, and interrupts | ||
582 | * are masked. This is nice, means we don't have to CLI before state save | ||
583 | * | ||
584 | * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S" | ||
585 | */ | ||
586 | C_ENTRY(unaligned_data_trap): | ||
587 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
588 | SAVE_STATE /* Save registers.*/ | ||
589 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | ||
590 | la r15, r0, ret_from_exc-8 | ||
591 | mfs r3, resr /* ESR */ | ||
592 | nop | ||
593 | mfs r4, rear /* EAR */ | ||
594 | nop | ||
595 | la r7, r1, PTO /* parameter struct pt_regs * regs */ | ||
596 | la r12, r0, _unaligned_data_exception | ||
597 | set_vms; | ||
598 | rtbd r12, 0; /* interrupts enabled */ | ||
599 | nop; | ||
600 | |||
601 | /* | ||
602 | * Page fault traps. | ||
603 | * | ||
604 | * If the real exception handler (from hw_exception_handler.S) didn't find | ||
605 | * the mapping for the process, then we're thrown here to handle such situation. | ||
606 | * | ||
607 | * Trap entered via exceptions, so EE bit is set, and interrupts | ||
608 | * are masked. This is nice, means we don't have to CLI before state save | ||
609 | * | ||
610 | * Build a standard exception frame for TLB Access errors. All TLB exceptions | ||
611 | * will bail out to this point if they can't resolve the lightweight TLB fault. | ||
612 | * | ||
613 | * The C function called is in "arch/microblaze/mm/fault.c", declared as: | ||
614 | * void do_page_fault(struct pt_regs *regs, | ||
615 | * unsigned long address, | ||
616 | * unsigned long error_code) | ||
617 | */ | ||
618 | /* data and intruction trap - which is choose is resolved int fault.c */ | ||
619 | C_ENTRY(page_fault_data_trap): | ||
620 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
621 | SAVE_STATE /* Save registers.*/ | ||
622 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | ||
623 | la r15, r0, ret_from_exc-8 | ||
624 | la r5, r1, PTO /* parameter struct pt_regs * regs */ | ||
625 | mfs r6, rear /* parameter unsigned long address */ | ||
626 | nop | ||
627 | mfs r7, resr /* parameter unsigned long error_code */ | ||
628 | nop | ||
629 | la r12, r0, do_page_fault | ||
630 | set_vms; | ||
631 | rtbd r12, 0; /* interrupts enabled */ | ||
632 | nop; | ||
633 | |||
634 | C_ENTRY(page_fault_instr_trap): | ||
635 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ | ||
636 | SAVE_STATE /* Save registers.*/ | ||
637 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | ||
638 | la r15, r0, ret_from_exc-8 | ||
639 | la r5, r1, PTO /* parameter struct pt_regs * regs */ | ||
640 | mfs r6, rear /* parameter unsigned long address */ | ||
641 | nop | ||
642 | ori r7, r0, 0 /* parameter unsigned long error_code */ | ||
643 | la r12, r0, do_page_fault | ||
644 | set_vms; | ||
645 | rtbd r12, 0; /* interrupts enabled */ | ||
646 | nop; | ||
647 | |||
648 | /* Entry point used to return from an exception. */ | ||
649 | C_ENTRY(ret_from_exc): | ||
650 | set_bip; /* Ints masked for state restore*/ | ||
651 | lwi r11, r1, PTO+PT_MODE; | ||
652 | bnei r11, 2f; /* See if returning to kernel mode, */ | ||
653 | /* ... if so, skip resched &c. */ | ||
654 | |||
655 | /* We're returning to user mode, so check for various conditions that | ||
656 | trigger rescheduling. */ | ||
657 | /* Get current task ptr into r11 */ | ||
658 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
659 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
660 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | ||
661 | andi r11, r11, _TIF_NEED_RESCHED; | ||
662 | beqi r11, 5f; | ||
663 | |||
664 | /* Call the scheduler before returning from a syscall/trap. */ | ||
665 | bralid r15, schedule; /* Call scheduler */ | ||
666 | nop; /* delay slot */ | ||
667 | |||
668 | /* Maybe handle a signal */ | ||
669 | 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
670 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
671 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | ||
672 | andi r11, r11, _TIF_SIGPENDING; | ||
673 | beqi r11, 1f; /* Signals to handle, handle them */ | ||
674 | |||
675 | /* | ||
676 | * Handle a signal return; Pending signals should be in r18. | ||
677 | * | ||
678 | * Not all registers are saved by the normal trap/interrupt entry | ||
679 | * points (for instance, call-saved registers (because the normal | ||
680 | * C-compiler calling sequence in the kernel makes sure they're | ||
681 | * preserved), and call-clobbered registers in the case of | ||
682 | * traps), but signal handlers may want to examine or change the | ||
683 | * complete register state. Here we save anything not saved by | ||
684 | * the normal entry sequence, so that it may be safely restored | ||
685 | * (in a possibly modified form) after do_signal returns. | ||
686 | * store return registers separately because this macros is use | ||
687 | * for others exceptions */ | ||
688 | swi r3, r1, PTO + PT_R3; | ||
689 | swi r4, r1, PTO + PT_R4; | ||
690 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | ||
691 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | ||
692 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | ||
693 | bralid r15, do_signal; /* Handle any signals */ | ||
694 | nop; | ||
695 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
696 | lwi r4, r1, PTO+PT_R4; | ||
697 | |||
698 | /* Finally, return to user state. */ | ||
699 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | ||
700 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
701 | swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ | ||
702 | VM_OFF; | ||
703 | tophys(r1,r1); | ||
704 | |||
705 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
706 | lwi r4, r1, PTO+PT_R4; | ||
707 | RESTORE_REGS; | ||
708 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | ||
709 | |||
710 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ | ||
711 | bri 6f; | ||
712 | /* Return to kernel state. */ | ||
713 | 2: VM_OFF; | ||
714 | tophys(r1,r1); | ||
715 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
716 | lwi r4, r1, PTO+PT_R4; | ||
717 | RESTORE_REGS; | ||
718 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | ||
719 | |||
720 | tovirt(r1,r1); | ||
721 | 6: | ||
722 | EXC_return: /* Make global symbol for debugging */ | ||
723 | rtbd r14, 0; /* Instructions to return from an IRQ */ | ||
724 | nop; | ||
725 | |||
726 | /* | ||
727 | * HW EXCEPTION rutine end | ||
728 | */ | ||
729 | |||
730 | /* | ||
731 | * Hardware maskable interrupts. | ||
732 | * | ||
733 | * The stack-pointer (r1) should have already been saved to the memory | ||
734 | * location PER_CPU(ENTRY_SP). | ||
735 | */ | ||
736 | C_ENTRY(_interrupt): | ||
737 | /* MS: we are in physical address */ | ||
738 | /* Save registers, switch to proper stack, convert SP to virtual.*/ | ||
739 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) | ||
740 | swi r11, r0, TOPHYS(PER_CPU(R11_SAVE)); | ||
741 | /* MS: See if already in kernel mode. */ | ||
742 | lwi r11, r0, TOPHYS(PER_CPU(KM)); | ||
743 | beqi r11, 1f; /* MS: Jump ahead if coming from user */ | ||
744 | |||
745 | /* Kernel-mode state save. */ | ||
746 | or r11, r1, r0 | ||
747 | tophys(r1,r11); /* MS: I have in r1 physical address where stack is */ | ||
748 | /* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/ | ||
749 | swi r11, r1, (PT_R1 - PT_SIZE); | ||
750 | /* MS: restore r11 because of saving in SAVE_REGS */ | ||
751 | lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE)); | ||
752 | /* save registers */ | ||
753 | /* MS: Make room on the stack -> activation record */ | ||
754 | addik r1, r1, -STATE_SAVE_SIZE; | ||
755 | /* MS: store return registers separately because | ||
756 | * this macros is use for others exceptions */ | ||
757 | swi r3, r1, PTO + PT_R3; | ||
758 | swi r4, r1, PTO + PT_R4; | ||
759 | SAVE_REGS | ||
760 | /* MS: store mode */ | ||
761 | addi r11, r0, 1; /* MS: Was in kernel-mode. */ | ||
762 | swi r11, r1, PTO + PT_MODE; /* MS: and save it */ | ||
763 | brid 2f; | ||
764 | nop; /* MS: Fill delay slot */ | ||
765 | |||
766 | 1: | ||
767 | /* User-mode state save. */ | ||
768 | /* MS: restore r11 -> FIXME move before SAVE_REG */ | ||
769 | lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE)); | ||
770 | /* MS: get the saved current */ | ||
771 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | ||
772 | tophys(r1,r1); | ||
773 | lwi r1, r1, TS_THREAD_INFO; | ||
774 | addik r1, r1, THREAD_SIZE; | ||
775 | tophys(r1,r1); | ||
776 | /* save registers */ | ||
777 | addik r1, r1, -STATE_SAVE_SIZE; | ||
778 | swi r3, r1, PTO+PT_R3; | ||
779 | swi r4, r1, PTO+PT_R4; | ||
780 | SAVE_REGS | ||
781 | /* calculate mode */ | ||
782 | swi r0, r1, PTO + PT_MODE; | ||
783 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | ||
784 | swi r11, r1, PTO+PT_R1; | ||
785 | /* setup kernel mode to KM */ | ||
786 | addi r11, r0, 1; | ||
787 | swi r11, r0, TOPHYS(PER_CPU(KM)); | ||
788 | |||
789 | 2: | ||
790 | lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | ||
791 | swi r0, r1, PTO + PT_R0; | ||
792 | tovirt(r1,r1) | ||
793 | la r5, r1, PTO; | ||
794 | set_vms; | ||
795 | la r11, r0, do_IRQ; | ||
796 | la r15, r0, irq_call; | ||
797 | irq_call:rtbd r11, 0; | ||
798 | nop; | ||
799 | |||
800 | /* MS: we are in virtual mode */ | ||
801 | ret_from_irq: | ||
802 | lwi r11, r1, PTO + PT_MODE; | ||
803 | bnei r11, 2f; | ||
804 | |||
805 | add r11, r0, CURRENT_TASK; | ||
806 | lwi r11, r11, TS_THREAD_INFO; | ||
807 | lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ | ||
808 | andi r11, r11, _TIF_NEED_RESCHED; | ||
809 | beqi r11, 5f | ||
810 | bralid r15, schedule; | ||
811 | nop; /* delay slot */ | ||
812 | |||
813 | /* Maybe handle a signal */ | ||
814 | 5: add r11, r0, CURRENT_TASK; | ||
815 | lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */ | ||
816 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | ||
817 | andi r11, r11, _TIF_SIGPENDING; | ||
818 | beqid r11, no_intr_resched | ||
819 | /* Handle a signal return; Pending signals should be in r18. */ | ||
820 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | ||
821 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | ||
822 | bralid r15, do_signal; /* Handle any signals */ | ||
823 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | ||
824 | |||
825 | /* Finally, return to user state. */ | ||
826 | no_intr_resched: | ||
827 | /* Disable interrupts, we are now committed to the state restore */ | ||
828 | disable_irq | ||
829 | swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ | ||
830 | add r11, r0, CURRENT_TASK; | ||
831 | swi r11, r0, PER_CPU(CURRENT_SAVE); | ||
832 | VM_OFF; | ||
833 | tophys(r1,r1); | ||
834 | lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ | ||
835 | lwi r4, r1, PTO + PT_R4; | ||
836 | RESTORE_REGS | ||
837 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ | ||
838 | lwi r1, r1, PT_R1 - PT_SIZE; | ||
839 | bri 6f; | ||
840 | /* MS: Return to kernel state. */ | ||
841 | 2: VM_OFF /* MS: turn off MMU */ | ||
842 | tophys(r1,r1) | ||
843 | lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ | ||
844 | lwi r4, r1, PTO + PT_R4; | ||
845 | RESTORE_REGS | ||
846 | addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */ | ||
847 | tovirt(r1,r1); | ||
848 | 6: | ||
849 | IRQ_return: /* MS: Make global symbol for debugging */ | ||
850 | rtid r14, 0 | ||
851 | nop | ||
852 | |||
853 | /* | ||
854 | * `Debug' trap | ||
855 | * We enter dbtrap in "BIP" (breakpoint) mode. | ||
856 | * So we exit the breakpoint mode with an 'rtbd' and proceed with the | ||
857 | * original dbtrap. | ||
858 | * however, wait to save state first | ||
859 | */ | ||
860 | C_ENTRY(_debug_exception): | ||
861 | /* BIP bit is set on entry, no interrupts can occur */ | ||
862 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) | ||
863 | |||
864 | swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ | ||
865 | set_bip; /*equalize initial state for all possible entries*/ | ||
866 | clear_eip; | ||
867 | enable_irq; | ||
868 | lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/ | ||
869 | beqi r11, 1f; /* Jump ahead if coming from user */ | ||
870 | /* Kernel-mode state save. */ | ||
871 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ | ||
872 | tophys(r1,r11); | ||
873 | swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ | ||
874 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ | ||
875 | |||
876 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | ||
877 | swi r3, r1, PTO + PT_R3; | ||
878 | swi r4, r1, PTO + PT_R4; | ||
879 | SAVE_REGS; | ||
880 | |||
881 | addi r11, r0, 1; /* Was in kernel-mode. */ | ||
882 | swi r11, r1, PTO + PT_MODE; | ||
883 | brid 2f; | ||
884 | nop; /* Fill delay slot */ | ||
885 | 1: /* User-mode state save. */ | ||
886 | lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ | ||
887 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ | ||
888 | tophys(r1,r1); | ||
889 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ | ||
890 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ | ||
891 | tophys(r1,r1); | ||
892 | |||
893 | addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ | ||
894 | swi r3, r1, PTO + PT_R3; | ||
895 | swi r4, r1, PTO + PT_R4; | ||
896 | SAVE_REGS; | ||
897 | |||
898 | swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ | ||
899 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | ||
900 | swi r11, r1, PTO+PT_R1; /* Store user SP. */ | ||
901 | addi r11, r0, 1; | ||
902 | swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ | ||
903 | 2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ | ||
904 | /* Save away the syscall number. */ | ||
905 | swi r0, r1, PTO+PT_R0; | ||
906 | tovirt(r1,r1) | ||
907 | |||
908 | addi r5, r0, SIGTRAP /* send the trap signal */ | ||
909 | add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
910 | addk r7, r0, r0 /* 3rd param zero */ | ||
911 | |||
912 | set_vms; | ||
913 | la r11, r0, send_sig; | ||
914 | la r15, r0, dbtrap_call; | ||
915 | dbtrap_call: rtbd r11, 0; | ||
916 | nop; | ||
917 | |||
918 | set_bip; /* Ints masked for state restore*/ | ||
919 | lwi r11, r1, PTO+PT_MODE; | ||
920 | bnei r11, 2f; | ||
921 | |||
922 | /* Get current task ptr into r11 */ | ||
923 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
924 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
925 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | ||
926 | andi r11, r11, _TIF_NEED_RESCHED; | ||
927 | beqi r11, 5f; | ||
928 | |||
929 | /* Call the scheduler before returning from a syscall/trap. */ | ||
930 | |||
931 | bralid r15, schedule; /* Call scheduler */ | ||
932 | nop; /* delay slot */ | ||
933 | /* XXX Is PT_DTRACE handling needed here? */ | ||
934 | /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ | ||
935 | |||
936 | /* Maybe handle a signal */ | ||
937 | 5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
938 | lwi r11, r11, TS_THREAD_INFO; /* get thread info */ | ||
939 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ | ||
940 | andi r11, r11, _TIF_SIGPENDING; | ||
941 | beqi r11, 1f; /* Signals to handle, handle them */ | ||
942 | |||
943 | /* Handle a signal return; Pending signals should be in r18. */ | ||
944 | /* Not all registers are saved by the normal trap/interrupt entry | ||
945 | points (for instance, call-saved registers (because the normal | ||
946 | C-compiler calling sequence in the kernel makes sure they're | ||
947 | preserved), and call-clobbered registers in the case of | ||
948 | traps), but signal handlers may want to examine or change the | ||
949 | complete register state. Here we save anything not saved by | ||
950 | the normal entry sequence, so that it may be safely restored | ||
951 | (in a possibly modified form) after do_signal returns. */ | ||
952 | |||
953 | la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ | ||
954 | add r6, r0, r0; /* Arg 2: sigset_t *oldset */ | ||
955 | addi r7, r0, 0; /* Arg 3: int in_syscall */ | ||
956 | bralid r15, do_signal; /* Handle any signals */ | ||
957 | nop; | ||
958 | |||
959 | |||
960 | /* Finally, return to user state. */ | ||
961 | 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ | ||
962 | add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ | ||
963 | swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ | ||
964 | VM_OFF; | ||
965 | tophys(r1,r1); | ||
966 | |||
967 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
968 | lwi r4, r1, PTO+PT_R4; | ||
969 | RESTORE_REGS | ||
970 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | ||
971 | |||
972 | |||
973 | lwi r1, r1, PT_R1 - PT_SIZE; | ||
974 | /* Restore user stack pointer. */ | ||
975 | bri 6f; | ||
976 | |||
977 | /* Return to kernel state. */ | ||
978 | 2: VM_OFF; | ||
979 | tophys(r1,r1); | ||
980 | lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ | ||
981 | lwi r4, r1, PTO+PT_R4; | ||
982 | RESTORE_REGS | ||
983 | addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ | ||
984 | |||
985 | tovirt(r1,r1); | ||
986 | 6: | ||
987 | DBTRAP_return: /* Make global symbol for debugging */ | ||
988 | rtbd r14, 0; /* Instructions to return from an IRQ */ | ||
989 | nop; | ||
990 | |||
991 | |||
992 | |||
993 | ENTRY(_switch_to) | ||
994 | /* prepare return value */ | ||
995 | addk r3, r0, r31 | ||
996 | |||
997 | /* save registers in cpu_context */ | ||
998 | /* use r11 and r12, volatile registers, as temp register */ | ||
999 | /* give start of cpu_context for previous process */ | ||
1000 | addik r11, r5, TI_CPU_CONTEXT | ||
1001 | swi r1, r11, CC_R1 | ||
1002 | swi r2, r11, CC_R2 | ||
1003 | /* skip volatile registers. | ||
1004 | * they are saved on stack when we jumped to _switch_to() */ | ||
1005 | /* dedicated registers */ | ||
1006 | swi r13, r11, CC_R13 | ||
1007 | swi r14, r11, CC_R14 | ||
1008 | swi r15, r11, CC_R15 | ||
1009 | swi r16, r11, CC_R16 | ||
1010 | swi r17, r11, CC_R17 | ||
1011 | swi r18, r11, CC_R18 | ||
1012 | /* save non-volatile registers */ | ||
1013 | swi r19, r11, CC_R19 | ||
1014 | swi r20, r11, CC_R20 | ||
1015 | swi r21, r11, CC_R21 | ||
1016 | swi r22, r11, CC_R22 | ||
1017 | swi r23, r11, CC_R23 | ||
1018 | swi r24, r11, CC_R24 | ||
1019 | swi r25, r11, CC_R25 | ||
1020 | swi r26, r11, CC_R26 | ||
1021 | swi r27, r11, CC_R27 | ||
1022 | swi r28, r11, CC_R28 | ||
1023 | swi r29, r11, CC_R29 | ||
1024 | swi r30, r11, CC_R30 | ||
1025 | /* special purpose registers */ | ||
1026 | mfs r12, rmsr | ||
1027 | nop | ||
1028 | swi r12, r11, CC_MSR | ||
1029 | mfs r12, rear | ||
1030 | nop | ||
1031 | swi r12, r11, CC_EAR | ||
1032 | mfs r12, resr | ||
1033 | nop | ||
1034 | swi r12, r11, CC_ESR | ||
1035 | mfs r12, rfsr | ||
1036 | nop | ||
1037 | swi r12, r11, CC_FSR | ||
1038 | |||
1039 | /* update r31, the current */ | ||
1040 | lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ | ||
1041 | /* stored it to current_save too */ | ||
1042 | swi r31, r0, PER_CPU(CURRENT_SAVE) | ||
1043 | |||
1044 | /* get new process' cpu context and restore */ | ||
1045 | /* give me start where start context of next task */ | ||
1046 | addik r11, r6, TI_CPU_CONTEXT | ||
1047 | |||
1048 | /* non-volatile registers */ | ||
1049 | lwi r30, r11, CC_R30 | ||
1050 | lwi r29, r11, CC_R29 | ||
1051 | lwi r28, r11, CC_R28 | ||
1052 | lwi r27, r11, CC_R27 | ||
1053 | lwi r26, r11, CC_R26 | ||
1054 | lwi r25, r11, CC_R25 | ||
1055 | lwi r24, r11, CC_R24 | ||
1056 | lwi r23, r11, CC_R23 | ||
1057 | lwi r22, r11, CC_R22 | ||
1058 | lwi r21, r11, CC_R21 | ||
1059 | lwi r20, r11, CC_R20 | ||
1060 | lwi r19, r11, CC_R19 | ||
1061 | /* dedicated registers */ | ||
1062 | lwi r18, r11, CC_R18 | ||
1063 | lwi r17, r11, CC_R17 | ||
1064 | lwi r16, r11, CC_R16 | ||
1065 | lwi r15, r11, CC_R15 | ||
1066 | lwi r14, r11, CC_R14 | ||
1067 | lwi r13, r11, CC_R13 | ||
1068 | /* skip volatile registers */ | ||
1069 | lwi r2, r11, CC_R2 | ||
1070 | lwi r1, r11, CC_R1 | ||
1071 | |||
1072 | /* special purpose registers */ | ||
1073 | lwi r12, r11, CC_FSR | ||
1074 | mts rfsr, r12 | ||
1075 | nop | ||
1076 | lwi r12, r11, CC_MSR | ||
1077 | mts rmsr, r12 | ||
1078 | nop | ||
1079 | |||
1080 | rtsd r15, 8 | ||
1081 | nop | ||
1082 | |||
1083 | ENTRY(_reset) | ||
1084 | brai 0x70; /* Jump back to FS-boot */ | ||
1085 | |||
1086 | ENTRY(_break) | ||
1087 | mfs r5, rmsr | ||
1088 | nop | ||
1089 | swi r5, r0, 0x250 + TOPHYS(r0_ram) | ||
1090 | mfs r5, resr | ||
1091 | nop | ||
1092 | swi r5, r0, 0x254 + TOPHYS(r0_ram) | ||
1093 | bri 0 | ||
1094 | |||
1095 | /* These are compiled and loaded into high memory, then | ||
1096 | * copied into place in mach_early_setup */ | ||
1097 | .section .init.ivt, "ax" | ||
1098 | .org 0x0 | ||
1099 | /* this is very important - here is the reset vector */ | ||
1100 | /* in current MMU branch you don't care what is here - it is | ||
1101 | * used from bootloader site - but this is correct for FS-BOOT */ | ||
1102 | brai 0x70 | ||
1103 | nop | ||
1104 | brai TOPHYS(_user_exception); /* syscall handler */ | ||
1105 | brai TOPHYS(_interrupt); /* Interrupt handler */ | ||
1106 | brai TOPHYS(_break); /* nmi trap handler */ | ||
1107 | brai TOPHYS(_hw_exception_handler); /* HW exception handler */ | ||
1108 | |||
1109 | .org 0x60 | ||
1110 | brai TOPHYS(_debug_exception); /* debug trap handler*/ | ||
1111 | |||
1112 | .section .rodata,"a" | ||
1113 | #include "syscall_table.S" | ||
1114 | |||
1115 | syscall_table_size=(.-sys_call_table) | ||
1116 | |||
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c index 4a8a4064c7ee..0cb64a31e89a 100644 --- a/arch/microblaze/kernel/exceptions.c +++ b/arch/microblaze/kernel/exceptions.c | |||
@@ -21,9 +21,9 @@ | |||
21 | 21 | ||
22 | #include <asm/exceptions.h> | 22 | #include <asm/exceptions.h> |
23 | #include <asm/entry.h> /* For KM CPU var */ | 23 | #include <asm/entry.h> /* For KM CPU var */ |
24 | #include <asm/uaccess.h> | 24 | #include <linux/uaccess.h> |
25 | #include <asm/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <asm/ptrace.h> | 26 | #include <linux/ptrace.h> |
27 | #include <asm/current.h> | 27 | #include <asm/current.h> |
28 | 28 | ||
29 | #define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02 | 29 | #define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02 |
@@ -31,7 +31,7 @@ | |||
31 | #define MICROBLAZE_DBUS_EXCEPTION 0x04 | 31 | #define MICROBLAZE_DBUS_EXCEPTION 0x04 |
32 | #define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05 | 32 | #define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05 |
33 | #define MICROBLAZE_FPU_EXCEPTION 0x06 | 33 | #define MICROBLAZE_FPU_EXCEPTION 0x06 |
34 | #define MICROBLAZE_PRIVILEG_EXCEPTION 0x07 | 34 | #define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07 |
35 | 35 | ||
36 | static DEFINE_SPINLOCK(die_lock); | 36 | static DEFINE_SPINLOCK(die_lock); |
37 | 37 | ||
@@ -66,6 +66,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | |||
66 | asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | 66 | asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, |
67 | int fsr, int addr) | 67 | int fsr, int addr) |
68 | { | 68 | { |
69 | #ifdef CONFIG_MMU | ||
70 | int code; | ||
71 | addr = regs->pc; | ||
72 | #endif | ||
73 | |||
69 | #if 0 | 74 | #if 0 |
70 | printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n", | 75 | printk(KERN_WARNING "Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n", |
71 | type, user_mode(regs) ? "user" : "kernel", fsr, | 76 | type, user_mode(regs) ? "user" : "kernel", fsr, |
@@ -74,7 +79,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
74 | 79 | ||
75 | switch (type & 0x1F) { | 80 | switch (type & 0x1F) { |
76 | case MICROBLAZE_ILL_OPCODE_EXCEPTION: | 81 | case MICROBLAZE_ILL_OPCODE_EXCEPTION: |
77 | _exception(SIGILL, regs, ILL_ILLOPC, addr); | 82 | if (user_mode(regs)) { |
83 | printk(KERN_WARNING "Illegal opcode exception in user mode.\n"); | ||
84 | _exception(SIGILL, regs, ILL_ILLOPC, addr); | ||
85 | return; | ||
86 | } | ||
87 | printk(KERN_WARNING "Illegal opcode exception in kernel mode.\n"); | ||
88 | die("opcode exception", regs, SIGBUS); | ||
78 | break; | 89 | break; |
79 | case MICROBLAZE_IBUS_EXCEPTION: | 90 | case MICROBLAZE_IBUS_EXCEPTION: |
80 | if (user_mode(regs)) { | 91 | if (user_mode(regs)) { |
@@ -95,11 +106,16 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
95 | die("bus exception", regs, SIGBUS); | 106 | die("bus exception", regs, SIGBUS); |
96 | break; | 107 | break; |
97 | case MICROBLAZE_DIV_ZERO_EXCEPTION: | 108 | case MICROBLAZE_DIV_ZERO_EXCEPTION: |
98 | printk(KERN_WARNING "Divide by zero exception\n"); | 109 | if (user_mode(regs)) { |
99 | _exception(SIGILL, regs, ILL_ILLOPC, addr); | 110 | printk(KERN_WARNING "Divide by zero exception in user mode\n"); |
111 | _exception(SIGILL, regs, ILL_ILLOPC, addr); | ||
112 | return; | ||
113 | } | ||
114 | printk(KERN_WARNING "Divide by zero exception in kernel mode.\n"); | ||
115 | die("Divide by exception", regs, SIGBUS); | ||
100 | break; | 116 | break; |
101 | |||
102 | case MICROBLAZE_FPU_EXCEPTION: | 117 | case MICROBLAZE_FPU_EXCEPTION: |
118 | printk(KERN_WARNING "FPU exception\n"); | ||
103 | /* IEEE FP exception */ | 119 | /* IEEE FP exception */ |
104 | /* I removed fsr variable and use code var for storing fsr */ | 120 | /* I removed fsr variable and use code var for storing fsr */ |
105 | if (fsr & FSR_IO) | 121 | if (fsr & FSR_IO) |
@@ -115,7 +131,20 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type, | |||
115 | _exception(SIGFPE, regs, fsr, addr); | 131 | _exception(SIGFPE, regs, fsr, addr); |
116 | break; | 132 | break; |
117 | 133 | ||
134 | #ifdef CONFIG_MMU | ||
135 | case MICROBLAZE_PRIVILEGED_EXCEPTION: | ||
136 | printk(KERN_WARNING "Privileged exception\n"); | ||
137 | /* "brk r0,r0" - used as debug breakpoint */ | ||
138 | if (get_user(code, (unsigned long *)regs->pc) == 0 | ||
139 | && code == 0x980c0000) { | ||
140 | _exception(SIGTRAP, regs, TRAP_BRKPT, addr); | ||
141 | } else { | ||
142 | _exception(SIGILL, regs, ILL_PRVOPC, addr); | ||
143 | } | ||
144 | break; | ||
145 | #endif | ||
118 | default: | 146 | default: |
147 | /* FIXME what to do in unexpected exception */ | ||
119 | printk(KERN_WARNING "Unexpected exception %02x " | 148 | printk(KERN_WARNING "Unexpected exception %02x " |
120 | "PC=%08x in %s mode\n", type, (unsigned int) addr, | 149 | "PC=%08x in %s mode\n", type, (unsigned int) addr, |
121 | kernel_mode(regs) ? "kernel" : "user"); | 150 | kernel_mode(regs) ? "kernel" : "user"); |
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 319dc35fc922..e568d6ec621b 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S | |||
@@ -3,6 +3,26 @@ | |||
3 | * Copyright (C) 2007-2009 PetaLogix | 3 | * Copyright (C) 2007-2009 PetaLogix |
4 | * Copyright (C) 2006 Atmark Techno, Inc. | 4 | * Copyright (C) 2006 Atmark Techno, Inc. |
5 | * | 5 | * |
6 | * MMU code derived from arch/ppc/kernel/head_4xx.S: | ||
7 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
8 | * Initial PowerPC version. | ||
9 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
10 | * Rewritten for PReP | ||
11 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
12 | * Low-level exception handers, MMU support, and rewrite. | ||
13 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
14 | * PowerPC 8xx modifications. | ||
15 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
16 | * PowerPC 403GCX modifications. | ||
17 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
18 | * PowerPC 403GCX/405GP modifications. | ||
19 | * Copyright 2000 MontaVista Software Inc. | ||
20 | * PPC405 modifications | ||
21 | * PowerPC 403GCX/405GP modifications. | ||
22 | * Author: MontaVista Software, Inc. | ||
23 | * frank_rowand@mvista.com or source@mvista.com | ||
24 | * debbie_chu@mvista.com | ||
25 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | 26 | * This file is subject to the terms and conditions of the GNU General Public |
7 | * License. See the file "COPYING" in the main directory of this archive | 27 | * License. See the file "COPYING" in the main directory of this archive |
8 | * for more details. | 28 | * for more details. |
@@ -12,6 +32,22 @@ | |||
12 | #include <asm/thread_info.h> | 32 | #include <asm/thread_info.h> |
13 | #include <asm/page.h> | 33 | #include <asm/page.h> |
14 | 34 | ||
35 | #ifdef CONFIG_MMU | ||
36 | #include <asm/setup.h> /* COMMAND_LINE_SIZE */ | ||
37 | #include <asm/mmu.h> | ||
38 | #include <asm/processor.h> | ||
39 | |||
40 | .data | ||
41 | .global empty_zero_page | ||
42 | .align 12 | ||
43 | empty_zero_page: | ||
44 | .space 4096 | ||
45 | .global swapper_pg_dir | ||
46 | swapper_pg_dir: | ||
47 | .space 4096 | ||
48 | |||
49 | #endif /* CONFIG_MMU */ | ||
50 | |||
15 | .text | 51 | .text |
16 | ENTRY(_start) | 52 | ENTRY(_start) |
17 | mfs r1, rmsr | 53 | mfs r1, rmsr |
@@ -32,6 +68,123 @@ _copy_fdt: | |||
32 | addik r3, r3, -4 /* descrement loop */ | 68 | addik r3, r3, -4 /* descrement loop */ |
33 | no_fdt_arg: | 69 | no_fdt_arg: |
34 | 70 | ||
71 | #ifdef CONFIG_MMU | ||
72 | |||
73 | #ifndef CONFIG_CMDLINE_BOOL | ||
74 | /* | ||
75 | * handling command line | ||
76 | * copy command line to __init_end. There is space for storing command line. | ||
77 | */ | ||
78 | or r6, r0, r0 /* incremment */ | ||
79 | ori r4, r0, __init_end /* load address of command line */ | ||
80 | tophys(r4,r4) /* convert to phys address */ | ||
81 | ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ | ||
82 | _copy_command_line: | ||
83 | lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ | ||
84 | sb r7, r4, r6 /* addr[r4+r6]= r7*/ | ||
85 | addik r6, r6, 1 /* increment counting */ | ||
86 | bgtid r3, _copy_command_line /* loop for all entries */ | ||
87 | addik r3, r3, -1 /* descrement loop */ | ||
88 | addik r5, r4, 0 /* add new space for command line */ | ||
89 | tovirt(r5,r5) | ||
90 | #endif /* CONFIG_CMDLINE_BOOL */ | ||
91 | |||
92 | #ifdef NOT_COMPILE | ||
93 | /* save bram context */ | ||
94 | or r6, r0, r0 /* incremment */ | ||
95 | ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */ | ||
96 | ori r3, r0, (LMB_SIZE - 4) | ||
97 | _copy_bram: | ||
98 | lw r7, r0, r6 /* r7 = r0 + r6 */ | ||
99 | sw r7, r4, r6 /* addr[r4 + r6] = r7*/ | ||
100 | addik r6, r6, 4 /* increment counting */ | ||
101 | bgtid r3, _copy_bram /* loop for all entries */ | ||
102 | addik r3, r3, -4 /* descrement loop */ | ||
103 | #endif | ||
104 | /* We have to turn on the MMU right away. */ | ||
105 | |||
106 | /* | ||
107 | * Set up the initial MMU state so we can do the first level of | ||
108 | * kernel initialization. This maps the first 16 MBytes of memory 1:1 | ||
109 | * virtual to physical. | ||
110 | */ | ||
111 | nop | ||
112 | addik r3, r0, 63 /* Invalidate all TLB entries */ | ||
113 | _invalidate: | ||
114 | mts rtlbx, r3 | ||
115 | mts rtlbhi, r0 /* flush: ensure V is clear */ | ||
116 | bgtid r3, _invalidate /* loop for all entries */ | ||
117 | addik r3, r3, -1 | ||
118 | /* sync */ | ||
119 | |||
120 | /* | ||
121 | * We should still be executing code at physical address area | ||
122 | * RAM_BASEADDR at this point. However, kernel code is at | ||
123 | * a virtual address. So, set up a TLB mapping to cover this once | ||
124 | * translation is enabled. | ||
125 | */ | ||
126 | |||
127 | addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ | ||
128 | tophys(r4,r3) /* Load the kernel physical address */ | ||
129 | |||
130 | mts rpid,r0 /* Load the kernel PID */ | ||
131 | nop | ||
132 | bri 4 | ||
133 | |||
134 | /* | ||
135 | * Configure and load two entries into TLB slots 0 and 1. | ||
136 | * In case we are pinning TLBs, these are reserved in by the | ||
137 | * other TLB functions. If not reserving, then it doesn't | ||
138 | * matter where they are loaded. | ||
139 | */ | ||
140 | andi r4,r4,0xfffffc00 /* Mask off the real page number */ | ||
141 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ | ||
142 | |||
143 | andi r3,r3,0xfffffc00 /* Mask off the effective page number */ | ||
144 | ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) | ||
145 | |||
146 | mts rtlbx,r0 /* TLB slow 0 */ | ||
147 | |||
148 | mts rtlblo,r4 /* Load the data portion of the entry */ | ||
149 | mts rtlbhi,r3 /* Load the tag portion of the entry */ | ||
150 | |||
151 | addik r4, r4, 0x01000000 /* Map next 16 M entries */ | ||
152 | addik r3, r3, 0x01000000 | ||
153 | |||
154 | ori r6,r0,1 /* TLB slot 1 */ | ||
155 | mts rtlbx,r6 | ||
156 | |||
157 | mts rtlblo,r4 /* Load the data portion of the entry */ | ||
158 | mts rtlbhi,r3 /* Load the tag portion of the entry */ | ||
159 | |||
160 | /* | ||
161 | * Load a TLB entry for LMB, since we need access to | ||
162 | * the exception vectors, using a 4k real==virtual mapping. | ||
163 | */ | ||
164 | ori r6,r0,3 /* TLB slot 3 */ | ||
165 | mts rtlbx,r6 | ||
166 | |||
167 | ori r4,r0,(TLB_WR | TLB_EX) | ||
168 | ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) | ||
169 | |||
170 | mts rtlblo,r4 /* Load the data portion of the entry */ | ||
171 | mts rtlbhi,r3 /* Load the tag portion of the entry */ | ||
172 | |||
173 | /* | ||
174 | * We now have the lower 16 Meg of RAM mapped into TLB entries, and the | ||
175 | * caches ready to work. | ||
176 | */ | ||
177 | turn_on_mmu: | ||
178 | ori r15,r0,start_here | ||
179 | ori r4,r0,MSR_KERNEL_VMS | ||
180 | mts rmsr,r4 | ||
181 | nop | ||
182 | rted r15,0 /* enables MMU */ | ||
183 | nop | ||
184 | |||
185 | start_here: | ||
186 | #endif /* CONFIG_MMU */ | ||
187 | |||
35 | /* Initialize small data anchors */ | 188 | /* Initialize small data anchors */ |
36 | la r13, r0, _KERNEL_SDA_BASE_ | 189 | la r13, r0, _KERNEL_SDA_BASE_ |
37 | la r2, r0, _KERNEL_SDA2_BASE_ | 190 | la r2, r0, _KERNEL_SDA2_BASE_ |
@@ -51,6 +204,43 @@ no_fdt_arg: | |||
51 | brald r15, r8 | 204 | brald r15, r8 |
52 | nop | 205 | nop |
53 | 206 | ||
207 | #ifndef CONFIG_MMU | ||
54 | la r15, r0, machine_halt | 208 | la r15, r0, machine_halt |
55 | braid start_kernel | 209 | braid start_kernel |
56 | nop | 210 | nop |
211 | #else | ||
212 | /* | ||
213 | * Initialize the MMU. | ||
214 | */ | ||
215 | bralid r15, mmu_init | ||
216 | nop | ||
217 | |||
218 | /* Go back to running unmapped so we can load up new values | ||
219 | * and change to using our exception vectors. | ||
220 | * On the MicroBlaze, all we invalidate the used TLB entries to clear | ||
221 | * the old 16M byte TLB mappings. | ||
222 | */ | ||
223 | ori r15,r0,TOPHYS(kernel_load_context) | ||
224 | ori r4,r0,MSR_KERNEL | ||
225 | mts rmsr,r4 | ||
226 | nop | ||
227 | bri 4 | ||
228 | rted r15,0 | ||
229 | nop | ||
230 | |||
231 | /* Load up the kernel context */ | ||
232 | kernel_load_context: | ||
233 | # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away. | ||
234 | ori r5,r0,3 | ||
235 | mts rtlbx,r5 | ||
236 | nop | ||
237 | mts rtlbhi,r0 | ||
238 | nop | ||
239 | addi r15, r0, machine_halt | ||
240 | ori r17, r0, start_kernel | ||
241 | ori r4, r0, MSR_KERNEL_VMS | ||
242 | mts rmsr, r4 | ||
243 | nop | ||
244 | rted r17, 0 /* enable MMU and jump to start_kernel */ | ||
245 | nop | ||
246 | #endif /* CONFIG_MMU */ | ||
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index cf9486d99838..9d591cd74fc2 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S | |||
@@ -53,6 +53,12 @@ | |||
53 | * - Illegal instruction opcode | 53 | * - Illegal instruction opcode |
54 | * - Divide-by-zero | 54 | * - Divide-by-zero |
55 | * | 55 | * |
56 | * - Privileged instruction exception (MMU) | ||
57 | * - Data storage exception (MMU) | ||
58 | * - Instruction storage exception (MMU) | ||
59 | * - Data TLB miss exception (MMU) | ||
60 | * - Instruction TLB miss exception (MMU) | ||
61 | * | ||
56 | * Note we disable interrupts during exception handling, otherwise we will | 62 | * Note we disable interrupts during exception handling, otherwise we will |
57 | * possibly get multiple re-entrancy if interrupt handles themselves cause | 63 | * possibly get multiple re-entrancy if interrupt handles themselves cause |
58 | * exceptions. JW | 64 | * exceptions. JW |
@@ -71,9 +77,24 @@ | |||
71 | #include <asm/asm-offsets.h> | 77 | #include <asm/asm-offsets.h> |
72 | 78 | ||
73 | /* Helpful Macros */ | 79 | /* Helpful Macros */ |
80 | #ifndef CONFIG_MMU | ||
74 | #define EX_HANDLER_STACK_SIZ (4*19) | 81 | #define EX_HANDLER_STACK_SIZ (4*19) |
82 | #endif | ||
75 | #define NUM_TO_REG(num) r ## num | 83 | #define NUM_TO_REG(num) r ## num |
76 | 84 | ||
85 | #ifdef CONFIG_MMU | ||
86 | /* FIXME you can't change first load of MSR because there is | ||
87 | * hardcoded jump bri 4 */ | ||
88 | #define RESTORE_STATE \ | ||
89 | lwi r3, r1, PT_R3; \ | ||
90 | lwi r4, r1, PT_R4; \ | ||
91 | lwi r5, r1, PT_R5; \ | ||
92 | lwi r6, r1, PT_R6; \ | ||
93 | lwi r11, r1, PT_R11; \ | ||
94 | lwi r31, r1, PT_R31; \ | ||
95 | lwi r1, r0, TOPHYS(r0_ram + 0); | ||
96 | #endif /* CONFIG_MMU */ | ||
97 | |||
77 | #define LWREG_NOP \ | 98 | #define LWREG_NOP \ |
78 | bri ex_handler_unhandled; \ | 99 | bri ex_handler_unhandled; \ |
79 | nop; | 100 | nop; |
@@ -106,6 +127,54 @@ | |||
106 | or r3, r0, NUM_TO_REG (regnum); \ | 127 | or r3, r0, NUM_TO_REG (regnum); \ |
107 | bri ex_sw_tail; | 128 | bri ex_sw_tail; |
108 | 129 | ||
130 | #ifdef CONFIG_MMU | ||
131 | #define R3_TO_LWREG_VM_V(regnum) \ | ||
132 | brid ex_lw_end_vm; \ | ||
133 | swi r3, r7, 4 * regnum; | ||
134 | |||
135 | #define R3_TO_LWREG_VM(regnum) \ | ||
136 | brid ex_lw_end_vm; \ | ||
137 | or NUM_TO_REG (regnum), r0, r3; | ||
138 | |||
139 | #define SWREG_TO_R3_VM_V(regnum) \ | ||
140 | brid ex_sw_tail_vm; \ | ||
141 | lwi r3, r7, 4 * regnum; | ||
142 | |||
143 | #define SWREG_TO_R3_VM(regnum) \ | ||
144 | brid ex_sw_tail_vm; \ | ||
145 | or r3, r0, NUM_TO_REG (regnum); | ||
146 | |||
147 | /* Shift right instruction depending on available configuration */ | ||
148 | #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0 | ||
149 | #define BSRLI(rD, rA, imm) \ | ||
150 | bsrli rD, rA, imm | ||
151 | #elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0 | ||
152 | #define BSRLI(rD, rA, imm) \ | ||
153 | ori rD, r0, (1 << imm); \ | ||
154 | idivu rD, rD, rA | ||
155 | #else | ||
156 | #define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA) | ||
157 | /* Only the used shift constants defined here - add more if needed */ | ||
158 | #define BSRLI2(rD, rA) \ | ||
159 | srl rD, rA; /* << 1 */ \ | ||
160 | srl rD, rD; /* << 2 */ | ||
161 | #define BSRLI10(rD, rA) \ | ||
162 | srl rD, rA; /* << 1 */ \ | ||
163 | srl rD, rD; /* << 2 */ \ | ||
164 | srl rD, rD; /* << 3 */ \ | ||
165 | srl rD, rD; /* << 4 */ \ | ||
166 | srl rD, rD; /* << 5 */ \ | ||
167 | srl rD, rD; /* << 6 */ \ | ||
168 | srl rD, rD; /* << 7 */ \ | ||
169 | srl rD, rD; /* << 8 */ \ | ||
170 | srl rD, rD; /* << 9 */ \ | ||
171 | srl rD, rD /* << 10 */ | ||
172 | #define BSRLI20(rD, rA) \ | ||
173 | BSRLI10(rD, rA); \ | ||
174 | BSRLI10(rD, rD) | ||
175 | #endif | ||
176 | #endif /* CONFIG_MMU */ | ||
177 | |||
109 | .extern other_exception_handler /* Defined in exception.c */ | 178 | .extern other_exception_handler /* Defined in exception.c */ |
110 | 179 | ||
111 | /* | 180 | /* |
@@ -163,34 +232,119 @@ | |||
163 | 232 | ||
164 | /* wrappers to restore state before coming to entry.S */ | 233 | /* wrappers to restore state before coming to entry.S */ |
165 | 234 | ||
235 | #ifdef CONFIG_MMU | ||
236 | .section .rodata | ||
237 | .align 4 | ||
238 | _MB_HW_ExceptionVectorTable: | ||
239 | /* 0 - Undefined */ | ||
240 | .long TOPHYS(ex_handler_unhandled) | ||
241 | /* 1 - Unaligned data access exception */ | ||
242 | .long TOPHYS(handle_unaligned_ex) | ||
243 | /* 2 - Illegal op-code exception */ | ||
244 | .long TOPHYS(full_exception_trapw) | ||
245 | /* 3 - Instruction bus error exception */ | ||
246 | .long TOPHYS(full_exception_trapw) | ||
247 | /* 4 - Data bus error exception */ | ||
248 | .long TOPHYS(full_exception_trapw) | ||
249 | /* 5 - Divide by zero exception */ | ||
250 | .long TOPHYS(full_exception_trapw) | ||
251 | /* 6 - Floating point unit exception */ | ||
252 | .long TOPHYS(full_exception_trapw) | ||
253 | /* 7 - Privileged instruction exception */ | ||
254 | .long TOPHYS(full_exception_trapw) | ||
255 | /* 8 - 15 - Undefined */ | ||
256 | .long TOPHYS(ex_handler_unhandled) | ||
257 | .long TOPHYS(ex_handler_unhandled) | ||
258 | .long TOPHYS(ex_handler_unhandled) | ||
259 | .long TOPHYS(ex_handler_unhandled) | ||
260 | .long TOPHYS(ex_handler_unhandled) | ||
261 | .long TOPHYS(ex_handler_unhandled) | ||
262 | .long TOPHYS(ex_handler_unhandled) | ||
263 | .long TOPHYS(ex_handler_unhandled) | ||
264 | /* 16 - Data storage exception */ | ||
265 | .long TOPHYS(handle_data_storage_exception) | ||
266 | /* 17 - Instruction storage exception */ | ||
267 | .long TOPHYS(handle_instruction_storage_exception) | ||
268 | /* 18 - Data TLB miss exception */ | ||
269 | .long TOPHYS(handle_data_tlb_miss_exception) | ||
270 | /* 19 - Instruction TLB miss exception */ | ||
271 | .long TOPHYS(handle_instruction_tlb_miss_exception) | ||
272 | /* 20 - 31 - Undefined */ | ||
273 | .long TOPHYS(ex_handler_unhandled) | ||
274 | .long TOPHYS(ex_handler_unhandled) | ||
275 | .long TOPHYS(ex_handler_unhandled) | ||
276 | .long TOPHYS(ex_handler_unhandled) | ||
277 | .long TOPHYS(ex_handler_unhandled) | ||
278 | .long TOPHYS(ex_handler_unhandled) | ||
279 | .long TOPHYS(ex_handler_unhandled) | ||
280 | .long TOPHYS(ex_handler_unhandled) | ||
281 | .long TOPHYS(ex_handler_unhandled) | ||
282 | .long TOPHYS(ex_handler_unhandled) | ||
283 | .long TOPHYS(ex_handler_unhandled) | ||
284 | .long TOPHYS(ex_handler_unhandled) | ||
285 | #endif | ||
286 | |||
166 | .global _hw_exception_handler | 287 | .global _hw_exception_handler |
167 | .section .text | 288 | .section .text |
168 | .align 4 | 289 | .align 4 |
169 | .ent _hw_exception_handler | 290 | .ent _hw_exception_handler |
170 | _hw_exception_handler: | 291 | _hw_exception_handler: |
292 | #ifndef CONFIG_MMU | ||
171 | addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ | 293 | addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ |
294 | #else | ||
295 | swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */ | ||
296 | /* Save date to kernel memory. Here is the problem | ||
297 | * when you came from user space */ | ||
298 | ori r1, r0, TOPHYS(r0_ram + 28); | ||
299 | #endif | ||
172 | swi r3, r1, PT_R3 | 300 | swi r3, r1, PT_R3 |
173 | swi r4, r1, PT_R4 | 301 | swi r4, r1, PT_R4 |
174 | swi r5, r1, PT_R5 | 302 | swi r5, r1, PT_R5 |
175 | swi r6, r1, PT_R6 | 303 | swi r6, r1, PT_R6 |
176 | 304 | ||
177 | mfs r5, rmsr; | 305 | #ifdef CONFIG_MMU |
178 | nop | 306 | swi r11, r1, PT_R11 |
179 | swi r5, r1, 0; | 307 | swi r31, r1, PT_R31 |
180 | mfs r4, rbtr /* Save BTR before jumping to handler */ | 308 | lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */ |
181 | nop | 309 | #endif |
310 | |||
182 | mfs r3, resr | 311 | mfs r3, resr |
183 | nop | 312 | nop |
313 | mfs r4, rear; | ||
314 | nop | ||
184 | 315 | ||
316 | #ifndef CONFIG_MMU | ||
185 | andi r5, r3, 0x1000; /* Check ESR[DS] */ | 317 | andi r5, r3, 0x1000; /* Check ESR[DS] */ |
186 | beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ | 318 | beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ |
187 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ | 319 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ |
188 | nop | 320 | nop |
189 | not_in_delay_slot: | 321 | not_in_delay_slot: |
190 | swi r17, r1, PT_R17 | 322 | swi r17, r1, PT_R17 |
323 | #endif | ||
191 | 324 | ||
192 | andi r5, r3, 0x1F; /* Extract ESR[EXC] */ | 325 | andi r5, r3, 0x1F; /* Extract ESR[EXC] */ |
193 | 326 | ||
327 | #ifdef CONFIG_MMU | ||
328 | /* Calculate exception vector offset = r5 << 2 */ | ||
329 | addk r6, r5, r5; /* << 1 */ | ||
330 | addk r6, r6, r6; /* << 2 */ | ||
331 | |||
332 | /* counting which exception happen */ | ||
333 | lwi r5, r0, 0x200 + TOPHYS(r0_ram) | ||
334 | addi r5, r5, 1 | ||
335 | swi r5, r0, 0x200 + TOPHYS(r0_ram) | ||
336 | lwi r5, r6, 0x200 + TOPHYS(r0_ram) | ||
337 | addi r5, r5, 1 | ||
338 | swi r5, r6, 0x200 + TOPHYS(r0_ram) | ||
339 | /* end */ | ||
340 | /* Load the HW Exception vector */ | ||
341 | lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) | ||
342 | bra r6 | ||
343 | |||
344 | full_exception_trapw: | ||
345 | RESTORE_STATE | ||
346 | bri full_exception_trap | ||
347 | #else | ||
194 | /* Exceptions enabled here. This will allow nested exceptions */ | 348 | /* Exceptions enabled here. This will allow nested exceptions */ |
195 | mfs r6, rmsr; | 349 | mfs r6, rmsr; |
196 | nop | 350 | nop |
@@ -254,6 +408,7 @@ handle_other_ex: /* Handle Other exceptions here */ | |||
254 | lwi r18, r1, PT_R18 | 408 | lwi r18, r1, PT_R18 |
255 | 409 | ||
256 | bri ex_handler_done; /* Complete exception handling */ | 410 | bri ex_handler_done; /* Complete exception handling */ |
411 | #endif | ||
257 | 412 | ||
258 | /* 0x01 - Unaligned data access exception | 413 | /* 0x01 - Unaligned data access exception |
259 | * This occurs when a word access is not aligned on a word boundary, | 414 | * This occurs when a word access is not aligned on a word boundary, |
@@ -265,11 +420,28 @@ handle_other_ex: /* Handle Other exceptions here */ | |||
265 | handle_unaligned_ex: | 420 | handle_unaligned_ex: |
266 | /* Working registers already saved: R3, R4, R5, R6 | 421 | /* Working registers already saved: R3, R4, R5, R6 |
267 | * R3 = ESR | 422 | * R3 = ESR |
268 | * R4 = BTR | 423 | * R4 = EAR |
269 | */ | 424 | */ |
270 | mfs r4, rear; | 425 | #ifdef CONFIG_MMU |
426 | andi r6, r3, 0x1000 /* Check ESR[DS] */ | ||
427 | beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ | ||
428 | mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ | ||
271 | nop | 429 | nop |
430 | _no_delayslot: | ||
431 | #endif | ||
432 | |||
433 | #ifdef CONFIG_MMU | ||
434 | /* Check if unaligned address is last on a 4k page */ | ||
435 | andi r5, r4, 0xffc | ||
436 | xori r5, r5, 0xffc | ||
437 | bnei r5, _unaligned_ex2 | ||
438 | _unaligned_ex1: | ||
439 | RESTORE_STATE; | ||
440 | /* Another page must be accessed or physical address not in page table */ | ||
441 | bri unaligned_data_trap | ||
272 | 442 | ||
443 | _unaligned_ex2: | ||
444 | #endif | ||
273 | andi r6, r3, 0x3E0; /* Mask and extract the register operand */ | 445 | andi r6, r3, 0x3E0; /* Mask and extract the register operand */ |
274 | srl r6, r6; /* r6 >> 5 */ | 446 | srl r6, r6; /* r6 >> 5 */ |
275 | srl r6, r6; | 447 | srl r6, r6; |
@@ -278,6 +450,45 @@ handle_unaligned_ex: | |||
278 | srl r6, r6; | 450 | srl r6, r6; |
279 | /* Store the register operand in a temporary location */ | 451 | /* Store the register operand in a temporary location */ |
280 | sbi r6, r0, TOPHYS(ex_reg_op); | 452 | sbi r6, r0, TOPHYS(ex_reg_op); |
453 | #ifdef CONFIG_MMU | ||
454 | /* Get physical address */ | ||
455 | /* If we are faulting a kernel address, we have to use the | ||
456 | * kernel page tables. | ||
457 | */ | ||
458 | ori r5, r0, CONFIG_KERNEL_START | ||
459 | cmpu r5, r4, r5 | ||
460 | bgti r5, _unaligned_ex3 | ||
461 | ori r5, r0, swapper_pg_dir | ||
462 | bri _unaligned_ex4 | ||
463 | |||
464 | /* Get the PGD for the current thread. */ | ||
465 | _unaligned_ex3: /* user thread */ | ||
466 | addi r5 ,CURRENT_TASK, TOPHYS(0); /* get current task address */ | ||
467 | lwi r5, r5, TASK_THREAD + PGDIR | ||
468 | _unaligned_ex4: | ||
469 | tophys(r5,r5) | ||
470 | BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */ | ||
471 | andi r6, r6, 0xffc | ||
472 | /* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */ | ||
473 | or r5, r5, r6 | ||
474 | lwi r6, r5, 0 /* Get L1 entry */ | ||
475 | andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */ | ||
476 | beqi r5, _unaligned_ex1 /* Bail if no table */ | ||
477 | |||
478 | tophys(r5,r5) | ||
479 | BSRLI(r6,r4,10) /* Compute PTE address */ | ||
480 | andi r6, r6, 0xffc | ||
481 | andi r5, r5, 0xfffff003 | ||
482 | or r5, r5, r6 | ||
483 | lwi r5, r5, 0 /* Get Linux PTE */ | ||
484 | |||
485 | andi r6, r5, _PAGE_PRESENT | ||
486 | beqi r6, _unaligned_ex1 /* Bail if no page */ | ||
487 | |||
488 | andi r5, r5, 0xfffff000 /* Extract RPN */ | ||
489 | andi r4, r4, 0x00000fff /* Extract offset */ | ||
490 | or r4, r4, r5 /* Create physical address */ | ||
491 | #endif /* CONFIG_MMU */ | ||
281 | 492 | ||
282 | andi r6, r3, 0x400; /* Extract ESR[S] */ | 493 | andi r6, r3, 0x400; /* Extract ESR[S] */ |
283 | bnei r6, ex_sw; | 494 | bnei r6, ex_sw; |
@@ -355,6 +566,7 @@ ex_shw: | |||
355 | ex_sw_end: /* Exception handling of store word, ends. */ | 566 | ex_sw_end: /* Exception handling of store word, ends. */ |
356 | 567 | ||
357 | ex_handler_done: | 568 | ex_handler_done: |
569 | #ifndef CONFIG_MMU | ||
358 | lwi r5, r1, 0 /* RMSR */ | 570 | lwi r5, r1, 0 /* RMSR */ |
359 | mts rmsr, r5 | 571 | mts rmsr, r5 |
360 | nop | 572 | nop |
@@ -366,13 +578,455 @@ ex_handler_done: | |||
366 | 578 | ||
367 | rted r17, 0 | 579 | rted r17, 0 |
368 | addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */ | 580 | addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */ |
581 | #else | ||
582 | RESTORE_STATE; | ||
583 | rted r17, 0 | ||
584 | nop | ||
585 | #endif | ||
586 | |||
587 | #ifdef CONFIG_MMU | ||
588 | /* Exception vector entry code. This code runs with address translation | ||
589 | * turned off (i.e. using physical addresses). */ | ||
590 | |||
591 | /* Exception vectors. */ | ||
592 | |||
593 | /* 0x10 - Data Storage Exception | ||
594 | * This happens for just a few reasons. U0 set (but we don't do that), | ||
595 | * or zone protection fault (user violation, write to protected page). | ||
596 | * If this is just an update of modified status, we do that quickly | ||
597 | * and exit. Otherwise, we call heavyweight functions to do the work. | ||
598 | */ | ||
599 | handle_data_storage_exception: | ||
600 | /* Working registers already saved: R3, R4, R5, R6 | ||
601 | * R3 = ESR | ||
602 | */ | ||
603 | mfs r11, rpid | ||
604 | nop | ||
605 | bri 4 | ||
606 | mfs r3, rear /* Get faulting address */ | ||
607 | nop | ||
608 | /* If we are faulting a kernel address, we have to use the | ||
609 | * kernel page tables. | ||
610 | */ | ||
611 | ori r4, r0, CONFIG_KERNEL_START | ||
612 | cmpu r4, r3, r4 | ||
613 | bgti r4, ex3 | ||
614 | /* First, check if it was a zone fault (which means a user | ||
615 | * tried to access a kernel or read-protected page - always | ||
616 | * a SEGV). All other faults here must be stores, so no | ||
617 | * need to check ESR_S as well. */ | ||
618 | mfs r4, resr | ||
619 | nop | ||
620 | andi r4, r4, 0x800 /* ESR_Z - zone protection */ | ||
621 | bnei r4, ex2 | ||
622 | |||
623 | ori r4, r0, swapper_pg_dir | ||
624 | mts rpid, r0 /* TLB will have 0 TID */ | ||
625 | nop | ||
626 | bri ex4 | ||
627 | |||
628 | /* Get the PGD for the current thread. */ | ||
629 | ex3: | ||
630 | /* First, check if it was a zone fault (which means a user | ||
631 | * tried to access a kernel or read-protected page - always | ||
632 | * a SEGV). All other faults here must be stores, so no | ||
633 | * need to check ESR_S as well. */ | ||
634 | mfs r4, resr | ||
635 | nop | ||
636 | andi r4, r4, 0x800 /* ESR_Z */ | ||
637 | bnei r4, ex2 | ||
638 | /* get current task address */ | ||
639 | addi r4 ,CURRENT_TASK, TOPHYS(0); | ||
640 | lwi r4, r4, TASK_THREAD+PGDIR | ||
641 | ex4: | ||
642 | tophys(r4,r4) | ||
643 | BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */ | ||
644 | andi r5, r5, 0xffc | ||
645 | /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ | ||
646 | or r4, r4, r5 | ||
647 | lwi r4, r4, 0 /* Get L1 entry */ | ||
648 | andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */ | ||
649 | beqi r5, ex2 /* Bail if no table */ | ||
650 | |||
651 | tophys(r5,r5) | ||
652 | BSRLI(r6,r3,10) /* Compute PTE address */ | ||
653 | andi r6, r6, 0xffc | ||
654 | andi r5, r5, 0xfffff003 | ||
655 | or r5, r5, r6 | ||
656 | lwi r4, r5, 0 /* Get Linux PTE */ | ||
657 | |||
658 | andi r6, r4, _PAGE_RW /* Is it writeable? */ | ||
659 | beqi r6, ex2 /* Bail if not */ | ||
660 | |||
661 | /* Update 'changed' */ | ||
662 | ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
663 | swi r4, r5, 0 /* Update Linux page table */ | ||
664 | |||
665 | /* Most of the Linux PTE is ready to load into the TLB LO. | ||
666 | * We set ZSEL, where only the LS-bit determines user access. | ||
667 | * We set execute, because we don't have the granularity to | ||
668 | * properly set this at the page level (Linux problem). | ||
669 | * If shared is set, we cause a zero PID->TID load. | ||
670 | * Many of these bits are software only. Bits we don't set | ||
671 | * here we (properly should) assume have the appropriate value. | ||
672 | */ | ||
673 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ | ||
674 | ori r4, r4, _PAGE_HWEXEC /* make it executable */ | ||
675 | |||
676 | /* find the TLB index that caused the fault. It has to be here*/ | ||
677 | mts rtlbsx, r3 | ||
678 | nop | ||
679 | mfs r5, rtlbx /* DEBUG: TBD */ | ||
680 | nop | ||
681 | mts rtlblo, r4 /* Load TLB LO */ | ||
682 | nop | ||
683 | /* Will sync shadow TLBs */ | ||
684 | |||
685 | /* Done...restore registers and get out of here. */ | ||
686 | mts rpid, r11 | ||
687 | nop | ||
688 | bri 4 | ||
689 | |||
690 | RESTORE_STATE; | ||
691 | rted r17, 0 | ||
692 | nop | ||
693 | ex2: | ||
694 | /* The bailout. Restore registers to pre-exception conditions | ||
695 | * and call the heavyweights to help us out. */ | ||
696 | mts rpid, r11 | ||
697 | nop | ||
698 | bri 4 | ||
699 | RESTORE_STATE; | ||
700 | bri page_fault_data_trap | ||
701 | |||
702 | |||
703 | /* 0x11 - Instruction Storage Exception | ||
704 | * This is caused by a fetch from non-execute or guarded pages. */ | ||
705 | handle_instruction_storage_exception: | ||
706 | /* Working registers already saved: R3, R4, R5, R6 | ||
707 | * R3 = ESR | ||
708 | */ | ||
709 | |||
710 | mfs r3, rear /* Get faulting address */ | ||
711 | nop | ||
712 | RESTORE_STATE; | ||
713 | bri page_fault_instr_trap | ||
714 | |||
715 | /* 0x12 - Data TLB Miss Exception | ||
716 | * As the name implies, translation is not in the MMU, so search the | ||
717 | * page tables and fix it. The only purpose of this function is to | ||
718 | * load TLB entries from the page table if they exist. | ||
719 | */ | ||
720 | handle_data_tlb_miss_exception: | ||
721 | /* Working registers already saved: R3, R4, R5, R6 | ||
722 | * R3 = ESR | ||
723 | */ | ||
724 | mfs r11, rpid | ||
725 | nop | ||
726 | bri 4 | ||
727 | mfs r3, rear /* Get faulting address */ | ||
728 | nop | ||
729 | |||
730 | /* If we are faulting a kernel address, we have to use the | ||
731 | * kernel page tables. */ | ||
732 | ori r4, r0, CONFIG_KERNEL_START | ||
733 | cmpu r4, r3, r4 | ||
734 | bgti r4, ex5 | ||
735 | ori r4, r0, swapper_pg_dir | ||
736 | mts rpid, r0 /* TLB will have 0 TID */ | ||
737 | nop | ||
738 | bri ex6 | ||
369 | 739 | ||
740 | /* Get the PGD for the current thread. */ | ||
741 | ex5: | ||
742 | /* get current task address */ | ||
743 | addi r4 ,CURRENT_TASK, TOPHYS(0); | ||
744 | lwi r4, r4, TASK_THREAD+PGDIR | ||
745 | ex6: | ||
746 | tophys(r4,r4) | ||
747 | BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */ | ||
748 | andi r5, r5, 0xffc | ||
749 | /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ | ||
750 | or r4, r4, r5 | ||
751 | lwi r4, r4, 0 /* Get L1 entry */ | ||
752 | andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */ | ||
753 | beqi r5, ex7 /* Bail if no table */ | ||
754 | |||
755 | tophys(r5,r5) | ||
756 | BSRLI(r6,r3,10) /* Compute PTE address */ | ||
757 | andi r6, r6, 0xffc | ||
758 | andi r5, r5, 0xfffff003 | ||
759 | or r5, r5, r6 | ||
760 | lwi r4, r5, 0 /* Get Linux PTE */ | ||
761 | |||
762 | andi r6, r4, _PAGE_PRESENT | ||
763 | beqi r6, ex7 | ||
764 | |||
765 | ori r4, r4, _PAGE_ACCESSED | ||
766 | swi r4, r5, 0 | ||
767 | |||
768 | /* Most of the Linux PTE is ready to load into the TLB LO. | ||
769 | * We set ZSEL, where only the LS-bit determines user access. | ||
770 | * We set execute, because we don't have the granularity to | ||
771 | * properly set this at the page level (Linux problem). | ||
772 | * If shared is set, we cause a zero PID->TID load. | ||
773 | * Many of these bits are software only. Bits we don't set | ||
774 | * here we (properly should) assume have the appropriate value. | ||
775 | */ | ||
776 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ | ||
777 | |||
778 | bri finish_tlb_load | ||
779 | ex7: | ||
780 | /* The bailout. Restore registers to pre-exception conditions | ||
781 | * and call the heavyweights to help us out. | ||
782 | */ | ||
783 | mts rpid, r11 | ||
784 | nop | ||
785 | bri 4 | ||
786 | RESTORE_STATE; | ||
787 | bri page_fault_data_trap | ||
788 | |||
789 | /* 0x13 - Instruction TLB Miss Exception | ||
790 | * Nearly the same as above, except we get our information from | ||
791 | * different registers and bailout to a different point. | ||
792 | */ | ||
793 | handle_instruction_tlb_miss_exception: | ||
794 | /* Working registers already saved: R3, R4, R5, R6 | ||
795 | * R3 = ESR | ||
796 | */ | ||
797 | mfs r11, rpid | ||
798 | nop | ||
799 | bri 4 | ||
800 | mfs r3, rear /* Get faulting address */ | ||
801 | nop | ||
802 | |||
803 | /* If we are faulting a kernel address, we have to use the | ||
804 | * kernel page tables. | ||
805 | */ | ||
806 | ori r4, r0, CONFIG_KERNEL_START | ||
807 | cmpu r4, r3, r4 | ||
808 | bgti r4, ex8 | ||
809 | ori r4, r0, swapper_pg_dir | ||
810 | mts rpid, r0 /* TLB will have 0 TID */ | ||
811 | nop | ||
812 | bri ex9 | ||
813 | |||
814 | /* Get the PGD for the current thread. */ | ||
815 | ex8: | ||
816 | /* get current task address */ | ||
817 | addi r4 ,CURRENT_TASK, TOPHYS(0); | ||
818 | lwi r4, r4, TASK_THREAD+PGDIR | ||
819 | ex9: | ||
820 | tophys(r4,r4) | ||
821 | BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */ | ||
822 | andi r5, r5, 0xffc | ||
823 | /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ | ||
824 | or r4, r4, r5 | ||
825 | lwi r4, r4, 0 /* Get L1 entry */ | ||
826 | andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */ | ||
827 | beqi r5, ex10 /* Bail if no table */ | ||
828 | |||
829 | tophys(r5,r5) | ||
830 | BSRLI(r6,r3,10) /* Compute PTE address */ | ||
831 | andi r6, r6, 0xffc | ||
832 | andi r5, r5, 0xfffff003 | ||
833 | or r5, r5, r6 | ||
834 | lwi r4, r5, 0 /* Get Linux PTE */ | ||
835 | |||
836 | andi r6, r4, _PAGE_PRESENT | ||
837 | beqi r6, ex7 | ||
838 | |||
839 | ori r4, r4, _PAGE_ACCESSED | ||
840 | swi r4, r5, 0 | ||
841 | |||
842 | /* Most of the Linux PTE is ready to load into the TLB LO. | ||
843 | * We set ZSEL, where only the LS-bit determines user access. | ||
844 | * We set execute, because we don't have the granularity to | ||
845 | * properly set this at the page level (Linux problem). | ||
846 | * If shared is set, we cause a zero PID->TID load. | ||
847 | * Many of these bits are software only. Bits we don't set | ||
848 | * here we (properly should) assume have the appropriate value. | ||
849 | */ | ||
850 | andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ | ||
851 | |||
852 | bri finish_tlb_load | ||
853 | ex10: | ||
854 | /* The bailout. Restore registers to pre-exception conditions | ||
855 | * and call the heavyweights to help us out. | ||
856 | */ | ||
857 | mts rpid, r11 | ||
858 | nop | ||
859 | bri 4 | ||
860 | RESTORE_STATE; | ||
861 | bri page_fault_instr_trap | ||
862 | |||
863 | /* Both the instruction and data TLB miss get to this point to load the TLB. | ||
864 | * r3 - EA of fault | ||
865 | * r4 - TLB LO (info from Linux PTE) | ||
866 | * r5, r6 - available to use | ||
867 | * PID - loaded with proper value when we get here | ||
868 | * Upon exit, we reload everything and RFI. | ||
869 | * A common place to load the TLB. | ||
870 | */ | ||
871 | tlb_index: | ||
872 | .long 1 /* MS: storing last used tlb index */ | ||
873 | finish_tlb_load: | ||
874 | /* MS: load the last used TLB index. */ | ||
875 | lwi r5, r0, TOPHYS(tlb_index) | ||
876 | addik r5, r5, 1 /* MS: inc tlb_index -> use next one */ | ||
877 | |||
878 | /* MS: FIXME this is potential fault, because this is mask not count */ | ||
879 | andi r5, r5, (MICROBLAZE_TLB_SIZE-1) | ||
880 | ori r6, r0, 1 | ||
881 | cmp r31, r5, r6 | ||
882 | blti r31, sem | ||
883 | addik r5, r6, 1 | ||
884 | sem: | ||
885 | /* MS: save back current TLB index */ | ||
886 | swi r5, r0, TOPHYS(tlb_index) | ||
887 | |||
888 | ori r4, r4, _PAGE_HWEXEC /* make it executable */ | ||
889 | mts rtlbx, r5 /* MS: save current TLB */ | ||
890 | nop | ||
891 | mts rtlblo, r4 /* MS: save to TLB LO */ | ||
892 | nop | ||
893 | |||
894 | /* Create EPN. This is the faulting address plus a static | ||
895 | * set of bits. These are size, valid, E, U0, and ensure | ||
896 | * bits 20 and 21 are zero. | ||
897 | */ | ||
898 | andi r3, r3, 0xfffff000 | ||
899 | ori r3, r3, 0x0c0 | ||
900 | mts rtlbhi, r3 /* Load TLB HI */ | ||
901 | nop | ||
902 | |||
903 | /* Done...restore registers and get out of here. */ | ||
904 | ex12: | ||
905 | mts rpid, r11 | ||
906 | nop | ||
907 | bri 4 | ||
908 | RESTORE_STATE; | ||
909 | rted r17, 0 | ||
910 | nop | ||
911 | |||
912 | /* extern void giveup_fpu(struct task_struct *prev) | ||
913 | * | ||
914 | * The MicroBlaze processor may have an FPU, so this should not just | ||
915 | * return: TBD. | ||
916 | */ | ||
917 | .globl giveup_fpu; | ||
918 | .align 4; | ||
919 | giveup_fpu: | ||
920 | bralid r15,0 /* TBD */ | ||
921 | nop | ||
922 | |||
923 | /* At present, this routine just hangs. - extern void abort(void) */ | ||
924 | .globl abort; | ||
925 | .align 4; | ||
926 | abort: | ||
927 | br r0 | ||
928 | |||
929 | .globl set_context; | ||
930 | .align 4; | ||
931 | set_context: | ||
932 | mts rpid, r5 /* Shadow TLBs are automatically */ | ||
933 | nop | ||
934 | bri 4 /* flushed by changing PID */ | ||
935 | rtsd r15,8 | ||
936 | nop | ||
937 | |||
938 | #endif | ||
370 | .end _hw_exception_handler | 939 | .end _hw_exception_handler |
371 | 940 | ||
941 | #ifdef CONFIG_MMU | ||
942 | /* Unaligned data access exception last on a 4k page for MMU. | ||
943 | * When this is called, we are in virtual mode with exceptions enabled | ||
944 | * and registers 1-13,15,17,18 saved. | ||
945 | * | ||
946 | * R3 = ESR | ||
947 | * R4 = EAR | ||
948 | * R7 = pointer to saved registers (struct pt_regs *regs) | ||
949 | * | ||
950 | * This handler perform the access, and returns via ret_from_exc. | ||
951 | */ | ||
952 | .global _unaligned_data_exception | ||
953 | .ent _unaligned_data_exception | ||
954 | _unaligned_data_exception: | ||
955 | andi r8, r3, 0x3E0; /* Mask and extract the register operand */ | ||
956 | BSRLI(r8,r8,2); /* r8 >> 2 = register operand * 8 */ | ||
957 | andi r6, r3, 0x400; /* Extract ESR[S] */ | ||
958 | bneid r6, ex_sw_vm; | ||
959 | andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */ | ||
960 | ex_lw_vm: | ||
961 | beqid r6, ex_lhw_vm; | ||
962 | lbui r5, r4, 0; /* Exception address in r4 - delay slot */ | ||
963 | /* Load a word, byte-by-byte from destination address and save it in tmp space*/ | ||
964 | la r6, r0, ex_tmp_data_loc_0; | ||
965 | sbi r5, r6, 0; | ||
966 | lbui r5, r4, 1; | ||
967 | sbi r5, r6, 1; | ||
968 | lbui r5, r4, 2; | ||
969 | sbi r5, r6, 2; | ||
970 | lbui r5, r4, 3; | ||
971 | sbi r5, r6, 3; | ||
972 | brid ex_lw_tail_vm; | ||
973 | /* Get the destination register value into r3 - delay slot */ | ||
974 | lwi r3, r6, 0; | ||
975 | ex_lhw_vm: | ||
976 | /* Load a half-word, byte-by-byte from destination address and | ||
977 | * save it in tmp space */ | ||
978 | la r6, r0, ex_tmp_data_loc_0; | ||
979 | sbi r5, r6, 0; | ||
980 | lbui r5, r4, 1; | ||
981 | sbi r5, r6, 1; | ||
982 | lhui r3, r6, 0; /* Get the destination register value into r3 */ | ||
983 | ex_lw_tail_vm: | ||
984 | /* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */ | ||
985 | addik r5, r8, lw_table_vm; | ||
986 | bra r5; | ||
987 | ex_lw_end_vm: /* Exception handling of load word, ends */ | ||
988 | brai ret_from_exc; | ||
989 | ex_sw_vm: | ||
990 | /* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */ | ||
991 | addik r5, r8, sw_table_vm; | ||
992 | bra r5; | ||
993 | ex_sw_tail_vm: | ||
994 | la r5, r0, ex_tmp_data_loc_0; | ||
995 | beqid r6, ex_shw_vm; | ||
996 | swi r3, r5, 0; /* Get the word - delay slot */ | ||
997 | /* Store the word, byte-by-byte into destination address */ | ||
998 | lbui r3, r5, 0; | ||
999 | sbi r3, r4, 0; | ||
1000 | lbui r3, r5, 1; | ||
1001 | sbi r3, r4, 1; | ||
1002 | lbui r3, r5, 2; | ||
1003 | sbi r3, r4, 2; | ||
1004 | lbui r3, r5, 3; | ||
1005 | brid ret_from_exc; | ||
1006 | sbi r3, r4, 3; /* Delay slot */ | ||
1007 | ex_shw_vm: | ||
1008 | /* Store the lower half-word, byte-by-byte into destination address */ | ||
1009 | lbui r3, r5, 2; | ||
1010 | sbi r3, r4, 0; | ||
1011 | lbui r3, r5, 3; | ||
1012 | brid ret_from_exc; | ||
1013 | sbi r3, r4, 1; /* Delay slot */ | ||
1014 | ex_sw_end_vm: /* Exception handling of store word, ends. */ | ||
1015 | .end _unaligned_data_exception | ||
1016 | #endif /* CONFIG_MMU */ | ||
1017 | |||
372 | ex_handler_unhandled: | 1018 | ex_handler_unhandled: |
373 | /* FIXME add handle function for unhandled exception - dump register */ | 1019 | /* FIXME add handle function for unhandled exception - dump register */ |
374 | bri 0 | 1020 | bri 0 |
375 | 1021 | ||
1022 | /* | ||
1023 | * hw_exception_handler Jump Table | ||
1024 | * - Contains code snippets for each register that caused the unalign exception | ||
1025 | * - Hence exception handler is NOT self-modifying | ||
1026 | * - Separate table for load exceptions and store exceptions. | ||
1027 | * - Each table is of size: (8 * 32) = 256 bytes | ||
1028 | */ | ||
1029 | |||
376 | .section .text | 1030 | .section .text |
377 | .align 4 | 1031 | .align 4 |
378 | lw_table: | 1032 | lw_table: |
@@ -407,7 +1061,11 @@ lw_r27: R3_TO_LWREG (27); | |||
407 | lw_r28: R3_TO_LWREG (28); | 1061 | lw_r28: R3_TO_LWREG (28); |
408 | lw_r29: R3_TO_LWREG (29); | 1062 | lw_r29: R3_TO_LWREG (29); |
409 | lw_r30: R3_TO_LWREG (30); | 1063 | lw_r30: R3_TO_LWREG (30); |
1064 | #ifdef CONFIG_MMU | ||
1065 | lw_r31: R3_TO_LWREG_V (31); | ||
1066 | #else | ||
410 | lw_r31: R3_TO_LWREG (31); | 1067 | lw_r31: R3_TO_LWREG (31); |
1068 | #endif | ||
411 | 1069 | ||
412 | sw_table: | 1070 | sw_table: |
413 | sw_r0: SWREG_TO_R3 (0); | 1071 | sw_r0: SWREG_TO_R3 (0); |
@@ -441,7 +1099,81 @@ sw_r27: SWREG_TO_R3 (27); | |||
441 | sw_r28: SWREG_TO_R3 (28); | 1099 | sw_r28: SWREG_TO_R3 (28); |
442 | sw_r29: SWREG_TO_R3 (29); | 1100 | sw_r29: SWREG_TO_R3 (29); |
443 | sw_r30: SWREG_TO_R3 (30); | 1101 | sw_r30: SWREG_TO_R3 (30); |
1102 | #ifdef CONFIG_MMU | ||
1103 | sw_r31: SWREG_TO_R3_V (31); | ||
1104 | #else | ||
444 | sw_r31: SWREG_TO_R3 (31); | 1105 | sw_r31: SWREG_TO_R3 (31); |
1106 | #endif | ||
1107 | |||
1108 | #ifdef CONFIG_MMU | ||
1109 | lw_table_vm: | ||
1110 | lw_r0_vm: R3_TO_LWREG_VM (0); | ||
1111 | lw_r1_vm: R3_TO_LWREG_VM_V (1); | ||
1112 | lw_r2_vm: R3_TO_LWREG_VM_V (2); | ||
1113 | lw_r3_vm: R3_TO_LWREG_VM_V (3); | ||
1114 | lw_r4_vm: R3_TO_LWREG_VM_V (4); | ||
1115 | lw_r5_vm: R3_TO_LWREG_VM_V (5); | ||
1116 | lw_r6_vm: R3_TO_LWREG_VM_V (6); | ||
1117 | lw_r7_vm: R3_TO_LWREG_VM_V (7); | ||
1118 | lw_r8_vm: R3_TO_LWREG_VM_V (8); | ||
1119 | lw_r9_vm: R3_TO_LWREG_VM_V (9); | ||
1120 | lw_r10_vm: R3_TO_LWREG_VM_V (10); | ||
1121 | lw_r11_vm: R3_TO_LWREG_VM_V (11); | ||
1122 | lw_r12_vm: R3_TO_LWREG_VM_V (12); | ||
1123 | lw_r13_vm: R3_TO_LWREG_VM_V (13); | ||
1124 | lw_r14_vm: R3_TO_LWREG_VM (14); | ||
1125 | lw_r15_vm: R3_TO_LWREG_VM_V (15); | ||
1126 | lw_r16_vm: R3_TO_LWREG_VM (16); | ||
1127 | lw_r17_vm: R3_TO_LWREG_VM_V (17); | ||
1128 | lw_r18_vm: R3_TO_LWREG_VM_V (18); | ||
1129 | lw_r19_vm: R3_TO_LWREG_VM (19); | ||
1130 | lw_r20_vm: R3_TO_LWREG_VM (20); | ||
1131 | lw_r21_vm: R3_TO_LWREG_VM (21); | ||
1132 | lw_r22_vm: R3_TO_LWREG_VM (22); | ||
1133 | lw_r23_vm: R3_TO_LWREG_VM (23); | ||
1134 | lw_r24_vm: R3_TO_LWREG_VM (24); | ||
1135 | lw_r25_vm: R3_TO_LWREG_VM (25); | ||
1136 | lw_r26_vm: R3_TO_LWREG_VM (26); | ||
1137 | lw_r27_vm: R3_TO_LWREG_VM (27); | ||
1138 | lw_r28_vm: R3_TO_LWREG_VM (28); | ||
1139 | lw_r29_vm: R3_TO_LWREG_VM (29); | ||
1140 | lw_r30_vm: R3_TO_LWREG_VM (30); | ||
1141 | lw_r31_vm: R3_TO_LWREG_VM_V (31); | ||
1142 | |||
1143 | sw_table_vm: | ||
1144 | sw_r0_vm: SWREG_TO_R3_VM (0); | ||
1145 | sw_r1_vm: SWREG_TO_R3_VM_V (1); | ||
1146 | sw_r2_vm: SWREG_TO_R3_VM_V (2); | ||
1147 | sw_r3_vm: SWREG_TO_R3_VM_V (3); | ||
1148 | sw_r4_vm: SWREG_TO_R3_VM_V (4); | ||
1149 | sw_r5_vm: SWREG_TO_R3_VM_V (5); | ||
1150 | sw_r6_vm: SWREG_TO_R3_VM_V (6); | ||
1151 | sw_r7_vm: SWREG_TO_R3_VM_V (7); | ||
1152 | sw_r8_vm: SWREG_TO_R3_VM_V (8); | ||
1153 | sw_r9_vm: SWREG_TO_R3_VM_V (9); | ||
1154 | sw_r10_vm: SWREG_TO_R3_VM_V (10); | ||
1155 | sw_r11_vm: SWREG_TO_R3_VM_V (11); | ||
1156 | sw_r12_vm: SWREG_TO_R3_VM_V (12); | ||
1157 | sw_r13_vm: SWREG_TO_R3_VM_V (13); | ||
1158 | sw_r14_vm: SWREG_TO_R3_VM (14); | ||
1159 | sw_r15_vm: SWREG_TO_R3_VM_V (15); | ||
1160 | sw_r16_vm: SWREG_TO_R3_VM (16); | ||
1161 | sw_r17_vm: SWREG_TO_R3_VM_V (17); | ||
1162 | sw_r18_vm: SWREG_TO_R3_VM_V (18); | ||
1163 | sw_r19_vm: SWREG_TO_R3_VM (19); | ||
1164 | sw_r20_vm: SWREG_TO_R3_VM (20); | ||
1165 | sw_r21_vm: SWREG_TO_R3_VM (21); | ||
1166 | sw_r22_vm: SWREG_TO_R3_VM (22); | ||
1167 | sw_r23_vm: SWREG_TO_R3_VM (23); | ||
1168 | sw_r24_vm: SWREG_TO_R3_VM (24); | ||
1169 | sw_r25_vm: SWREG_TO_R3_VM (25); | ||
1170 | sw_r26_vm: SWREG_TO_R3_VM (26); | ||
1171 | sw_r27_vm: SWREG_TO_R3_VM (27); | ||
1172 | sw_r28_vm: SWREG_TO_R3_VM (28); | ||
1173 | sw_r29_vm: SWREG_TO_R3_VM (29); | ||
1174 | sw_r30_vm: SWREG_TO_R3_VM (30); | ||
1175 | sw_r31_vm: SWREG_TO_R3_VM_V (31); | ||
1176 | #endif /* CONFIG_MMU */ | ||
445 | 1177 | ||
446 | /* Temporary data structures used in the handler */ | 1178 | /* Temporary data structures used in the handler */ |
447 | .section .data | 1179 | .section .data |
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index 5f71790e3c3c..59ff20e33e0c 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c | |||
@@ -45,3 +45,5 @@ extern void __udivsi3(void); | |||
45 | EXPORT_SYMBOL(__udivsi3); | 45 | EXPORT_SYMBOL(__udivsi3); |
46 | extern void __umodsi3(void); | 46 | extern void __umodsi3(void); |
47 | EXPORT_SYMBOL(__umodsi3); | 47 | EXPORT_SYMBOL(__umodsi3); |
48 | extern char *_ebss; | ||
49 | EXPORT_SYMBOL_GPL(_ebss); | ||
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S new file mode 100644 index 000000000000..df16c6287a8e --- /dev/null +++ b/arch/microblaze/kernel/misc.S | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Miscellaneous low-level MMU functions. | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | ||
5 | * Copyright (C) 2008-2009 PetaLogix | ||
6 | * Copyright (C) 2007 Xilinx, Inc. All rights reserved. | ||
7 | * | ||
8 | * Derived from arch/ppc/kernel/misc.S | ||
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General | ||
11 | * Public License. See the file COPYING in the main directory of this | ||
12 | * archive for more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <linux/sys.h> | ||
17 | #include <asm/unistd.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <asm/mmu.h> | ||
20 | #include <asm/page.h> | ||
21 | |||
22 | .text | ||
23 | /* | ||
24 | * Flush MMU TLB | ||
25 | * | ||
26 | * We avoid flushing the pinned 0, 1 and possibly 2 entries. | ||
27 | */ | ||
28 | .globl _tlbia; | ||
29 | .align 4; | ||
30 | _tlbia: | ||
31 | addik r12, r0, 63 /* flush all entries (63 - 3) */ | ||
32 | /* isync */ | ||
33 | _tlbia_1: | ||
34 | mts rtlbx, r12 | ||
35 | nop | ||
36 | mts rtlbhi, r0 /* flush: ensure V is clear */ | ||
37 | nop | ||
38 | addik r11, r12, -2 | ||
39 | bneid r11, _tlbia_1 /* loop for all entries */ | ||
40 | addik r12, r12, -1 | ||
41 | /* sync */ | ||
42 | rtsd r15, 8 | ||
43 | nop | ||
44 | |||
45 | /* | ||
46 | * Flush MMU TLB for a particular address (in r5) | ||
47 | */ | ||
48 | .globl _tlbie; | ||
49 | .align 4; | ||
50 | _tlbie: | ||
51 | mts rtlbsx, r5 /* look up the address in TLB */ | ||
52 | nop | ||
53 | mfs r12, rtlbx /* Retrieve index */ | ||
54 | nop | ||
55 | blti r12, _tlbie_1 /* Check if found */ | ||
56 | mts rtlbhi, r0 /* flush: ensure V is clear */ | ||
57 | nop | ||
58 | _tlbie_1: | ||
59 | rtsd r15, 8 | ||
60 | nop | ||
61 | |||
62 | /* | ||
63 | * Allocate TLB entry for early console | ||
64 | */ | ||
65 | .globl early_console_reg_tlb_alloc; | ||
66 | .align 4; | ||
67 | early_console_reg_tlb_alloc: | ||
68 | /* | ||
69 | * Load a TLB entry for the UART, so that microblaze_progress() can use | ||
70 | * the UARTs nice and early. We use a 4k real==virtual mapping. | ||
71 | */ | ||
72 | ori r4, r0, 63 | ||
73 | mts rtlbx, r4 /* TLB slot 2 */ | ||
74 | |||
75 | or r4,r5,r0 | ||
76 | andi r4,r4,0xfffff000 | ||
77 | ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) | ||
78 | |||
79 | andi r5,r5,0xfffff000 | ||
80 | ori r5,r5,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) | ||
81 | |||
82 | mts rtlblo,r4 /* Load the data portion of the entry */ | ||
83 | nop | ||
84 | mts rtlbhi,r5 /* Load the tag portion of the entry */ | ||
85 | nop | ||
86 | rtsd r15, 8 | ||
87 | nop | ||
88 | |||
89 | /* | ||
90 | * Copy a whole page (4096 bytes). | ||
91 | */ | ||
92 | #define COPY_16_BYTES \ | ||
93 | lwi r7, r6, 0; \ | ||
94 | lwi r8, r6, 4; \ | ||
95 | lwi r9, r6, 8; \ | ||
96 | lwi r10, r6, 12; \ | ||
97 | swi r7, r5, 0; \ | ||
98 | swi r8, r5, 4; \ | ||
99 | swi r9, r5, 8; \ | ||
100 | swi r10, r5, 12 | ||
101 | |||
102 | |||
103 | /* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/ | ||
104 | #define DCACHE_LINE_BYTES (4 * 4) | ||
105 | |||
106 | .globl copy_page; | ||
107 | .align 4; | ||
108 | copy_page: | ||
109 | ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 | ||
110 | _copy_page_loop: | ||
111 | COPY_16_BYTES | ||
112 | #if DCACHE_LINE_BYTES >= 32 | ||
113 | COPY_16_BYTES | ||
114 | #endif | ||
115 | addik r6, r6, DCACHE_LINE_BYTES | ||
116 | addik r5, r5, DCACHE_LINE_BYTES | ||
117 | bneid r11, _copy_page_loop | ||
118 | addik r11, r11, -1 | ||
119 | rtsd r15, 8 | ||
120 | nop | ||
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 07d4fa339eda..00b12c6d5326 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
@@ -126,9 +126,54 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
126 | else | 126 | else |
127 | childregs->r1 = ((unsigned long) ti) + THREAD_SIZE; | 127 | childregs->r1 = ((unsigned long) ti) + THREAD_SIZE; |
128 | 128 | ||
129 | #ifndef CONFIG_MMU | ||
129 | memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); | 130 | memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); |
130 | ti->cpu_context.r1 = (unsigned long)childregs; | 131 | ti->cpu_context.r1 = (unsigned long)childregs; |
131 | ti->cpu_context.msr = (unsigned long)childregs->msr; | 132 | ti->cpu_context.msr = (unsigned long)childregs->msr; |
133 | #else | ||
134 | |||
135 | /* if creating a kernel thread then update the current reg (we don't | ||
136 | * want to use the parent's value when restoring by POP_STATE) */ | ||
137 | if (kernel_mode(regs)) | ||
138 | /* save new current on stack to use POP_STATE */ | ||
139 | childregs->CURRENT_TASK = (unsigned long)p; | ||
140 | /* if returning to user then use the parent's value of this register */ | ||
141 | |||
142 | /* if we're creating a new kernel thread then just zeroing all | ||
143 | * the registers. That's OK for a brand new thread.*/ | ||
144 | /* Pls. note that some of them will be restored in POP_STATE */ | ||
145 | if (kernel_mode(regs)) | ||
146 | memset(&ti->cpu_context, 0, sizeof(struct cpu_context)); | ||
147 | /* if this thread is created for fork/vfork/clone, then we want to | ||
148 | * restore all the parent's context */ | ||
149 | /* in addition to the registers which will be restored by POP_STATE */ | ||
150 | else { | ||
151 | ti->cpu_context = *(struct cpu_context *)regs; | ||
152 | childregs->msr |= MSR_UMS; | ||
153 | } | ||
154 | |||
155 | /* FIXME STATE_SAVE_PT_OFFSET; */ | ||
156 | ti->cpu_context.r1 = (unsigned long)childregs - STATE_SAVE_ARG_SPACE; | ||
157 | /* we should consider the fact that childregs is a copy of the parent | ||
158 | * regs which were saved immediately after entering the kernel state | ||
159 | * before enabling VM. This MSR will be restored in switch_to and | ||
160 | * RETURN() and we want to have the right machine state there | ||
161 | * specifically this state must have INTs disabled before and enabled | ||
162 | * after performing rtbd | ||
163 | * compose the right MSR for RETURN(). It will work for switch_to also | ||
164 | * excepting for VM and UMS | ||
165 | * don't touch UMS , CARRY and cache bits | ||
166 | * right now MSR is a copy of parent one */ | ||
167 | childregs->msr |= MSR_BIP; | ||
168 | childregs->msr &= ~MSR_EIP; | ||
169 | childregs->msr |= MSR_IE; | ||
170 | childregs->msr &= ~MSR_VM; | ||
171 | childregs->msr |= MSR_VMS; | ||
172 | childregs->msr |= MSR_EE; /* exceptions will be enabled*/ | ||
173 | |||
174 | ti->cpu_context.msr = (childregs->msr|MSR_VM); | ||
175 | ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */ | ||
176 | #endif | ||
132 | ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; | 177 | ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; |
133 | 178 | ||
134 | if (clone_flags & CLONE_SETTLS) | 179 | if (clone_flags & CLONE_SETTLS) |
@@ -137,6 +182,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
137 | return 0; | 182 | return 0; |
138 | } | 183 | } |
139 | 184 | ||
185 | #ifndef CONFIG_MMU | ||
140 | /* | 186 | /* |
141 | * Return saved PC of a blocked thread. | 187 | * Return saved PC of a blocked thread. |
142 | */ | 188 | */ |
@@ -151,6 +197,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
151 | else | 197 | else |
152 | return ctx->r14; | 198 | return ctx->r14; |
153 | } | 199 | } |
200 | #endif | ||
154 | 201 | ||
155 | static void kernel_thread_helper(int (*fn)(void *), void *arg) | 202 | static void kernel_thread_helper(int (*fn)(void *), void *arg) |
156 | { | 203 | { |
@@ -173,6 +220,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | |||
173 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, | 220 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, |
174 | ®s, 0, NULL, NULL); | 221 | ®s, 0, NULL, NULL); |
175 | } | 222 | } |
223 | EXPORT_SYMBOL_GPL(kernel_thread); | ||
176 | 224 | ||
177 | unsigned long get_wchan(struct task_struct *p) | 225 | unsigned long get_wchan(struct task_struct *p) |
178 | { | 226 | { |
@@ -188,3 +236,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp) | |||
188 | regs->r1 = usp; | 236 | regs->r1 = usp; |
189 | regs->pt_mode = 0; | 237 | regs->pt_mode = 0; |
190 | } | 238 | } |
239 | |||
240 | #ifdef CONFIG_MMU | ||
241 | #include <linux/elfcore.h> | ||
242 | /* | ||
243 | * Set up a thread for executing a new program | ||
244 | */ | ||
245 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs) | ||
246 | { | ||
247 | return 0; /* MicroBlaze has no separate FPU registers */ | ||
248 | } | ||
249 | #endif /* CONFIG_MMU */ | ||
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index 34c48718061a..c005cc6f1aaf 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c | |||
@@ -509,12 +509,13 @@ static void __init early_init_dt_check_for_initrd(unsigned long node) | |||
509 | 509 | ||
510 | prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); | 510 | prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l); |
511 | if (prop) { | 511 | if (prop) { |
512 | initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4)); | 512 | initrd_start = (unsigned long) |
513 | __va((u32)of_read_ulong(prop, l/4)); | ||
513 | 514 | ||
514 | prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); | 515 | prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l); |
515 | if (prop) { | 516 | if (prop) { |
516 | initrd_end = (unsigned long) | 517 | initrd_end = (unsigned long) |
517 | __va(of_read_ulong(prop, l/4)); | 518 | __va((u32)of_read_ulong(prop, 1/4)); |
518 | initrd_below_start_ok = 1; | 519 | initrd_below_start_ok = 1; |
519 | } else { | 520 | } else { |
520 | initrd_start = 0; | 521 | initrd_start = 0; |
@@ -563,7 +564,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node, | |||
563 | strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); | 564 | strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); |
564 | 565 | ||
565 | #ifdef CONFIG_CMDLINE | 566 | #ifdef CONFIG_CMDLINE |
567 | #ifndef CONFIG_CMDLINE_FORCE | ||
566 | if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) | 568 | if (p == NULL || l == 0 || (l == 1 && (*p) == 0)) |
569 | #endif | ||
567 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | 570 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); |
568 | #endif /* CONFIG_CMDLINE */ | 571 | #endif /* CONFIG_CMDLINE */ |
569 | 572 | ||
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index eb6b41758e23..8709bea09604 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c | |||
@@ -42,10 +42,6 @@ char cmd_line[COMMAND_LINE_SIZE]; | |||
42 | 42 | ||
43 | void __init setup_arch(char **cmdline_p) | 43 | void __init setup_arch(char **cmdline_p) |
44 | { | 44 | { |
45 | #ifdef CONFIG_CMDLINE_FORCE | ||
46 | strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | ||
47 | strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); | ||
48 | #endif | ||
49 | *cmdline_p = cmd_line; | 45 | *cmdline_p = cmd_line; |
50 | 46 | ||
51 | console_verbose(); | 47 | console_verbose(); |
@@ -102,14 +98,34 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, | |||
102 | { | 98 | { |
103 | unsigned long *src, *dst = (unsigned long *)0x0; | 99 | unsigned long *src, *dst = (unsigned long *)0x0; |
104 | 100 | ||
101 | /* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the | ||
102 | * end of kernel. There are two position which we want to check. | ||
103 | * The first is __init_end and the second __bss_start. | ||
104 | */ | ||
105 | #ifdef CONFIG_MTD_UCLINUX | ||
106 | int romfs_size; | ||
107 | unsigned int romfs_base; | ||
108 | char *old_klimit = klimit; | ||
109 | |||
110 | romfs_base = (ram ? ram : (unsigned int)&__init_end); | ||
111 | romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); | ||
112 | if (!romfs_size) { | ||
113 | romfs_base = (unsigned int)&__bss_start; | ||
114 | romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); | ||
115 | } | ||
116 | |||
117 | /* Move ROMFS out of BSS before clearing it */ | ||
118 | if (romfs_size > 0) { | ||
119 | memmove(&_ebss, (int *)romfs_base, romfs_size); | ||
120 | klimit += romfs_size; | ||
121 | } | ||
122 | #endif | ||
123 | |||
105 | /* clearing bss section */ | 124 | /* clearing bss section */ |
106 | memset(__bss_start, 0, __bss_stop-__bss_start); | 125 | memset(__bss_start, 0, __bss_stop-__bss_start); |
107 | memset(_ssbss, 0, _esbss-_ssbss); | 126 | memset(_ssbss, 0, _esbss-_ssbss); |
108 | 127 | ||
109 | /* | 128 | /* Copy command line passed from bootloader */ |
110 | * Copy command line passed from bootloader, or use default | ||
111 | * if none provided, or forced | ||
112 | */ | ||
113 | #ifndef CONFIG_CMDLINE_BOOL | 129 | #ifndef CONFIG_CMDLINE_BOOL |
114 | if (cmdline && cmdline[0] != '\0') | 130 | if (cmdline && cmdline[0] != '\0') |
115 | strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE); | 131 | strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE); |
@@ -126,27 +142,15 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, | |||
126 | printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt); | 142 | printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt); |
127 | 143 | ||
128 | #ifdef CONFIG_MTD_UCLINUX | 144 | #ifdef CONFIG_MTD_UCLINUX |
129 | { | 145 | early_printk("Found romfs @ 0x%08x (0x%08x)\n", |
130 | int size; | 146 | romfs_base, romfs_size); |
131 | unsigned int romfs_base; | 147 | early_printk("#### klimit %p ####\n", old_klimit); |
132 | romfs_base = (ram ? ram : (unsigned int)&__init_end); | 148 | BUG_ON(romfs_size < 0); /* What else can we do? */ |
133 | /* if CONFIG_MTD_UCLINUX_EBSS is defined, assume ROMFS is at the | 149 | |
134 | * end of kernel, which is ROMFS_LOCATION defined above. */ | 150 | early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", |
135 | size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base)); | 151 | romfs_size, romfs_base, (unsigned)&_ebss); |
136 | early_printk("Found romfs @ 0x%08x (0x%08x)\n", | 152 | |
137 | romfs_base, size); | 153 | early_printk("New klimit: 0x%08x\n", (unsigned)klimit); |
138 | early_printk("#### klimit %p ####\n", klimit); | ||
139 | BUG_ON(size < 0); /* What else can we do? */ | ||
140 | |||
141 | /* Use memmove to handle likely case of memory overlap */ | ||
142 | early_printk("Moving 0x%08x bytes from 0x%08x to 0x%08x\n", | ||
143 | size, romfs_base, (unsigned)&_ebss); | ||
144 | memmove(&_ebss, (int *)romfs_base, size); | ||
145 | |||
146 | /* update klimit */ | ||
147 | klimit += PAGE_ALIGN(size); | ||
148 | early_printk("New klimit: 0x%08x\n", (unsigned)klimit); | ||
149 | } | ||
150 | #endif | 154 | #endif |
151 | 155 | ||
152 | for (src = __ivt_start; src < __ivt_end; src++, dst++) | 156 | for (src = __ivt_start; src < __ivt_end; src++, dst++) |
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 40d36931e363..4c0e6521b114 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c | |||
@@ -152,8 +152,8 @@ struct rt_sigframe { | |||
152 | unsigned long tramp[2]; /* signal trampoline */ | 152 | unsigned long tramp[2]; /* signal trampoline */ |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static int | 155 | static int restore_sigcontext(struct pt_regs *regs, |
156 | restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *rval_p) | 156 | struct sigcontext __user *sc, int *rval_p) |
157 | { | 157 | { |
158 | unsigned int err = 0; | 158 | unsigned int err = 0; |
159 | 159 | ||
@@ -211,11 +211,10 @@ badframe: | |||
211 | 211 | ||
212 | asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) | 212 | asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) |
213 | { | 213 | { |
214 | struct rt_sigframe *frame = | 214 | struct rt_sigframe __user *frame = |
215 | (struct rt_sigframe *)(regs->r1 + STATE_SAVE_ARG_SPACE); | 215 | (struct rt_sigframe __user *)(regs->r1 + STATE_SAVE_ARG_SPACE); |
216 | 216 | ||
217 | sigset_t set; | 217 | sigset_t set; |
218 | stack_t st; | ||
219 | int rval; | 218 | int rval; |
220 | 219 | ||
221 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 220 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
@@ -233,11 +232,10 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) | |||
233 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) | 232 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) |
234 | goto badframe; | 233 | goto badframe; |
235 | 234 | ||
236 | if (__copy_from_user((void *)&st, &frame->uc.uc_stack, sizeof(st))) | ||
237 | goto badframe; | ||
238 | /* It is more difficult to avoid calling this function than to | 235 | /* It is more difficult to avoid calling this function than to |
239 | call it and ignore errors. */ | 236 | call it and ignore errors. */ |
240 | do_sigaltstack(&st, NULL, regs->r1); | 237 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->r1)) |
238 | goto badframe; | ||
241 | 239 | ||
242 | return rval; | 240 | return rval; |
243 | 241 | ||
@@ -251,7 +249,7 @@ badframe: | |||
251 | */ | 249 | */ |
252 | 250 | ||
253 | static int | 251 | static int |
254 | setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, | 252 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, |
255 | unsigned long mask) | 253 | unsigned long mask) |
256 | { | 254 | { |
257 | int err = 0; | 255 | int err = 0; |
@@ -278,7 +276,7 @@ setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, | |||
278 | /* | 276 | /* |
279 | * Determine which stack to use.. | 277 | * Determine which stack to use.. |
280 | */ | 278 | */ |
281 | static inline void * | 279 | static inline void __user * |
282 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 280 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) |
283 | { | 281 | { |
284 | /* Default to using normal stack */ | 282 | /* Default to using normal stack */ |
@@ -287,87 +285,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | |||
287 | if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp)) | 285 | if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack(sp)) |
288 | sp = current->sas_ss_sp + current->sas_ss_size; | 286 | sp = current->sas_ss_sp + current->sas_ss_size; |
289 | 287 | ||
290 | return (void *)((sp - frame_size) & -8UL); | 288 | return (void __user *)((sp - frame_size) & -8UL); |
291 | } | ||
292 | |||
293 | static void setup_frame(int sig, struct k_sigaction *ka, | ||
294 | sigset_t *set, struct pt_regs *regs) | ||
295 | { | ||
296 | struct sigframe *frame; | ||
297 | int err = 0; | ||
298 | int signal; | ||
299 | |||
300 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
301 | |||
302 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
303 | goto give_sigsegv; | ||
304 | |||
305 | signal = current_thread_info()->exec_domain | ||
306 | && current_thread_info()->exec_domain->signal_invmap | ||
307 | && sig < 32 | ||
308 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
309 | : sig; | ||
310 | |||
311 | err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); | ||
312 | |||
313 | if (_NSIG_WORDS > 1) { | ||
314 | err |= __copy_to_user(frame->extramask, &set->sig[1], | ||
315 | sizeof(frame->extramask)); | ||
316 | } | ||
317 | |||
318 | /* Set up to return from userspace. If provided, use a stub | ||
319 | already in userspace. */ | ||
320 | /* minus 8 is offset to cater for "rtsd r15,8" offset */ | ||
321 | if (ka->sa.sa_flags & SA_RESTORER) { | ||
322 | regs->r15 = ((unsigned long)ka->sa.sa_restorer)-8; | ||
323 | } else { | ||
324 | /* Note, these encodings are _big endian_! */ | ||
325 | |||
326 | /* addi r12, r0, __NR_sigreturn */ | ||
327 | err |= __put_user(0x31800000 | __NR_sigreturn , | ||
328 | frame->tramp + 0); | ||
329 | /* brki r14, 0x8 */ | ||
330 | err |= __put_user(0xb9cc0008, frame->tramp + 1); | ||
331 | |||
332 | /* Return from sighandler will jump to the tramp. | ||
333 | Negative 8 offset because return is rtsd r15, 8 */ | ||
334 | regs->r15 = ((unsigned long)frame->tramp)-8; | ||
335 | |||
336 | __invalidate_cache_sigtramp((unsigned long)frame->tramp); | ||
337 | } | ||
338 | |||
339 | if (err) | ||
340 | goto give_sigsegv; | ||
341 | |||
342 | /* Set up registers for signal handler */ | ||
343 | regs->r1 = (unsigned long) frame - STATE_SAVE_ARG_SPACE; | ||
344 | |||
345 | /* Signal handler args: */ | ||
346 | regs->r5 = signal; /* Arg 0: signum */ | ||
347 | regs->r6 = (unsigned long) &frame->sc; /* arg 1: sigcontext */ | ||
348 | |||
349 | /* Offset of 4 to handle microblaze rtid r14, 0 */ | ||
350 | regs->pc = (unsigned long)ka->sa.sa_handler; | ||
351 | |||
352 | set_fs(USER_DS); | ||
353 | |||
354 | #ifdef DEBUG_SIG | ||
355 | printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n", | ||
356 | current->comm, current->pid, frame, regs->pc); | ||
357 | #endif | ||
358 | |||
359 | return; | ||
360 | |||
361 | give_sigsegv: | ||
362 | if (sig == SIGSEGV) | ||
363 | ka->sa.sa_handler = SIG_DFL; | ||
364 | force_sig(SIGSEGV, current); | ||
365 | } | 289 | } |
366 | 290 | ||
367 | static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 291 | static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
368 | sigset_t *set, struct pt_regs *regs) | 292 | sigset_t *set, struct pt_regs *regs) |
369 | { | 293 | { |
370 | struct rt_sigframe *frame; | 294 | struct rt_sigframe __user *frame; |
371 | int err = 0; | 295 | int err = 0; |
372 | int signal; | 296 | int signal; |
373 | 297 | ||
@@ -382,7 +306,8 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
382 | ? current_thread_info()->exec_domain->signal_invmap[sig] | 306 | ? current_thread_info()->exec_domain->signal_invmap[sig] |
383 | : sig; | 307 | : sig; |
384 | 308 | ||
385 | err |= copy_siginfo_to_user(&frame->info, info); | 309 | if (info) |
310 | err |= copy_siginfo_to_user(&frame->info, info); | ||
386 | 311 | ||
387 | /* Create the ucontext. */ | 312 | /* Create the ucontext. */ |
388 | err |= __put_user(0, &frame->uc.uc_flags); | 313 | err |= __put_user(0, &frame->uc.uc_flags); |
@@ -463,7 +388,15 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) | |||
463 | case -ERESTARTNOINTR: | 388 | case -ERESTARTNOINTR: |
464 | do_restart: | 389 | do_restart: |
465 | /* offset of 4 bytes to re-execute trap (brki) instruction */ | 390 | /* offset of 4 bytes to re-execute trap (brki) instruction */ |
391 | #ifndef CONFIG_MMU | ||
466 | regs->pc -= 4; | 392 | regs->pc -= 4; |
393 | #else | ||
394 | /* offset of 8 bytes required = 4 for rtbd | ||
395 | offset, plus 4 for size of | ||
396 | "brki r14,8" | ||
397 | instruction. */ | ||
398 | regs->pc -= 8; | ||
399 | #endif | ||
467 | break; | 400 | break; |
468 | } | 401 | } |
469 | } | 402 | } |
@@ -480,7 +413,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
480 | if (ka->sa.sa_flags & SA_SIGINFO) | 413 | if (ka->sa.sa_flags & SA_SIGINFO) |
481 | setup_rt_frame(sig, ka, info, oldset, regs); | 414 | setup_rt_frame(sig, ka, info, oldset, regs); |
482 | else | 415 | else |
483 | setup_frame(sig, ka, oldset, regs); | 416 | setup_rt_frame(sig, ka, NULL, oldset, regs); |
484 | 417 | ||
485 | if (ka->sa.sa_flags & SA_ONESHOT) | 418 | if (ka->sa.sa_flags & SA_ONESHOT) |
486 | ka->sa.sa_handler = SIG_DFL; | 419 | ka->sa.sa_handler = SIG_DFL; |
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index 3bb42ec924c2..376d1789f7c0 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S | |||
@@ -2,7 +2,11 @@ ENTRY(sys_call_table) | |||
2 | .long sys_restart_syscall /* 0 - old "setup()" system call, | 2 | .long sys_restart_syscall /* 0 - old "setup()" system call, |
3 | * used for restarting */ | 3 | * used for restarting */ |
4 | .long sys_exit | 4 | .long sys_exit |
5 | .long sys_ni_syscall /* was fork */ | 5 | #ifdef CONFIG_MMU |
6 | .long sys_fork_wrapper | ||
7 | #else | ||
8 | .long sys_ni_syscall | ||
9 | #endif | ||
6 | .long sys_read | 10 | .long sys_read |
7 | .long sys_write | 11 | .long sys_write |
8 | .long sys_open /* 5 */ | 12 | .long sys_open /* 5 */ |
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index 293ef486013a..eaaaf805f31b 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c | |||
@@ -22,14 +22,6 @@ void trap_init(void) | |||
22 | __enable_hw_exceptions(); | 22 | __enable_hw_exceptions(); |
23 | } | 23 | } |
24 | 24 | ||
25 | void __bad_xchg(volatile void *ptr, int size) | ||
26 | { | ||
27 | printk(KERN_INFO "xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", | ||
28 | __builtin_return_address(0), ptr, size); | ||
29 | BUG(); | ||
30 | } | ||
31 | EXPORT_SYMBOL(__bad_xchg); | ||
32 | |||
33 | static int kstack_depth_to_print = 24; | 25 | static int kstack_depth_to_print = 24; |
34 | 26 | ||
35 | static int __init kstack_setup(char *s) | 27 | static int __init kstack_setup(char *s) |
@@ -105,3 +97,37 @@ void dump_stack(void) | |||
105 | show_stack(NULL, NULL); | 97 | show_stack(NULL, NULL); |
106 | } | 98 | } |
107 | EXPORT_SYMBOL(dump_stack); | 99 | EXPORT_SYMBOL(dump_stack); |
100 | |||
101 | #ifdef CONFIG_MMU | ||
102 | void __bug(const char *file, int line, void *data) | ||
103 | { | ||
104 | if (data) | ||
105 | printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n", | ||
106 | file, line, data); | ||
107 | else | ||
108 | printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line); | ||
109 | |||
110 | machine_halt(); | ||
111 | } | ||
112 | |||
113 | int bad_trap(int trap_num, struct pt_regs *regs) | ||
114 | { | ||
115 | printk(KERN_CRIT | ||
116 | "unimplemented trap %d called at 0x%08lx, pid %d!\n", | ||
117 | trap_num, regs->pc, current->pid); | ||
118 | return -ENOSYS; | ||
119 | } | ||
120 | |||
121 | int debug_trap(struct pt_regs *regs) | ||
122 | { | ||
123 | int i; | ||
124 | printk(KERN_CRIT "debug trap\n"); | ||
125 | for (i = 0; i < 32; i++) { | ||
126 | /* printk("r%i:%08X\t",i,regs->gpr[i]); */ | ||
127 | if ((i % 4) == 3) | ||
128 | printk(KERN_CRIT "\n"); | ||
129 | } | ||
130 | printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr); | ||
131 | return -ENOSYS; | ||
132 | } | ||
133 | #endif | ||
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 840385e51291..8ae807ab7a51 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S | |||
@@ -17,8 +17,7 @@ ENTRY(_start) | |||
17 | jiffies = jiffies_64 + 4; | 17 | jiffies = jiffies_64 + 4; |
18 | 18 | ||
19 | SECTIONS { | 19 | SECTIONS { |
20 | . = CONFIG_KERNEL_BASE_ADDR; | 20 | . = CONFIG_KERNEL_START; |
21 | |||
22 | .text : { | 21 | .text : { |
23 | _text = . ; | 22 | _text = . ; |
24 | _stext = . ; | 23 | _stext = . ; |
@@ -132,6 +131,8 @@ SECTIONS { | |||
132 | __con_initcall_end = .; | 131 | __con_initcall_end = .; |
133 | } | 132 | } |
134 | 133 | ||
134 | SECURITY_INIT | ||
135 | |||
135 | __init_end_before_initramfs = .; | 136 | __init_end_before_initramfs = .; |
136 | 137 | ||
137 | .init.ramfs ALIGN(4096) : { | 138 | .init.ramfs ALIGN(4096) : { |
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile index d27126bf306a..71c8cb6c9e43 100644 --- a/arch/microblaze/lib/Makefile +++ b/arch/microblaze/lib/Makefile | |||
@@ -10,4 +10,5 @@ else | |||
10 | lib-y += memcpy.o memmove.o | 10 | lib-y += memcpy.o memmove.o |
11 | endif | 11 | endif |
12 | 12 | ||
13 | lib-y += uaccess.o | 13 | lib-$(CONFIG_NO_MMU) += uaccess.o |
14 | lib-$(CONFIG_MMU) += uaccess_old.o | ||
diff --git a/arch/microblaze/lib/checksum.c b/arch/microblaze/lib/checksum.c index 809340070a13..f08e74591418 100644 --- a/arch/microblaze/lib/checksum.c +++ b/arch/microblaze/lib/checksum.c | |||
@@ -32,9 +32,10 @@ | |||
32 | /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access | 32 | /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access |
33 | kills, so most of the assembly has to go. */ | 33 | kills, so most of the assembly has to go. */ |
34 | 34 | ||
35 | #include <net/checksum.h> | ||
36 | #include <asm/checksum.h> | ||
37 | #include <linux/module.h> | 35 | #include <linux/module.h> |
36 | #include <net/checksum.h> | ||
37 | |||
38 | #include <asm/byteorder.h> | ||
38 | 39 | ||
39 | static inline unsigned short from32to16(unsigned long x) | 40 | static inline unsigned short from32to16(unsigned long x) |
40 | { | 41 | { |
@@ -102,6 +103,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | |||
102 | { | 103 | { |
103 | return (__force __sum16)~do_csum(iph, ihl*4); | 104 | return (__force __sum16)~do_csum(iph, ihl*4); |
104 | } | 105 | } |
106 | EXPORT_SYMBOL(ip_fast_csum); | ||
105 | 107 | ||
106 | /* | 108 | /* |
107 | * computes the checksum of a memory block at buff, length len, | 109 | * computes the checksum of a memory block at buff, length len, |
@@ -115,15 +117,16 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | |||
115 | * | 117 | * |
116 | * it's best to have buff aligned on a 32-bit boundary | 118 | * it's best to have buff aligned on a 32-bit boundary |
117 | */ | 119 | */ |
118 | __wsum csum_partial(const void *buff, int len, __wsum sum) | 120 | __wsum csum_partial(const void *buff, int len, __wsum wsum) |
119 | { | 121 | { |
122 | unsigned int sum = (__force unsigned int)wsum; | ||
120 | unsigned int result = do_csum(buff, len); | 123 | unsigned int result = do_csum(buff, len); |
121 | 124 | ||
122 | /* add in old sum, and carry.. */ | 125 | /* add in old sum, and carry.. */ |
123 | result += sum; | 126 | result += sum; |
124 | if (sum > result) | 127 | if (sum > result) |
125 | result += 1; | 128 | result += 1; |
126 | return result; | 129 | return (__force __wsum)result; |
127 | } | 130 | } |
128 | EXPORT_SYMBOL(csum_partial); | 131 | EXPORT_SYMBOL(csum_partial); |
129 | 132 | ||
@@ -131,9 +134,9 @@ EXPORT_SYMBOL(csum_partial); | |||
131 | * this routine is used for miscellaneous IP-like checksums, mainly | 134 | * this routine is used for miscellaneous IP-like checksums, mainly |
132 | * in icmp.c | 135 | * in icmp.c |
133 | */ | 136 | */ |
134 | __sum16 ip_compute_csum(const unsigned char *buff, int len) | 137 | __sum16 ip_compute_csum(const void *buff, int len) |
135 | { | 138 | { |
136 | return ~do_csum(buff, len); | 139 | return (__force __sum16)~do_csum(buff, len); |
137 | } | 140 | } |
138 | EXPORT_SYMBOL(ip_compute_csum); | 141 | EXPORT_SYMBOL(ip_compute_csum); |
139 | 142 | ||
@@ -141,12 +144,18 @@ EXPORT_SYMBOL(ip_compute_csum); | |||
141 | * copy from fs while checksumming, otherwise like csum_partial | 144 | * copy from fs while checksumming, otherwise like csum_partial |
142 | */ | 145 | */ |
143 | __wsum | 146 | __wsum |
144 | csum_partial_copy_from_user(const char __user *src, char *dst, int len, | 147 | csum_partial_copy_from_user(const void __user *src, void *dst, int len, |
145 | int sum, int *csum_err) | 148 | __wsum sum, int *csum_err) |
146 | { | 149 | { |
147 | if (csum_err) | 150 | int missing; |
151 | |||
152 | missing = __copy_from_user(dst, src, len); | ||
153 | if (missing) { | ||
154 | memset(dst + len - missing, 0, missing); | ||
155 | *csum_err = -EFAULT; | ||
156 | } else | ||
148 | *csum_err = 0; | 157 | *csum_err = 0; |
149 | memcpy(dst, src, len); | 158 | |
150 | return csum_partial(dst, len, sum); | 159 | return csum_partial(dst, len, sum); |
151 | } | 160 | } |
152 | EXPORT_SYMBOL(csum_partial_copy_from_user); | 161 | EXPORT_SYMBOL(csum_partial_copy_from_user); |
@@ -155,7 +164,7 @@ EXPORT_SYMBOL(csum_partial_copy_from_user); | |||
155 | * copy from ds while checksumming, otherwise like csum_partial | 164 | * copy from ds while checksumming, otherwise like csum_partial |
156 | */ | 165 | */ |
157 | __wsum | 166 | __wsum |
158 | csum_partial_copy(const char *src, char *dst, int len, int sum) | 167 | csum_partial_copy(const void *src, void *dst, int len, __wsum sum) |
159 | { | 168 | { |
160 | memcpy(dst, src, len); | 169 | memcpy(dst, src, len); |
161 | return csum_partial(dst, len, sum); | 170 | return csum_partial(dst, len, sum); |
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c index 5880119c4487..6a907c58a4bc 100644 --- a/arch/microblaze/lib/memcpy.c +++ b/arch/microblaze/lib/memcpy.c | |||
@@ -154,8 +154,3 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) | |||
154 | } | 154 | } |
155 | EXPORT_SYMBOL(memcpy); | 155 | EXPORT_SYMBOL(memcpy); |
156 | #endif /* __HAVE_ARCH_MEMCPY */ | 156 | #endif /* __HAVE_ARCH_MEMCPY */ |
157 | |||
158 | void *cacheable_memcpy(void *d, const void *s, __kernel_size_t c) | ||
159 | { | ||
160 | return memcpy(d, s, c); | ||
161 | } | ||
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S new file mode 100644 index 000000000000..67f991c14b8a --- /dev/null +++ b/arch/microblaze/lib/uaccess_old.S | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> | ||
3 | * Copyright (C) 2009 PetaLogix | ||
4 | * Copyright (C) 2007 LynuxWorks, Inc. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/errno.h> | ||
12 | #include <linux/linkage.h> | ||
13 | |||
14 | /* | ||
15 | * int __strncpy_user(char *to, char *from, int len); | ||
16 | * | ||
17 | * Returns: | ||
18 | * -EFAULT for an exception | ||
19 | * len if we hit the buffer limit | ||
20 | * bytes copied | ||
21 | */ | ||
22 | |||
23 | .text | ||
24 | .globl __strncpy_user; | ||
25 | .align 4; | ||
26 | __strncpy_user: | ||
27 | |||
28 | /* | ||
29 | * r5 - to | ||
30 | * r6 - from | ||
31 | * r7 - len | ||
32 | * r3 - temp count | ||
33 | * r4 - temp val | ||
34 | */ | ||
35 | addik r3,r7,0 /* temp_count = len */ | ||
36 | beqi r3,3f | ||
37 | 1: | ||
38 | lbu r4,r6,r0 | ||
39 | sb r4,r5,r0 | ||
40 | |||
41 | addik r3,r3,-1 | ||
42 | beqi r3,2f /* break on len */ | ||
43 | |||
44 | addik r5,r5,1 | ||
45 | bneid r4,1b | ||
46 | addik r6,r6,1 /* delay slot */ | ||
47 | addik r3,r3,1 /* undo "temp_count--" */ | ||
48 | 2: | ||
49 | rsubk r3,r3,r7 /* temp_count = len - temp_count */ | ||
50 | 3: | ||
51 | rtsd r15,8 | ||
52 | nop | ||
53 | |||
54 | |||
55 | .section .fixup, "ax" | ||
56 | .align 2 | ||
57 | 4: | ||
58 | brid 3b | ||
59 | addik r3,r0, -EFAULT | ||
60 | |||
61 | .section __ex_table, "a" | ||
62 | .word 1b,4b | ||
63 | |||
64 | /* | ||
65 | * int __strnlen_user(char __user *str, int maxlen); | ||
66 | * | ||
67 | * Returns: | ||
68 | * 0 on error | ||
69 | * maxlen + 1 if no NUL byte found within maxlen bytes | ||
70 | * size of the string (including NUL byte) | ||
71 | */ | ||
72 | |||
73 | .text | ||
74 | .globl __strnlen_user; | ||
75 | .align 4; | ||
76 | __strnlen_user: | ||
77 | addik r3,r6,0 | ||
78 | beqi r3,3f | ||
79 | 1: | ||
80 | lbu r4,r5,r0 | ||
81 | beqid r4,2f /* break on NUL */ | ||
82 | addik r3,r3,-1 /* delay slot */ | ||
83 | |||
84 | bneid r3,1b | ||
85 | addik r5,r5,1 /* delay slot */ | ||
86 | |||
87 | addik r3,r3,-1 /* for break on len */ | ||
88 | 2: | ||
89 | rsubk r3,r3,r6 | ||
90 | 3: | ||
91 | rtsd r15,8 | ||
92 | nop | ||
93 | |||
94 | |||
95 | .section .fixup,"ax" | ||
96 | 4: | ||
97 | brid 3b | ||
98 | addk r3,r0,r0 | ||
99 | |||
100 | .section __ex_table,"a" | ||
101 | .word 1b,4b | ||
102 | |||
103 | /* | ||
104 | * int __copy_tofrom_user(char *to, char *from, int len) | ||
105 | * Return: | ||
106 | * 0 on success | ||
107 | * number of not copied bytes on error | ||
108 | */ | ||
109 | .text | ||
110 | .globl __copy_tofrom_user; | ||
111 | .align 4; | ||
112 | __copy_tofrom_user: | ||
113 | /* | ||
114 | * r5 - to | ||
115 | * r6 - from | ||
116 | * r7, r3 - count | ||
117 | * r4 - tempval | ||
118 | */ | ||
119 | addik r3,r7,0 | ||
120 | beqi r3,3f | ||
121 | 1: | ||
122 | lbu r4,r6,r0 | ||
123 | addik r6,r6,1 | ||
124 | 2: | ||
125 | sb r4,r5,r0 | ||
126 | addik r3,r3,-1 | ||
127 | bneid r3,1b | ||
128 | addik r5,r5,1 /* delay slot */ | ||
129 | 3: | ||
130 | rtsd r15,8 | ||
131 | nop | ||
132 | |||
133 | |||
134 | .section __ex_table,"a" | ||
135 | .word 1b,3b,2b,3b | ||
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile index bf9e4479a1fd..6c8a924d9e26 100644 --- a/arch/microblaze/mm/Makefile +++ b/arch/microblaze/mm/Makefile | |||
@@ -3,3 +3,5 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o | 5 | obj-y := init.o |
6 | |||
7 | obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o | ||
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c new file mode 100644 index 000000000000..5e67cd1fab40 --- /dev/null +++ b/arch/microblaze/mm/fault.c | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * arch/microblaze/mm/fault.c | ||
3 | * | ||
4 | * Copyright (C) 2007 Xilinx, Inc. All rights reserved. | ||
5 | * | ||
6 | * Derived from "arch/ppc/mm/fault.c" | ||
7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
8 | * | ||
9 | * Derived from "arch/i386/mm/fault.c" | ||
10 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
11 | * | ||
12 | * Modified by Cort Dougan and Paul Mackerras. | ||
13 | * | ||
14 | * This file is subject to the terms and conditions of the GNU General | ||
15 | * Public License. See the file COPYING in the main directory of this | ||
16 | * archive for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/ptrace.h> | ||
28 | #include <linux/mman.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | |||
32 | #include <asm/page.h> | ||
33 | #include <asm/pgtable.h> | ||
34 | #include <asm/mmu.h> | ||
35 | #include <asm/mmu_context.h> | ||
36 | #include <asm/system.h> | ||
37 | #include <linux/uaccess.h> | ||
38 | #include <asm/exceptions.h> | ||
39 | |||
40 | #if defined(CONFIG_KGDB) | ||
41 | int debugger_kernel_faults = 1; | ||
42 | #endif | ||
43 | |||
44 | static unsigned long pte_misses; /* updated by do_page_fault() */ | ||
45 | static unsigned long pte_errors; /* updated by do_page_fault() */ | ||
46 | |||
47 | /* | ||
48 | * Check whether the instruction at regs->pc is a store using | ||
49 | * an update addressing form which will update r1. | ||
50 | */ | ||
51 | static int store_updates_sp(struct pt_regs *regs) | ||
52 | { | ||
53 | unsigned int inst; | ||
54 | |||
55 | if (get_user(inst, (unsigned int *)regs->pc)) | ||
56 | return 0; | ||
57 | /* check for 1 in the rD field */ | ||
58 | if (((inst >> 21) & 0x1f) != 1) | ||
59 | return 0; | ||
60 | /* check for store opcodes */ | ||
61 | if ((inst & 0xd0000000) == 0xd0000000) | ||
62 | return 1; | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | |||
67 | /* | ||
68 | * bad_page_fault is called when we have a bad access from the kernel. | ||
69 | * It is called from do_page_fault above and from some of the procedures | ||
70 | * in traps.c. | ||
71 | */ | ||
72 | static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) | ||
73 | { | ||
74 | const struct exception_table_entry *fixup; | ||
75 | /* MS: no context */ | ||
76 | /* Are we prepared to handle this fault? */ | ||
77 | fixup = search_exception_tables(regs->pc); | ||
78 | if (fixup) { | ||
79 | regs->pc = fixup->fixup; | ||
80 | return; | ||
81 | } | ||
82 | |||
83 | /* kernel has accessed a bad area */ | ||
84 | #if defined(CONFIG_KGDB) | ||
85 | if (debugger_kernel_faults) | ||
86 | debugger(regs); | ||
87 | #endif | ||
88 | die("kernel access of bad area", regs, sig); | ||
89 | } | ||
90 | |||
91 | /* | ||
92 | * The error_code parameter is ESR for a data fault, | ||
93 | * 0 for an instruction fault. | ||
94 | */ | ||
95 | void do_page_fault(struct pt_regs *regs, unsigned long address, | ||
96 | unsigned long error_code) | ||
97 | { | ||
98 | struct vm_area_struct *vma; | ||
99 | struct mm_struct *mm = current->mm; | ||
100 | siginfo_t info; | ||
101 | int code = SEGV_MAPERR; | ||
102 | int is_write = error_code & ESR_S; | ||
103 | int fault; | ||
104 | |||
105 | regs->ear = address; | ||
106 | regs->esr = error_code; | ||
107 | |||
108 | /* On a kernel SLB miss we can only check for a valid exception entry */ | ||
109 | if (kernel_mode(regs) && (address >= TASK_SIZE)) { | ||
110 | printk(KERN_WARNING "kernel task_size exceed"); | ||
111 | _exception(SIGSEGV, regs, code, address); | ||
112 | } | ||
113 | |||
114 | /* for instr TLB miss and instr storage exception ESR_S is undefined */ | ||
115 | if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) | ||
116 | is_write = 0; | ||
117 | |||
118 | #if defined(CONFIG_KGDB) | ||
119 | if (debugger_fault_handler && regs->trap == 0x300) { | ||
120 | debugger_fault_handler(regs); | ||
121 | return; | ||
122 | } | ||
123 | #endif /* CONFIG_KGDB */ | ||
124 | |||
125 | if (in_atomic() || mm == NULL) { | ||
126 | /* FIXME */ | ||
127 | if (kernel_mode(regs)) { | ||
128 | printk(KERN_EMERG | ||
129 | "Page fault in kernel mode - Oooou!!! pid %d\n", | ||
130 | current->pid); | ||
131 | _exception(SIGSEGV, regs, code, address); | ||
132 | return; | ||
133 | } | ||
134 | /* in_atomic() in user mode is really bad, | ||
135 | as is current->mm == NULL. */ | ||
136 | printk(KERN_EMERG "Page fault in user mode with " | ||
137 | "in_atomic(), mm = %p\n", mm); | ||
138 | printk(KERN_EMERG "r15 = %lx MSR = %lx\n", | ||
139 | regs->r15, regs->msr); | ||
140 | die("Weird page fault", regs, SIGSEGV); | ||
141 | } | ||
142 | |||
143 | /* When running in the kernel we expect faults to occur only to | ||
144 | * addresses in user space. All other faults represent errors in the | ||
145 | * kernel and should generate an OOPS. Unfortunately, in the case of an | ||
146 | * erroneous fault occurring in a code path which already holds mmap_sem | ||
147 | * we will deadlock attempting to validate the fault against the | ||
148 | * address space. Luckily the kernel only validly references user | ||
149 | * space from well defined areas of code, which are listed in the | ||
150 | * exceptions table. | ||
151 | * | ||
152 | * As the vast majority of faults will be valid we will only perform | ||
153 | * the source reference check when there is a possibility of a deadlock. | ||
154 | * Attempt to lock the address space, if we cannot we then validate the | ||
155 | * source. If this is invalid we can skip the address space check, | ||
156 | * thus avoiding the deadlock. | ||
157 | */ | ||
158 | if (!down_read_trylock(&mm->mmap_sem)) { | ||
159 | if (kernel_mode(regs) && !search_exception_tables(regs->pc)) | ||
160 | goto bad_area_nosemaphore; | ||
161 | |||
162 | down_read(&mm->mmap_sem); | ||
163 | } | ||
164 | |||
165 | vma = find_vma(mm, address); | ||
166 | if (!vma) | ||
167 | goto bad_area; | ||
168 | |||
169 | if (vma->vm_start <= address) | ||
170 | goto good_area; | ||
171 | |||
172 | if (!(vma->vm_flags & VM_GROWSDOWN)) | ||
173 | goto bad_area; | ||
174 | |||
175 | if (!is_write) | ||
176 | goto bad_area; | ||
177 | |||
178 | /* | ||
179 | * N.B. The ABI allows programs to access up to | ||
180 | * a few hundred bytes below the stack pointer (TBD). | ||
181 | * The kernel signal delivery code writes up to about 1.5kB | ||
182 | * below the stack pointer (r1) before decrementing it. | ||
183 | * The exec code can write slightly over 640kB to the stack | ||
184 | * before setting the user r1. Thus we allow the stack to | ||
185 | * expand to 1MB without further checks. | ||
186 | */ | ||
187 | if (address + 0x100000 < vma->vm_end) { | ||
188 | |||
189 | /* get user regs even if this fault is in kernel mode */ | ||
190 | struct pt_regs *uregs = current->thread.regs; | ||
191 | if (uregs == NULL) | ||
192 | goto bad_area; | ||
193 | |||
194 | /* | ||
195 | * A user-mode access to an address a long way below | ||
196 | * the stack pointer is only valid if the instruction | ||
197 | * is one which would update the stack pointer to the | ||
198 | * address accessed if the instruction completed, | ||
199 | * i.e. either stwu rs,n(r1) or stwux rs,r1,rb | ||
200 | * (or the byte, halfword, float or double forms). | ||
201 | * | ||
202 | * If we don't check this then any write to the area | ||
203 | * between the last mapped region and the stack will | ||
204 | * expand the stack rather than segfaulting. | ||
205 | */ | ||
206 | if (address + 2048 < uregs->r1 | ||
207 | && (kernel_mode(regs) || !store_updates_sp(regs))) | ||
208 | goto bad_area; | ||
209 | } | ||
210 | if (expand_stack(vma, address)) | ||
211 | goto bad_area; | ||
212 | |||
213 | good_area: | ||
214 | code = SEGV_ACCERR; | ||
215 | |||
216 | /* a write */ | ||
217 | if (is_write) { | ||
218 | if (!(vma->vm_flags & VM_WRITE)) | ||
219 | goto bad_area; | ||
220 | /* a read */ | ||
221 | } else { | ||
222 | /* protection fault */ | ||
223 | if (error_code & 0x08000000) | ||
224 | goto bad_area; | ||
225 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) | ||
226 | goto bad_area; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * If for any reason at all we couldn't handle the fault, | ||
231 | * make sure we exit gracefully rather than endlessly redo | ||
232 | * the fault. | ||
233 | */ | ||
234 | survive: | ||
235 | fault = handle_mm_fault(mm, vma, address, is_write); | ||
236 | if (unlikely(fault & VM_FAULT_ERROR)) { | ||
237 | if (fault & VM_FAULT_OOM) | ||
238 | goto out_of_memory; | ||
239 | else if (fault & VM_FAULT_SIGBUS) | ||
240 | goto do_sigbus; | ||
241 | BUG(); | ||
242 | } | ||
243 | if (fault & VM_FAULT_MAJOR) | ||
244 | current->maj_flt++; | ||
245 | else | ||
246 | current->min_flt++; | ||
247 | up_read(&mm->mmap_sem); | ||
248 | /* | ||
249 | * keep track of tlb+htab misses that are good addrs but | ||
250 | * just need pte's created via handle_mm_fault() | ||
251 | * -- Cort | ||
252 | */ | ||
253 | pte_misses++; | ||
254 | return; | ||
255 | |||
256 | bad_area: | ||
257 | up_read(&mm->mmap_sem); | ||
258 | |||
259 | bad_area_nosemaphore: | ||
260 | pte_errors++; | ||
261 | |||
262 | /* User mode accesses cause a SIGSEGV */ | ||
263 | if (user_mode(regs)) { | ||
264 | _exception(SIGSEGV, regs, code, address); | ||
265 | /* info.si_signo = SIGSEGV; | ||
266 | info.si_errno = 0; | ||
267 | info.si_code = code; | ||
268 | info.si_addr = (void *) address; | ||
269 | force_sig_info(SIGSEGV, &info, current);*/ | ||
270 | return; | ||
271 | } | ||
272 | |||
273 | bad_page_fault(regs, address, SIGSEGV); | ||
274 | return; | ||
275 | |||
276 | /* | ||
277 | * We ran out of memory, or some other thing happened to us that made | ||
278 | * us unable to handle the page fault gracefully. | ||
279 | */ | ||
280 | out_of_memory: | ||
281 | if (current->pid == 1) { | ||
282 | yield(); | ||
283 | down_read(&mm->mmap_sem); | ||
284 | goto survive; | ||
285 | } | ||
286 | up_read(&mm->mmap_sem); | ||
287 | printk(KERN_WARNING "VM: killing process %s\n", current->comm); | ||
288 | if (user_mode(regs)) | ||
289 | do_exit(SIGKILL); | ||
290 | bad_page_fault(regs, address, SIGKILL); | ||
291 | return; | ||
292 | |||
293 | do_sigbus: | ||
294 | up_read(&mm->mmap_sem); | ||
295 | if (user_mode(regs)) { | ||
296 | info.si_signo = SIGBUS; | ||
297 | info.si_errno = 0; | ||
298 | info.si_code = BUS_ADRERR; | ||
299 | info.si_addr = (void __user *)address; | ||
300 | force_sig_info(SIGBUS, &info, current); | ||
301 | return; | ||
302 | } | ||
303 | bad_page_fault(regs, address, SIGBUS); | ||
304 | } | ||
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index b0c8213cd6cf..b5a701cd71e0 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c | |||
@@ -23,8 +23,16 @@ | |||
23 | #include <asm/sections.h> | 23 | #include <asm/sections.h> |
24 | #include <asm/tlb.h> | 24 | #include <asm/tlb.h> |
25 | 25 | ||
26 | #ifndef CONFIG_MMU | ||
26 | unsigned int __page_offset; | 27 | unsigned int __page_offset; |
27 | /* EXPORT_SYMBOL(__page_offset); */ | 28 | EXPORT_SYMBOL(__page_offset); |
29 | |||
30 | #else | ||
31 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
32 | |||
33 | int mem_init_done; | ||
34 | static int init_bootmem_done; | ||
35 | #endif /* CONFIG_MMU */ | ||
28 | 36 | ||
29 | char *klimit = _end; | 37 | char *klimit = _end; |
30 | 38 | ||
@@ -32,28 +40,26 @@ char *klimit = _end; | |||
32 | * Initialize the bootmem system and give it all the memory we | 40 | * Initialize the bootmem system and give it all the memory we |
33 | * have available. | 41 | * have available. |
34 | */ | 42 | */ |
35 | unsigned int memory_start; | 43 | unsigned long memory_start; |
36 | unsigned int memory_end; /* due to mm/nommu.c */ | 44 | unsigned long memory_end; /* due to mm/nommu.c */ |
37 | unsigned int memory_size; | 45 | unsigned long memory_size; |
38 | 46 | ||
39 | /* | 47 | /* |
40 | * paging_init() sets up the page tables - in fact we've already done this. | 48 | * paging_init() sets up the page tables - in fact we've already done this. |
41 | */ | 49 | */ |
42 | static void __init paging_init(void) | 50 | static void __init paging_init(void) |
43 | { | 51 | { |
44 | int i; | ||
45 | unsigned long zones_size[MAX_NR_ZONES]; | 52 | unsigned long zones_size[MAX_NR_ZONES]; |
46 | 53 | ||
54 | /* Clean every zones */ | ||
55 | memset(zones_size, 0, sizeof(zones_size)); | ||
56 | |||
47 | /* | 57 | /* |
48 | * old: we can DMA to/from any address.put all page into ZONE_DMA | 58 | * old: we can DMA to/from any address.put all page into ZONE_DMA |
49 | * We use only ZONE_NORMAL | 59 | * We use only ZONE_NORMAL |
50 | */ | 60 | */ |
51 | zones_size[ZONE_NORMAL] = max_mapnr; | 61 | zones_size[ZONE_NORMAL] = max_mapnr; |
52 | 62 | ||
53 | /* every other zones are empty */ | ||
54 | for (i = 1; i < MAX_NR_ZONES; i++) | ||
55 | zones_size[i] = 0; | ||
56 | |||
57 | free_area_init(zones_size); | 63 | free_area_init(zones_size); |
58 | } | 64 | } |
59 | 65 | ||
@@ -61,6 +67,7 @@ void __init setup_memory(void) | |||
61 | { | 67 | { |
62 | int i; | 68 | int i; |
63 | unsigned long map_size; | 69 | unsigned long map_size; |
70 | #ifndef CONFIG_MMU | ||
64 | u32 kernel_align_start, kernel_align_size; | 71 | u32 kernel_align_start, kernel_align_size; |
65 | 72 | ||
66 | /* Find main memory where is the kernel */ | 73 | /* Find main memory where is the kernel */ |
@@ -93,6 +100,7 @@ void __init setup_memory(void) | |||
93 | __func__, kernel_align_start, kernel_align_start | 100 | __func__, kernel_align_start, kernel_align_start |
94 | + kernel_align_size, kernel_align_size); | 101 | + kernel_align_size, kernel_align_size); |
95 | 102 | ||
103 | #endif | ||
96 | /* | 104 | /* |
97 | * Kernel: | 105 | * Kernel: |
98 | * start: base phys address of kernel - page align | 106 | * start: base phys address of kernel - page align |
@@ -121,9 +129,13 @@ void __init setup_memory(void) | |||
121 | * for 4GB of memory, using 4kB pages), plus 1 page | 129 | * for 4GB of memory, using 4kB pages), plus 1 page |
122 | * (in case the address isn't page-aligned). | 130 | * (in case the address isn't page-aligned). |
123 | */ | 131 | */ |
132 | #ifndef CONFIG_MMU | ||
124 | map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)), | 133 | map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)), |
125 | min_low_pfn, max_low_pfn); | 134 | min_low_pfn, max_low_pfn); |
126 | 135 | #else | |
136 | map_size = init_bootmem_node(&contig_page_data, | ||
137 | PFN_UP(TOPHYS((u32)_end)), min_low_pfn, max_low_pfn); | ||
138 | #endif | ||
127 | lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size); | 139 | lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size); |
128 | 140 | ||
129 | /* free bootmem is whole main memory */ | 141 | /* free bootmem is whole main memory */ |
@@ -137,6 +149,9 @@ void __init setup_memory(void) | |||
137 | reserve_bootmem(lmb.reserved.region[i].base, | 149 | reserve_bootmem(lmb.reserved.region[i].base, |
138 | lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT); | 150 | lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT); |
139 | } | 151 | } |
152 | #ifdef CONFIG_MMU | ||
153 | init_bootmem_done = 1; | ||
154 | #endif | ||
140 | paging_init(); | 155 | paging_init(); |
141 | } | 156 | } |
142 | 157 | ||
@@ -191,11 +206,145 @@ void __init mem_init(void) | |||
191 | printk(KERN_INFO "Memory: %luk/%luk available\n", | 206 | printk(KERN_INFO "Memory: %luk/%luk available\n", |
192 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | 207 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), |
193 | num_physpages << (PAGE_SHIFT-10)); | 208 | num_physpages << (PAGE_SHIFT-10)); |
209 | #ifdef CONFIG_MMU | ||
210 | mem_init_done = 1; | ||
211 | #endif | ||
194 | } | 212 | } |
195 | 213 | ||
214 | #ifndef CONFIG_MMU | ||
196 | /* Check against bounds of physical memory */ | 215 | /* Check against bounds of physical memory */ |
197 | int ___range_ok(unsigned long addr, unsigned long size) | 216 | int ___range_ok(unsigned long addr, unsigned long size) |
198 | { | 217 | { |
199 | return ((addr < memory_start) || | 218 | return ((addr < memory_start) || |
200 | ((addr + size) > memory_end)); | 219 | ((addr + size) > memory_end)); |
201 | } | 220 | } |
221 | EXPORT_SYMBOL(___range_ok); | ||
222 | |||
223 | #else | ||
224 | int page_is_ram(unsigned long pfn) | ||
225 | { | ||
226 | return pfn < max_low_pfn; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Check for command-line options that affect what MMU_init will do. | ||
231 | */ | ||
232 | static void mm_cmdline_setup(void) | ||
233 | { | ||
234 | unsigned long maxmem = 0; | ||
235 | char *p = cmd_line; | ||
236 | |||
237 | /* Look for mem= option on command line */ | ||
238 | p = strstr(cmd_line, "mem="); | ||
239 | if (p) { | ||
240 | p += 4; | ||
241 | maxmem = memparse(p, &p); | ||
242 | if (maxmem && memory_size > maxmem) { | ||
243 | memory_size = maxmem; | ||
244 | memory_end = memory_start + memory_size; | ||
245 | lmb.memory.region[0].size = memory_size; | ||
246 | } | ||
247 | } | ||
248 | } | ||
249 | |||
250 | /* | ||
251 | * MMU_init_hw does the chip-specific initialization of the MMU hardware. | ||
252 | */ | ||
253 | static void __init mmu_init_hw(void) | ||
254 | { | ||
255 | /* | ||
256 | * The Zone Protection Register (ZPR) defines how protection will | ||
257 | * be applied to every page which is a member of a given zone. At | ||
258 | * present, we utilize only two of the zones. | ||
259 | * The zone index bits (of ZSEL) in the PTE are used for software | ||
260 | * indicators, except the LSB. For user access, zone 1 is used, | ||
261 | * for kernel access, zone 0 is used. We set all but zone 1 | ||
262 | * to zero, allowing only kernel access as indicated in the PTE. | ||
263 | * For zone 1, we set a 01 binary (a value of 10 will not work) | ||
264 | * to allow user access as indicated in the PTE. This also allows | ||
265 | * kernel access as indicated in the PTE. | ||
266 | */ | ||
267 | __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \ | ||
268 | "mts rzpr, r11;" | ||
269 | : : : "r11"); | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * MMU_init sets up the basic memory mappings for the kernel, | ||
274 | * including both RAM and possibly some I/O regions, | ||
275 | * and sets up the page tables and the MMU hardware ready to go. | ||
276 | */ | ||
277 | |||
278 | /* called from head.S */ | ||
279 | asmlinkage void __init mmu_init(void) | ||
280 | { | ||
281 | unsigned int kstart, ksize; | ||
282 | |||
283 | if (!lmb.reserved.cnt) { | ||
284 | printk(KERN_EMERG "Error memory count\n"); | ||
285 | machine_restart(NULL); | ||
286 | } | ||
287 | |||
288 | if ((u32) lmb.memory.region[0].size < 0x1000000) { | ||
289 | printk(KERN_EMERG "Memory must be greater than 16MB\n"); | ||
290 | machine_restart(NULL); | ||
291 | } | ||
292 | /* Find main memory where the kernel is */ | ||
293 | memory_start = (u32) lmb.memory.region[0].base; | ||
294 | memory_end = (u32) lmb.memory.region[0].base + | ||
295 | (u32) lmb.memory.region[0].size; | ||
296 | memory_size = memory_end - memory_start; | ||
297 | |||
298 | mm_cmdline_setup(); /* FIXME parse args from command line - not used */ | ||
299 | |||
300 | /* | ||
301 | * Map out the kernel text/data/bss from the available physical | ||
302 | * memory. | ||
303 | */ | ||
304 | kstart = __pa(CONFIG_KERNEL_START); /* kernel start */ | ||
305 | /* kernel size */ | ||
306 | ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START)); | ||
307 | lmb_reserve(kstart, ksize); | ||
308 | |||
309 | #if defined(CONFIG_BLK_DEV_INITRD) | ||
310 | /* Remove the init RAM disk from the available memory. */ | ||
311 | /* if (initrd_start) { | ||
312 | mem_pieces_remove(&phys_avail, __pa(initrd_start), | ||
313 | initrd_end - initrd_start, 1); | ||
314 | }*/ | ||
315 | #endif /* CONFIG_BLK_DEV_INITRD */ | ||
316 | |||
317 | /* Initialize the MMU hardware */ | ||
318 | mmu_init_hw(); | ||
319 | |||
320 | /* Map in all of RAM starting at CONFIG_KERNEL_START */ | ||
321 | mapin_ram(); | ||
322 | |||
323 | #ifdef HIGHMEM_START_BOOL | ||
324 | ioremap_base = HIGHMEM_START; | ||
325 | #else | ||
326 | ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ | ||
327 | #endif /* CONFIG_HIGHMEM */ | ||
328 | ioremap_bot = ioremap_base; | ||
329 | |||
330 | /* Initialize the context management stuff */ | ||
331 | mmu_context_init(); | ||
332 | } | ||
333 | |||
334 | /* This is only called until mem_init is done. */ | ||
335 | void __init *early_get_page(void) | ||
336 | { | ||
337 | void *p; | ||
338 | if (init_bootmem_done) { | ||
339 | p = alloc_bootmem_pages(PAGE_SIZE); | ||
340 | } else { | ||
341 | /* | ||
342 | * Mem start + 32MB -> here is limit | ||
343 | * because of mem mapping from head.S | ||
344 | */ | ||
345 | p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, | ||
346 | memory_start + 0x2000000)); | ||
347 | } | ||
348 | return p; | ||
349 | } | ||
350 | #endif /* CONFIG_MMU */ | ||
diff --git a/arch/microblaze/mm/mmu_context.c b/arch/microblaze/mm/mmu_context.c new file mode 100644 index 000000000000..26ff82f4fa8f --- /dev/null +++ b/arch/microblaze/mm/mmu_context.c | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * This file contains the routines for handling the MMU. | ||
3 | * | ||
4 | * Copyright (C) 2007 Xilinx, Inc. All rights reserved. | ||
5 | * | ||
6 | * Derived from arch/ppc/mm/4xx_mmu.c: | ||
7 | * -- paulus | ||
8 | * | ||
9 | * Derived from arch/ppc/mm/init.c: | ||
10 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
11 | * | ||
12 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
13 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
14 | * Copyright (C) 1996 Paul Mackerras | ||
15 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
16 | * | ||
17 | * Derived from "arch/i386/mm/init.c" | ||
18 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
19 | * | ||
20 | * This program is free software; you can redistribute it and/or | ||
21 | * modify it under the terms of the GNU General Public License | ||
22 | * as published by the Free Software Foundation; either version | ||
23 | * 2 of the License, or (at your option) any later version. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/mm.h> | ||
28 | #include <linux/init.h> | ||
29 | |||
30 | #include <asm/tlbflush.h> | ||
31 | #include <asm/mmu_context.h> | ||
32 | |||
/* Next candidate context number to hand out or steal. */
mm_context_t next_mmu_context;
/* Bitmap of allocated hardware contexts; bit set == context in use. */
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
/* How many contexts are still free for allocation. */
atomic_t nr_free_contexts;
/* Back-map from context number to the mm_struct currently holding it. */
struct mm_struct *context_mm[LAST_CONTEXT+1];
37 | |||
38 | /* | ||
39 | * Initialize the context management stuff. | ||
40 | */ | ||
41 | void __init mmu_context_init(void) | ||
42 | { | ||
43 | /* | ||
44 | * The use of context zero is reserved for the kernel. | ||
45 | * This code assumes FIRST_CONTEXT < 32. | ||
46 | */ | ||
47 | context_map[0] = (1 << FIRST_CONTEXT) - 1; | ||
48 | next_mmu_context = FIRST_CONTEXT; | ||
49 | atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1); | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Steal a context from a task that has one at the moment. | ||
54 | * | ||
55 | * This isn't an LRU system, it just frees up each context in | ||
56 | * turn (sort-of pseudo-random replacement :). This would be the | ||
57 | * place to implement an LRU scheme if anyone were motivated to do it. | ||
58 | */ | ||
59 | void steal_context(void) | ||
60 | { | ||
61 | struct mm_struct *mm; | ||
62 | |||
63 | /* free up context `next_mmu_context' */ | ||
64 | /* if we shouldn't free context 0, don't... */ | ||
65 | if (next_mmu_context < FIRST_CONTEXT) | ||
66 | next_mmu_context = FIRST_CONTEXT; | ||
67 | mm = context_mm[next_mmu_context]; | ||
68 | flush_tlb_mm(mm); | ||
69 | destroy_context(mm); | ||
70 | } | ||
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c new file mode 100644 index 000000000000..46c4ca5d15c5 --- /dev/null +++ b/arch/microblaze/mm/pgtable.c | |||
@@ -0,0 +1,286 @@ | |||
1 | /* | ||
2 | * This file contains the routines setting up the linux page tables. | ||
3 | * | ||
4 | * Copyright (C) 2008 Michal Simek | ||
5 | * Copyright (C) 2008 PetaLogix | ||
6 | * | ||
7 | * Copyright (C) 2007 Xilinx, Inc. All rights reserved. | ||
8 | * | ||
9 | * Derived from arch/ppc/mm/pgtable.c: | ||
10 | * -- paulus | ||
11 | * | ||
12 | * Derived from arch/ppc/mm/init.c: | ||
13 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
14 | * | ||
15 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
16 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
17 | * Copyright (C) 1996 Paul Mackerras | ||
18 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
19 | * | ||
20 | * Derived from "arch/i386/mm/init.c" | ||
21 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
22 | * | ||
23 | * This file is subject to the terms and conditions of the GNU General | ||
24 | * Public License. See the file COPYING in the main directory of this | ||
25 | * archive for more details. | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/types.h> | ||
32 | #include <linux/vmalloc.h> | ||
33 | #include <linux/init.h> | ||
34 | |||
35 | #include <asm/pgtable.h> | ||
36 | #include <asm/pgalloc.h> | ||
37 | #include <linux/io.h> | ||
38 | #include <asm/mmu.h> | ||
39 | #include <asm/sections.h> | ||
40 | |||
/* On MicroBlaze a hash-PTE flush reduces to invalidating the TLB entry. */
#define flush_HPTE(X, va, pg) _tlbie(va)

/* Top of the early-boot ioremap region (set up before mem_init). */
unsigned long ioremap_base;
/* Low-water mark of early ioremap allocations; grows downward. */
unsigned long ioremap_bot;

/* The maximum lowmem defaults to 768Mb, but this can be configured to
 * another value.
 */
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE

#ifndef CONFIG_SMP
/* Per-CPU page-table cache; only a single instance is needed on UP. */
struct pgtable_cache_struct quicklists;
#endif
54 | |||
/*
 * Map the physical range [addr, addr + size) into kernel virtual space
 * with the given PTE flags and return the virtual address corresponding
 * to `addr'.  Returns NULL when the range covers in-use RAM, the size
 * rounds to zero, or no virtual space / page table could be obtained.
 */
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	/* Round the length up so whole pages are mapped. */
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 *
	 * NOTE(review): the negated exemption window below compares p
	 * against __bss_stop for BOTH bounds, so it is always false and
	 * never exempts any address; one of the two bounds is presumably
	 * meant to be a different symbol -- confirm the intended window.
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
		p < virt_to_phys((unsigned long)__bss_stop))) {
		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
			" is RAM lr %p\n", (unsigned long)p,
			__builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 * -- Cort
	 */

	if (mem_init_done) {
		/* vmalloc is up: take virtual space from the vmalloc arena. */
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = VMALLOC_VMADDR(area->addr);
	} else {
		/* Early boot: carve virtual space downward from ioremap_base. */
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		/* NOTE(review): pages mapped before the failing one are not
		 * torn down, and ioremap_bot is not restored in the early
		 * path -- confirm whether partial cleanup is needed. */
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	/* Re-apply the sub-page offset of the original request. */
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
127 | |||
/*
 * Map device memory into kernel virtual space, uncached (and hence
 * guarded -- see __ioremap).  Returns NULL on failure.
 */
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
133 | |||
134 | void iounmap(void *addr) | ||
135 | { | ||
136 | if (addr > high_memory && (unsigned long) addr < ioremap_bot) | ||
137 | vfree((void *) (PAGE_MASK & (unsigned long) addr)); | ||
138 | } | ||
139 | EXPORT_SYMBOL(iounmap); | ||
140 | |||
141 | |||
142 | int map_page(unsigned long va, phys_addr_t pa, int flags) | ||
143 | { | ||
144 | pmd_t *pd; | ||
145 | pte_t *pg; | ||
146 | int err = -ENOMEM; | ||
147 | /* spin_lock(&init_mm.page_table_lock); */ | ||
148 | /* Use upper 10 bits of VA to index the first level map */ | ||
149 | pd = pmd_offset(pgd_offset_k(va), va); | ||
150 | /* Use middle 10 bits of VA to index the second-level map */ | ||
151 | pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */ | ||
152 | /* pg = pte_alloc_kernel(&init_mm, pd, va); */ | ||
153 | |||
154 | if (pg != NULL) { | ||
155 | err = 0; | ||
156 | set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, | ||
157 | __pgprot(flags))); | ||
158 | if (mem_init_done) | ||
159 | flush_HPTE(0, va, pmd_val(*pd)); | ||
160 | /* flush_HPTE(0, va, pg); */ | ||
161 | |||
162 | } | ||
163 | /* spin_unlock(&init_mm.page_table_lock); */ | ||
164 | return err; | ||
165 | } | ||
166 | |||
167 | void __init adjust_total_lowmem(void) | ||
168 | { | ||
169 | /* TBD */ | ||
170 | #if 0 | ||
171 | unsigned long max_low_mem = MAX_LOW_MEM; | ||
172 | |||
173 | if (total_lowmem > max_low_mem) { | ||
174 | total_lowmem = max_low_mem; | ||
175 | #ifndef CONFIG_HIGHMEM | ||
176 | printk(KERN_INFO "Warning, memory limited to %ld Mb, use " | ||
177 | "CONFIG_HIGHMEM to reach %ld Mb\n", | ||
178 | max_low_mem >> 20, total_memory >> 20); | ||
179 | total_memory = total_lowmem; | ||
180 | #endif /* CONFIG_HIGHMEM */ | ||
181 | } | ||
182 | #endif | ||
183 | } | ||
184 | |||
/*
 * Touch `tmem' through a volatile local so the value stays observable
 * and the access is not optimised away.
 *
 * Fix: the previous version read the uninitialised local before
 * adding `tmem' to it (`a = a + tmem'), which is undefined behaviour;
 * initialise the volatile from the argument instead.
 */
static void show_tmem(unsigned long tmem)
{
	volatile unsigned long a = tmem;
	(void)a;
}
190 | |||
191 | /* | ||
192 | * Map in all of physical memory starting at CONFIG_KERNEL_START. | ||
193 | */ | ||
194 | void __init mapin_ram(void) | ||
195 | { | ||
196 | unsigned long v, p, s, f; | ||
197 | |||
198 | v = CONFIG_KERNEL_START; | ||
199 | p = memory_start; | ||
200 | show_tmem(memory_size); | ||
201 | for (s = 0; s < memory_size; s += PAGE_SIZE) { | ||
202 | f = _PAGE_PRESENT | _PAGE_ACCESSED | | ||
203 | _PAGE_SHARED | _PAGE_HWEXEC; | ||
204 | if ((char *) v < _stext || (char *) v >= _etext) | ||
205 | f |= _PAGE_WRENABLE; | ||
206 | else | ||
207 | /* On the MicroBlaze, no user access | ||
208 | forces R/W kernel access */ | ||
209 | f |= _PAGE_USER; | ||
210 | map_page(v, p, f); | ||
211 | v += PAGE_SIZE; | ||
212 | p += PAGE_SIZE; | ||
213 | } | ||
214 | } | ||
215 | |||
/* is x a power of 2? */
/* NOTE(review): presumably duplicates the generic is_power_of_2()
 * helper from <linux/log2.h>, and is not referenced anywhere in the
 * visible part of this file -- confirm whether it can be dropped. */
#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
218 | |||
219 | /* | ||
220 | * Set up a mapping for a block of I/O. | ||
221 | * virt, phys, size must all be page-aligned. | ||
222 | * This should only be called before ioremap is called. | ||
223 | */ | ||
224 | void __init io_block_mapping(unsigned long virt, phys_addr_t phys, | ||
225 | unsigned int size, int flags) | ||
226 | { | ||
227 | int i; | ||
228 | |||
229 | if (virt > CONFIG_KERNEL_START && virt < ioremap_bot) | ||
230 | ioremap_bot = ioremap_base = virt; | ||
231 | |||
232 | /* Put it in the page tables. */ | ||
233 | for (i = 0; i < size; i += PAGE_SIZE) | ||
234 | map_page(virt + i, phys + i, flags); | ||
235 | } | ||
236 | |||
237 | /* Scan the real Linux page tables and return a PTE pointer for | ||
238 | * a virtual address in a context. | ||
239 | * Returns true (1) if PTE was found, zero otherwise. The pointer to | ||
240 | * the PTE pointer is unmodified if PTE is not found. | ||
241 | */ | ||
242 | static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep) | ||
243 | { | ||
244 | pgd_t *pgd; | ||
245 | pmd_t *pmd; | ||
246 | pte_t *pte; | ||
247 | int retval = 0; | ||
248 | |||
249 | pgd = pgd_offset(mm, addr & PAGE_MASK); | ||
250 | if (pgd) { | ||
251 | pmd = pmd_offset(pgd, addr & PAGE_MASK); | ||
252 | if (pmd_present(*pmd)) { | ||
253 | pte = pte_offset_kernel(pmd, addr & PAGE_MASK); | ||
254 | if (pte) { | ||
255 | retval = 1; | ||
256 | *ptep = pte; | ||
257 | } | ||
258 | } | ||
259 | } | ||
260 | return retval; | ||
261 | } | ||
262 | |||
263 | /* Find physical address for this virtual address. Normally used by | ||
264 | * I/O functions, but anyone can call it. | ||
265 | */ | ||
266 | unsigned long iopa(unsigned long addr) | ||
267 | { | ||
268 | unsigned long pa; | ||
269 | |||
270 | pte_t *pte; | ||
271 | struct mm_struct *mm; | ||
272 | |||
273 | /* Allow mapping of user addresses (within the thread) | ||
274 | * for DMA if necessary. | ||
275 | */ | ||
276 | if (addr < TASK_SIZE) | ||
277 | mm = current->mm; | ||
278 | else | ||
279 | mm = &init_mm; | ||
280 | |||
281 | pa = 0; | ||
282 | if (get_pteptr(mm, addr, &pte)) | ||
283 | pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK); | ||
284 | |||
285 | return pa; | ||
286 | } | ||