author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /arch/microblaze
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'arch/microblaze')
-rw-r--r--  arch/microblaze/Kconfig | 83
-rw-r--r--  arch/microblaze/Kconfig.debug | 3
-rw-r--r--  arch/microblaze/Makefile | 7
-rw-r--r--  arch/microblaze/boot/Makefile | 19
-rw-r--r--  arch/microblaze/configs/mmu_defconfig | 164
-rw-r--r--  arch/microblaze/configs/nommu_defconfig | 134
-rw-r--r--  arch/microblaze/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/microblaze/include/asm/cache.h | 18
-rw-r--r--  arch/microblaze/include/asm/cacheflush.h | 124
-rw-r--r--  arch/microblaze/include/asm/cpuinfo.h | 5
-rw-r--r--  arch/microblaze/include/asm/device.h | 16
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h | 154
-rw-r--r--  arch/microblaze/include/asm/dma.h | 6
-rw-r--r--  arch/microblaze/include/asm/elf.h | 1
-rw-r--r--  arch/microblaze/include/asm/entry.h | 2
-rw-r--r--  arch/microblaze/include/asm/exceptions.h | 6
-rw-r--r--  arch/microblaze/include/asm/ftrace.h | 25
-rw-r--r--  arch/microblaze/include/asm/futex.h | 127
-rw-r--r--  arch/microblaze/include/asm/io.h | 40
-rw-r--r--  arch/microblaze/include/asm/irq.h | 37
-rw-r--r--  arch/microblaze/include/asm/irqflags.h | 112
-rw-r--r--  arch/microblaze/include/asm/page.h | 27
-rw-r--r--  arch/microblaze/include/asm/pci-bridge.h | 195
-rw-r--r--  arch/microblaze/include/asm/pci.h | 170
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h | 27
-rw-r--r--  arch/microblaze/include/asm/pgtable.h | 73
-rw-r--r--  arch/microblaze/include/asm/processor.h | 1
-rw-r--r--  arch/microblaze/include/asm/prom.h | 168
-rw-r--r--  arch/microblaze/include/asm/ptrace.h | 14
-rw-r--r--  arch/microblaze/include/asm/pvr.h | 30
-rw-r--r--  arch/microblaze/include/asm/segment.h | 49
-rw-r--r--  arch/microblaze/include/asm/setup.h | 2
-rw-r--r--  arch/microblaze/include/asm/system.h | 5
-rw-r--r--  arch/microblaze/include/asm/thread_info.h | 5
-rw-r--r--  arch/microblaze/include/asm/tlbflush.h | 7
-rw-r--r--  arch/microblaze/include/asm/uaccess.h | 498
-rw-r--r--  arch/microblaze/include/asm/unistd.h | 5
-rw-r--r--  arch/microblaze/kernel/Makefile | 16
-rw-r--r--  arch/microblaze/kernel/asm-offsets.c | 2
-rw-r--r--  arch/microblaze/kernel/cpu/Makefile | 4
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c | 762
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c | 15
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-static.c | 17
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c | 8
-rw-r--r--  arch/microblaze/kernel/cpu/mb.c | 18
-rw-r--r--  arch/microblaze/kernel/cpu/pvr.c | 2
-rw-r--r--  arch/microblaze/kernel/dma.c | 157
-rw-r--r--  arch/microblaze/kernel/entry-nommu.S | 14
-rw-r--r--  arch/microblaze/kernel/entry.S | 135
-rw-r--r--  arch/microblaze/kernel/exceptions.c | 2
-rw-r--r--  arch/microblaze/kernel/ftrace.c | 231
-rw-r--r--  arch/microblaze/kernel/head.S | 26
-rw-r--r--  arch/microblaze/kernel/heartbeat.c | 15
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S | 112
-rw-r--r--  arch/microblaze/kernel/intc.c | 10
-rw-r--r--  arch/microblaze/kernel/irq.c | 22
-rw-r--r--  arch/microblaze/kernel/mcount.S | 170
-rw-r--r--  arch/microblaze/kernel/microblaze_ksyms.c | 16
-rw-r--r--  arch/microblaze/kernel/misc.S | 43
-rw-r--r--  arch/microblaze/kernel/module.c | 3
-rw-r--r--  arch/microblaze/kernel/of_platform.c | 3
-rw-r--r--  arch/microblaze/kernel/process.c | 11
-rw-r--r--  arch/microblaze/kernel/prom.c | 1013
-rw-r--r--  arch/microblaze/kernel/prom_parse.c | 2
-rw-r--r--  arch/microblaze/kernel/ptrace.c | 65
-rw-r--r--  arch/microblaze/kernel/reset.c | 140
-rw-r--r--  arch/microblaze/kernel/setup.c | 86
-rw-r--r--  arch/microblaze/kernel/signal.c | 35
-rw-r--r--  arch/microblaze/kernel/stacktrace.c | 65
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c | 39
-rw-r--r--  arch/microblaze/kernel/syscall_table.S | 9
-rw-r--r--  arch/microblaze/kernel/timer.c | 28
-rw-r--r--  arch/microblaze/kernel/traps.c | 40
-rw-r--r--  arch/microblaze/kernel/vmlinux.lds.S | 9
-rw-r--r--  arch/microblaze/lib/Makefile | 3
-rw-r--r--  arch/microblaze/lib/fastcopy.S | 6
-rw-r--r--  arch/microblaze/lib/memcpy.c | 4
-rw-r--r--  arch/microblaze/lib/memmove.c | 2
-rw-r--r--  arch/microblaze/lib/memset.c | 17
-rw-r--r--  arch/microblaze/lib/uaccess.c | 41
-rw-r--r--  arch/microblaze/lib/uaccess_old.S | 45
-rw-r--r--  arch/microblaze/mm/Makefile | 2
-rw-r--r--  arch/microblaze/mm/consistent.c | 255
-rw-r--r--  arch/microblaze/mm/fault.c | 37
-rw-r--r--  arch/microblaze/mm/init.c | 47
-rw-r--r--  arch/microblaze/mm/pgtable.c | 66
-rw-r--r--  arch/microblaze/oprofile/Makefile | 13
-rw-r--r--  arch/microblaze/oprofile/microblaze_oprofile.c | 22
-rw-r--r--  arch/microblaze/pci/Makefile | 6
-rw-r--r--  arch/microblaze/pci/indirect_pci.c | 163
-rw-r--r--  arch/microblaze/pci/iomap.c | 39
-rw-r--r--  arch/microblaze/pci/pci-common.c | 1642
-rw-r--r--  arch/microblaze/pci/pci_32.c | 431
-rw-r--r--  arch/microblaze/pci/xilinx_pci.c | 168
-rw-r--r--  arch/microblaze/platform/Kconfig.platform | 21
-rw-r--r--  arch/microblaze/platform/generic/Kconfig.auto | 29
-rw-r--r--  arch/microblaze/platform/generic/system.dts | 38
-rw-r--r--  arch/microblaze/platform/platform.c | 2
98 files changed, 6259 insertions(+), 2545 deletions(-)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index bbd8327f1890..76818f926539 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,8 +6,17 @@ mainmenu "Linux/Microblaze Kernel Configuration"
 config MICROBLAZE
 	def_bool y
 	select HAVE_LMB
+	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
 	select USB_ARCH_HAS_EHCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select HAVE_OPROFILE
+	select HAVE_DMA_ATTRS
+	select HAVE_DMA_API_DEBUG
+	select TRACING_SUPPORT
 
 config SWAP
 	def_bool n
@@ -57,10 +66,16 @@ config GENERIC_GPIO
 config GENERIC_CSUM
 	def_bool y
 
-config PCI
-	def_bool n
+config STACKTRACE_SUPPORT
+	def_bool y
+
+config LOCKDEP_SUPPORT
+	def_bool y
+
+config HAVE_LATENCYTOP_SUPPORT
+	def_bool y
 
-config NO_DMA
-	def_bool y
+config DTC
+	def_bool y
 
 source "init/Kconfig"
@@ -71,7 +86,7 @@ source "arch/microblaze/platform/Kconfig.platform"
 
 menu "Processor type and features"
 
-source kernel/time/Kconfig
+source "kernel/time/Kconfig"
 
 source "kernel/Kconfig.preempt"
 
@@ -111,6 +126,7 @@ config CMDLINE_FORCE
 
 config OF
 	def_bool y
+	select OF_FLATTREE
 
 config PROC_DEVICETREE
 	bool "Support for device tree in /proc"
@@ -126,7 +142,6 @@ menu "Advanced setup"
 
 config ADVANCED_OPTIONS
 	bool "Prompt for advanced kernel configuration options"
-	depends on MMU
 	help
 	  This option will enable prompting for a variety of advanced kernel
 	  configuration options. These options can cause the kernel to not
@@ -138,6 +153,15 @@ config ADVANCED_OPTIONS
 comment "Default settings for advanced configuration options are used"
 	depends on !ADVANCED_OPTIONS
 
+config XILINX_UNCACHED_SHADOW
+	bool "Are you using uncached shadow for RAM ?"
+	depends on ADVANCED_OPTIONS && !MMU
+	default n
+	help
+	  This is needed to be able to allocate uncachable memory regions.
+	  The feature requires the design to define the RAM memory controller
+	  window to be twice as large as the actual physical memory.
+
 config HIGHMEM_START_BOOL
 	bool "Set high memory pool address"
 	depends on ADVANCED_OPTIONS && HIGHMEM
@@ -155,7 +179,7 @@ config HIGHMEM_START
 
 config LOWMEM_SIZE_BOOL
 	bool "Set maximum low memory"
-	depends on ADVANCED_OPTIONS
+	depends on ADVANCED_OPTIONS && MMU
 	help
 	  This option allows you to set the maximum amount of memory which
 	  will be used as "low memory", that is, memory which the kernel can
@@ -167,7 +191,6 @@ config LOWMEM_SIZE_BOOL
 
 config LOWMEM_SIZE
 	hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
-	depends on MMU
 	default "0x30000000"
 
 config KERNEL_START_BOOL
@@ -188,7 +211,7 @@ config KERNEL_START
 
 config TASK_SIZE_BOOL
 	bool "Set custom user task size"
-	depends on ADVANCED_OPTIONS
+	depends on ADVANCED_OPTIONS && MMU
 	help
 	  This option allows you to set the amount of virtual address space
 	  allocated to user tasks. This can be useful in optimizing the
@@ -198,42 +221,34 @@ config TASK_SIZE_BOOL
 
 config TASK_SIZE
 	hex "Size of user task space" if TASK_SIZE_BOOL
-	depends on MMU
 	default "0x80000000"
 
-config CONSISTENT_START_BOOL
-	bool "Set custom consistent memory pool address"
-	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-	help
-	  This option allows you to set the base virtual address
-	  of the the consistent memory pool. This pool of virtual
-	  memory is used to make consistent memory allocations.
+endmenu
 
-config CONSISTENT_START
-	hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
-	depends on MMU
-	default "0xff100000" if NOT_COHERENT_CACHE
+source "mm/Kconfig"
 
-config CONSISTENT_SIZE_BOOL
-	bool "Set custom consistent memory pool size"
-	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-	help
-	  This option allows you to set the size of the the
-	  consistent memory pool. This pool of virtual memory
-	  is used to make consistent memory allocations.
+menu "Exectuable file formats"
 
-config CONSISTENT_SIZE
-	hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
-	depends on MMU
-	default "0x00200000" if NOT_COHERENT_CACHE
+source "fs/Kconfig.binfmt"
 
 endmenu
 
-source "mm/Kconfig"
+menu "Bus Options"
 
-menu "Exectuable file formats"
+config PCI
+	bool "PCI support"
 
-source "fs/Kconfig.binfmt"
+config PCI_DOMAINS
+	def_bool PCI
+
+config PCI_SYSCALL
+	def_bool PCI
+
+config PCI_XILINX
+	bool "Xilinx PCI host bridge support"
+	depends on PCI
+
+source "drivers/pci/Kconfig"
 
 endmenu
 
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index 242cd35bdb4b..9dc708a7f700 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -3,6 +3,9 @@
 
 menu "Kernel hacking"
 
+config TRACE_IRQFLAGS_SUPPORT
+	def_bool y
+
 source "lib/Kconfig.debug"
 
 config EARLY_PRINTK
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 34187354304a..72f6e8583746 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -50,6 +50,9 @@ libs-y += $(LIBGCC)
 core-y += arch/microblaze/kernel/
 core-y += arch/microblaze/mm/
 core-y += arch/microblaze/platform/
+core-$(CONFIG_PCI) += arch/microblaze/pci/
+
+drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
 
 boot := arch/microblaze/boot
 
@@ -81,7 +84,7 @@ define archhelp
   echo '* linux.bin - Create raw binary'
   echo ' linux.bin.gz - Create compressed raw binary'
   echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
-  echo ' - stripped elf with fdt blob
+  echo ' - stripped elf with fdt blob'
   echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
   echo ' *_defconfig - Select default config from arch/microblaze/configs'
   echo ''
@@ -91,3 +94,5 @@ define archhelp
   echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
   echo ' (minus the .dts extension).'
 endef
+
+MRPROPER_FILES += $(boot)/simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 21f13322a4ca..57f50c2371c6 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -2,11 +2,13 @@
 # arch/microblaze/boot/Makefile
 #
 
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
 obj-y += linked_dtb.o
 
 targets := linux.bin linux.bin.gz simpleImage.%
 
-OBJCOPYFLAGS_linux.bin := -O binary
+OBJCOPYFLAGS := -O binary
 
 # Where the DTS files live
 dtstree := $(srctree)/$(src)/dts
@@ -21,9 +23,8 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
 endif
 
 $(obj)/linux.bin: vmlinux FORCE
-	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
-		touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
 	$(call if_changed,objcopy)
+	$(call if_changed,uimage)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
 $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
@@ -36,8 +37,16 @@ quiet_cmd_cp = CP $< $@$2
 quiet_cmd_strip = STRIP $@
 	cmd_strip = $(STRIP) -K _start -K _end -K __log_buf -K _fdt_start vmlinux -o $@
 
+quiet_cmd_uimage = UIMAGE $@.ub
+	cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \
+		-C none -n 'Linux-$(KERNELRELEASE)' \
+		-a $(CONFIG_KERNEL_BASE_ADDR) -e $(CONFIG_KERNEL_BASE_ADDR) \
+		-d $@ $@.ub
+
 $(obj)/simpleImage.%: vmlinux FORCE
 	$(call if_changed,cp,.unstrip)
+	$(call if_changed,objcopy)
+	$(call if_changed,uimage)
 	$(call if_changed,strip)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
@@ -51,6 +60,4 @@ quiet_cmd_dtc = DTC $@
 $(obj)/%.dtb: $(dtstree)/%.dts FORCE
 	$(call if_changed,dtc)
 
-clean-kernel += linux.bin linux.bin.gz simpleImage.*
-
-clean-files += *.dtb
+clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index bb7c374713ad..3c91cf6192c6 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31 3# Linux kernel version: 2.6.34-rc6
4# Thu Sep 24 10:28:50 2009 4# Thu May 6 11:22:14 2010
5# 5#
6CONFIG_MICROBLAZE=y 6CONFIG_MICROBLAZE=y
7# CONFIG_SWAP is not set 7# CONFIG_SWAP is not set
@@ -19,8 +19,10 @@ CONFIG_GENERIC_CLOCKEVENTS=y
19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
20CONFIG_GENERIC_GPIO=y 20CONFIG_GENERIC_GPIO=y
21CONFIG_GENERIC_CSUM=y 21CONFIG_GENERIC_CSUM=y
22# CONFIG_PCI is not set 22CONFIG_STACKTRACE_SUPPORT=y
23CONFIG_NO_DMA=y 23CONFIG_LOCKDEP_SUPPORT=y
24CONFIG_HAVE_LATENCYTOP_SUPPORT=y
25CONFIG_DTC=y
24CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 26CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
25CONFIG_CONSTRUCTORS=y 27CONFIG_CONSTRUCTORS=y
26 28
@@ -44,6 +46,7 @@ CONFIG_SYSVIPC_SYSCTL=y
44# 46#
45CONFIG_TREE_RCU=y 47CONFIG_TREE_RCU=y
46# CONFIG_TREE_PREEMPT_RCU is not set 48# CONFIG_TREE_PREEMPT_RCU is not set
49# CONFIG_TINY_RCU is not set
47# CONFIG_RCU_TRACE is not set 50# CONFIG_RCU_TRACE is not set
48CONFIG_RCU_FANOUT=32 51CONFIG_RCU_FANOUT=32
49# CONFIG_RCU_FANOUT_EXACT is not set 52# CONFIG_RCU_FANOUT_EXACT is not set
@@ -51,7 +54,6 @@ CONFIG_RCU_FANOUT=32
51CONFIG_IKCONFIG=y 54CONFIG_IKCONFIG=y
52CONFIG_IKCONFIG_PROC=y 55CONFIG_IKCONFIG_PROC=y
53CONFIG_LOG_BUF_SHIFT=17 56CONFIG_LOG_BUF_SHIFT=17
54# CONFIG_GROUP_SCHED is not set
55# CONFIG_CGROUPS is not set 57# CONFIG_CGROUPS is not set
56CONFIG_SYSFS_DEPRECATED=y 58CONFIG_SYSFS_DEPRECATED=y
57CONFIG_SYSFS_DEPRECATED_V2=y 59CONFIG_SYSFS_DEPRECATED_V2=y
@@ -64,10 +66,12 @@ CONFIG_INITRAMFS_ROOT_GID=0
64CONFIG_RD_GZIP=y 66CONFIG_RD_GZIP=y
65# CONFIG_RD_BZIP2 is not set 67# CONFIG_RD_BZIP2 is not set
66# CONFIG_RD_LZMA is not set 68# CONFIG_RD_LZMA is not set
69# CONFIG_RD_LZO is not set
67# CONFIG_INITRAMFS_COMPRESSION_NONE is not set 70# CONFIG_INITRAMFS_COMPRESSION_NONE is not set
68CONFIG_INITRAMFS_COMPRESSION_GZIP=y 71CONFIG_INITRAMFS_COMPRESSION_GZIP=y
69# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set 72# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
70# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set 73# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
74# CONFIG_INITRAMFS_COMPRESSION_LZO is not set
71# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 75# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
72CONFIG_SYSCTL=y 76CONFIG_SYSCTL=y
73CONFIG_ANON_INODES=y 77CONFIG_ANON_INODES=y
@@ -90,21 +94,22 @@ CONFIG_EVENTFD=y
90CONFIG_AIO=y 94CONFIG_AIO=y
91 95
92# 96#
93# Performance Counters 97# Kernel Performance Events And Counters
94# 98#
95CONFIG_VM_EVENT_COUNTERS=y 99CONFIG_VM_EVENT_COUNTERS=y
96# CONFIG_STRIP_ASM_SYMS is not set
97CONFIG_COMPAT_BRK=y 100CONFIG_COMPAT_BRK=y
98CONFIG_SLAB=y 101CONFIG_SLAB=y
99# CONFIG_SLUB is not set 102# CONFIG_SLUB is not set
100# CONFIG_SLOB is not set 103# CONFIG_SLOB is not set
101# CONFIG_PROFILING is not set 104# CONFIG_PROFILING is not set
102# CONFIG_MARKERS is not set 105CONFIG_HAVE_OPROFILE=y
106CONFIG_HAVE_DMA_ATTRS=y
107CONFIG_HAVE_DMA_API_DEBUG=y
103 108
104# 109#
105# GCOV-based kernel profiling 110# GCOV-based kernel profiling
106# 111#
107# CONFIG_SLOW_WORK is not set 112CONFIG_SLOW_WORK=y
108# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 113# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
109CONFIG_SLABINFO=y 114CONFIG_SLABINFO=y
110CONFIG_BASE_SMALL=1 115CONFIG_BASE_SMALL=1
@@ -123,14 +128,41 @@ CONFIG_LBDAF=y
123# IO Schedulers 128# IO Schedulers
124# 129#
125CONFIG_IOSCHED_NOOP=y 130CONFIG_IOSCHED_NOOP=y
126CONFIG_IOSCHED_AS=y
127CONFIG_IOSCHED_DEADLINE=y 131CONFIG_IOSCHED_DEADLINE=y
128CONFIG_IOSCHED_CFQ=y 132CONFIG_IOSCHED_CFQ=y
129# CONFIG_DEFAULT_AS is not set
130# CONFIG_DEFAULT_DEADLINE is not set 133# CONFIG_DEFAULT_DEADLINE is not set
131CONFIG_DEFAULT_CFQ=y 134CONFIG_DEFAULT_CFQ=y
132# CONFIG_DEFAULT_NOOP is not set 135# CONFIG_DEFAULT_NOOP is not set
133CONFIG_DEFAULT_IOSCHED="cfq" 136CONFIG_DEFAULT_IOSCHED="cfq"
137# CONFIG_INLINE_SPIN_TRYLOCK is not set
138# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
139# CONFIG_INLINE_SPIN_LOCK is not set
140# CONFIG_INLINE_SPIN_LOCK_BH is not set
141# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
142# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
143# CONFIG_INLINE_SPIN_UNLOCK is not set
144# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
145# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
146# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
147# CONFIG_INLINE_READ_TRYLOCK is not set
148# CONFIG_INLINE_READ_LOCK is not set
149# CONFIG_INLINE_READ_LOCK_BH is not set
150# CONFIG_INLINE_READ_LOCK_IRQ is not set
151# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
152# CONFIG_INLINE_READ_UNLOCK is not set
153# CONFIG_INLINE_READ_UNLOCK_BH is not set
154# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
155# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
156# CONFIG_INLINE_WRITE_TRYLOCK is not set
157# CONFIG_INLINE_WRITE_LOCK is not set
158# CONFIG_INLINE_WRITE_LOCK_BH is not set
159# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
160# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
161# CONFIG_INLINE_WRITE_UNLOCK is not set
162# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
163# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
164# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
165# CONFIG_MUTEX_SPIN_ON_OWNER is not set
134# CONFIG_FREEZER is not set 166# CONFIG_FREEZER is not set
135 167
136# 168#
@@ -139,11 +171,6 @@ CONFIG_DEFAULT_IOSCHED="cfq"
139CONFIG_PLATFORM_GENERIC=y 171CONFIG_PLATFORM_GENERIC=y
140CONFIG_OPT_LIB_FUNCTION=y 172CONFIG_OPT_LIB_FUNCTION=y
141CONFIG_OPT_LIB_ASM=y 173CONFIG_OPT_LIB_ASM=y
142CONFIG_ALLOW_EDIT_AUTO=y
143
144#
145# Automatic platform settings from Kconfig.auto
146#
147 174
148# 175#
149# Definitions for MICROBLAZE0 176# Definitions for MICROBLAZE0
@@ -203,12 +230,11 @@ CONFIG_FLATMEM_MANUAL=y
203CONFIG_FLATMEM=y 230CONFIG_FLATMEM=y
204CONFIG_FLAT_NODE_MEM_MAP=y 231CONFIG_FLAT_NODE_MEM_MAP=y
205CONFIG_PAGEFLAGS_EXTENDED=y 232CONFIG_PAGEFLAGS_EXTENDED=y
206CONFIG_SPLIT_PTLOCK_CPUS=4 233CONFIG_SPLIT_PTLOCK_CPUS=999999
207# CONFIG_PHYS_ADDR_T_64BIT is not set 234# CONFIG_PHYS_ADDR_T_64BIT is not set
208CONFIG_ZONE_DMA_FLAG=0 235CONFIG_ZONE_DMA_FLAG=0
209CONFIG_VIRT_TO_BUS=y 236CONFIG_VIRT_TO_BUS=y
210CONFIG_HAVE_MLOCK=y 237# CONFIG_KSM is not set
211CONFIG_HAVE_MLOCKED_PAGE_BIT=y
212CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 238CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
213 239
214# 240#
@@ -218,13 +244,20 @@ CONFIG_BINFMT_ELF=y
218# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 244# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
219# CONFIG_HAVE_AOUT is not set 245# CONFIG_HAVE_AOUT is not set
220# CONFIG_BINFMT_MISC is not set 246# CONFIG_BINFMT_MISC is not set
247
248#
249# Bus Options
250#
251# CONFIG_PCI is not set
252# CONFIG_PCI_DOMAINS is not set
253# CONFIG_PCI_SYSCALL is not set
254# CONFIG_ARCH_SUPPORTS_MSI is not set
221CONFIG_NET=y 255CONFIG_NET=y
222 256
223# 257#
224# Networking options 258# Networking options
225# 259#
226CONFIG_PACKET=y 260CONFIG_PACKET=y
227# CONFIG_PACKET_MMAP is not set
228CONFIG_UNIX=y 261CONFIG_UNIX=y
229CONFIG_XFRM=y 262CONFIG_XFRM=y
230# CONFIG_XFRM_USER is not set 263# CONFIG_XFRM_USER is not set
@@ -289,7 +322,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
289# CONFIG_IRDA is not set 322# CONFIG_IRDA is not set
290# CONFIG_BT is not set 323# CONFIG_BT is not set
291# CONFIG_AF_RXRPC is not set 324# CONFIG_AF_RXRPC is not set
292# CONFIG_WIRELESS is not set 325CONFIG_WIRELESS=y
326# CONFIG_CFG80211 is not set
327# CONFIG_LIB80211 is not set
328
329#
330# CFG80211 needs to be enabled for MAC80211
331#
293# CONFIG_WIMAX is not set 332# CONFIG_WIMAX is not set
294# CONFIG_RFKILL is not set 333# CONFIG_RFKILL is not set
295# CONFIG_NET_9P is not set 334# CONFIG_NET_9P is not set
@@ -308,11 +347,17 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
308# CONFIG_SYS_HYPERVISOR is not set 347# CONFIG_SYS_HYPERVISOR is not set
309# CONFIG_CONNECTOR is not set 348# CONFIG_CONNECTOR is not set
310# CONFIG_MTD is not set 349# CONFIG_MTD is not set
350CONFIG_OF_FLATTREE=y
311CONFIG_OF_DEVICE=y 351CONFIG_OF_DEVICE=y
352CONFIG_OF_MDIO=y
312# CONFIG_PARPORT is not set 353# CONFIG_PARPORT is not set
313CONFIG_BLK_DEV=y 354CONFIG_BLK_DEV=y
314# CONFIG_BLK_DEV_COW_COMMON is not set 355# CONFIG_BLK_DEV_COW_COMMON is not set
315# CONFIG_BLK_DEV_LOOP is not set 356# CONFIG_BLK_DEV_LOOP is not set
357
358#
359# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
360#
316# CONFIG_BLK_DEV_NBD is not set 361# CONFIG_BLK_DEV_NBD is not set
317CONFIG_BLK_DEV_RAM=y 362CONFIG_BLK_DEV_RAM=y
318CONFIG_BLK_DEV_RAM_COUNT=16 363CONFIG_BLK_DEV_RAM_COUNT=16
@@ -333,6 +378,7 @@ CONFIG_MISC_DEVICES=y
333# 378#
334# SCSI device support 379# SCSI device support
335# 380#
381CONFIG_SCSI_MOD=y
336# CONFIG_RAID_ATTRS is not set 382# CONFIG_RAID_ATTRS is not set
337# CONFIG_SCSI is not set 383# CONFIG_SCSI is not set
338# CONFIG_SCSI_DMA is not set 384# CONFIG_SCSI_DMA is not set
@@ -346,7 +392,27 @@ CONFIG_NETDEVICES=y
346# CONFIG_EQUALIZER is not set 392# CONFIG_EQUALIZER is not set
347# CONFIG_TUN is not set 393# CONFIG_TUN is not set
348# CONFIG_VETH is not set 394# CONFIG_VETH is not set
349# CONFIG_PHYLIB is not set 395CONFIG_PHYLIB=y
396
397#
398# MII PHY device drivers
399#
400# CONFIG_MARVELL_PHY is not set
401# CONFIG_DAVICOM_PHY is not set
402# CONFIG_QSEMI_PHY is not set
403# CONFIG_LXT_PHY is not set
404# CONFIG_CICADA_PHY is not set
405# CONFIG_VITESSE_PHY is not set
406# CONFIG_SMSC_PHY is not set
407# CONFIG_BROADCOM_PHY is not set
408# CONFIG_ICPLUS_PHY is not set
409# CONFIG_REALTEK_PHY is not set
410# CONFIG_NATIONAL_PHY is not set
411# CONFIG_STE10XP is not set
412# CONFIG_LSI_ET1011C_PHY is not set
413# CONFIG_MICREL_PHY is not set
414# CONFIG_FIXED_PHY is not set
415# CONFIG_MDIO_BITBANG is not set
350CONFIG_NET_ETHERNET=y 416CONFIG_NET_ETHERNET=y
351# CONFIG_MII is not set 417# CONFIG_MII is not set
352# CONFIG_ETHOC is not set 418# CONFIG_ETHOC is not set
@@ -358,13 +424,14 @@ CONFIG_NET_ETHERNET=y
358# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set 424# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
359# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 425# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
360# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 426# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
427# CONFIG_B44 is not set
361# CONFIG_KS8842 is not set 428# CONFIG_KS8842 is not set
429# CONFIG_KS8851_MLL is not set
362CONFIG_XILINX_EMACLITE=y 430CONFIG_XILINX_EMACLITE=y
363CONFIG_NETDEV_1000=y 431CONFIG_NETDEV_1000=y
364CONFIG_NETDEV_10000=y 432CONFIG_NETDEV_10000=y
365CONFIG_WLAN=y 433CONFIG_WLAN=y
366# CONFIG_WLAN_PRE80211 is not set 434# CONFIG_HOSTAP is not set
367# CONFIG_WLAN_80211 is not set
368 435
369# 436#
370# Enable WiMAX (Networking options) to see the WiMAX drivers 437# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -408,6 +475,8 @@ CONFIG_SERIAL_UARTLITE=y
408CONFIG_SERIAL_UARTLITE_CONSOLE=y 475CONFIG_SERIAL_UARTLITE_CONSOLE=y
409CONFIG_SERIAL_CORE=y 476CONFIG_SERIAL_CORE=y
410CONFIG_SERIAL_CORE_CONSOLE=y 477CONFIG_SERIAL_CORE_CONSOLE=y
478# CONFIG_SERIAL_TIMBERDALE is not set
479# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
411CONFIG_UNIX98_PTYS=y 480CONFIG_UNIX98_PTYS=y
412# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 481# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
413CONFIG_LEGACY_PTYS=y 482CONFIG_LEGACY_PTYS=y
@@ -433,8 +502,13 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
433# CONFIG_POWER_SUPPLY is not set 502# CONFIG_POWER_SUPPLY is not set
434# CONFIG_HWMON is not set 503# CONFIG_HWMON is not set
435# CONFIG_THERMAL is not set 504# CONFIG_THERMAL is not set
436# CONFIG_THERMAL_HWMON is not set
437# CONFIG_WATCHDOG is not set 505# CONFIG_WATCHDOG is not set
506CONFIG_SSB_POSSIBLE=y
507
508#
509# Sonics Silicon Backplane
510#
511# CONFIG_SSB is not set
438 512
439# 513#
440# Multifunction device drivers 514# Multifunction device drivers
@@ -466,6 +540,7 @@ CONFIG_USB_ARCH_HAS_EHCI=y
466# CONFIG_NEW_LEDS is not set 540# CONFIG_NEW_LEDS is not set
467# CONFIG_ACCESSIBILITY is not set 541# CONFIG_ACCESSIBILITY is not set
468# CONFIG_RTC_CLASS is not set 542# CONFIG_RTC_CLASS is not set
543# CONFIG_DMADEVICES is not set
469# CONFIG_AUXDISPLAY is not set 544# CONFIG_AUXDISPLAY is not set
470# CONFIG_UIO is not set 545# CONFIG_UIO is not set
471 546
@@ -526,8 +601,6 @@ CONFIG_PROC_FS=y
526CONFIG_PROC_SYSCTL=y 601CONFIG_PROC_SYSCTL=y
527CONFIG_PROC_PAGE_MONITOR=y 602CONFIG_PROC_PAGE_MONITOR=y
528CONFIG_SYSFS=y 603CONFIG_SYSFS=y
529CONFIG_TMPFS=y
530# CONFIG_TMPFS_POSIX_ACL is not set
531# CONFIG_HUGETLB_PAGE is not set 604# CONFIG_HUGETLB_PAGE is not set
532# CONFIG_CONFIGFS_FS is not set 605# CONFIG_CONFIGFS_FS is not set
533CONFIG_MISC_FILESYSTEMS=y 606CONFIG_MISC_FILESYSTEMS=y
@@ -538,6 +611,7 @@ CONFIG_MISC_FILESYSTEMS=y
538# CONFIG_BEFS_FS is not set 611# CONFIG_BEFS_FS is not set
539# CONFIG_BFS_FS is not set 612# CONFIG_BFS_FS is not set
540# CONFIG_EFS_FS is not set 613# CONFIG_EFS_FS is not set
614# CONFIG_LOGFS is not set
541# CONFIG_CRAMFS is not set 615# CONFIG_CRAMFS is not set
542# CONFIG_SQUASHFS is not set 616# CONFIG_SQUASHFS is not set
543# CONFIG_VXFS_FS is not set 617# CONFIG_VXFS_FS is not set
@@ -561,6 +635,7 @@ CONFIG_SUNRPC=y
561# CONFIG_RPCSEC_GSS_KRB5 is not set 635# CONFIG_RPCSEC_GSS_KRB5 is not set
562# CONFIG_RPCSEC_GSS_SPKM3 is not set 636# CONFIG_RPCSEC_GSS_SPKM3 is not set
563# CONFIG_SMB_FS is not set 637# CONFIG_SMB_FS is not set
638# CONFIG_CEPH_FS is not set
564CONFIG_CIFS=y 639CONFIG_CIFS=y
565CONFIG_CIFS_STATS=y 640CONFIG_CIFS_STATS=y
566CONFIG_CIFS_STATS2=y 641CONFIG_CIFS_STATS2=y
@@ -638,11 +713,13 @@ CONFIG_NLS_DEFAULT="iso8859-1"
638# 713#
639# Kernel hacking 714# Kernel hacking
640# 715#
716CONFIG_TRACE_IRQFLAGS_SUPPORT=y
641# CONFIG_PRINTK_TIME is not set 717# CONFIG_PRINTK_TIME is not set
642CONFIG_ENABLE_WARN_DEPRECATED=y 718CONFIG_ENABLE_WARN_DEPRECATED=y
643CONFIG_ENABLE_MUST_CHECK=y 719CONFIG_ENABLE_MUST_CHECK=y
644CONFIG_FRAME_WARN=1024 720CONFIG_FRAME_WARN=1024
645# CONFIG_MAGIC_SYSRQ is not set 721# CONFIG_MAGIC_SYSRQ is not set
722# CONFIG_STRIP_ASM_SYMS is not set
646# CONFIG_UNUSED_SYMBOLS is not set 723# CONFIG_UNUSED_SYMBOLS is not set
647# CONFIG_DEBUG_FS is not set 724# CONFIG_DEBUG_FS is not set
648# CONFIG_HEADERS_CHECK is not set 725# CONFIG_HEADERS_CHECK is not set
@@ -660,8 +737,12 @@ CONFIG_SCHED_DEBUG=y
660# CONFIG_DEBUG_OBJECTS is not set 737# CONFIG_DEBUG_OBJECTS is not set
661CONFIG_DEBUG_SLAB=y 738CONFIG_DEBUG_SLAB=y
662# CONFIG_DEBUG_SLAB_LEAK is not set 739# CONFIG_DEBUG_SLAB_LEAK is not set
740# CONFIG_DEBUG_KMEMLEAK is not set
663CONFIG_DEBUG_SPINLOCK=y 741CONFIG_DEBUG_SPINLOCK=y
664# CONFIG_DEBUG_MUTEXES is not set 742# CONFIG_DEBUG_MUTEXES is not set
743# CONFIG_DEBUG_LOCK_ALLOC is not set
744# CONFIG_PROVE_LOCKING is not set
745# CONFIG_LOCK_STAT is not set
665# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 746# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
666# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 747# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
667# CONFIG_DEBUG_KOBJECT is not set 748# CONFIG_DEBUG_KOBJECT is not set
@@ -680,10 +761,30 @@ CONFIG_DEBUG_INFO=y
680# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 761# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
681# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set 762# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
682# CONFIG_FAULT_INJECTION is not set 763# CONFIG_FAULT_INJECTION is not set
764# CONFIG_LATENCYTOP is not set
683# CONFIG_SYSCTL_SYSCALL_CHECK is not set 765# CONFIG_SYSCTL_SYSCALL_CHECK is not set
684# CONFIG_PAGE_POISONING is not set 766# CONFIG_PAGE_POISONING is not set
767CONFIG_HAVE_FUNCTION_TRACER=y
768CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
769CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
770CONFIG_HAVE_DYNAMIC_FTRACE=y
771CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
772CONFIG_TRACING_SUPPORT=y
773CONFIG_FTRACE=y
774# CONFIG_FUNCTION_TRACER is not set
775# CONFIG_IRQSOFF_TRACER is not set
776# CONFIG_SCHED_TRACER is not set
777# CONFIG_ENABLE_DEFAULT_TRACERS is not set
778# CONFIG_BOOT_TRACER is not set
779CONFIG_BRANCH_PROFILE_NONE=y
780# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
781# CONFIG_PROFILE_ALL_BRANCHES is not set
782# CONFIG_STACK_TRACER is not set
783# CONFIG_KMEMTRACE is not set
784# CONFIG_WORKQUEUE_TRACER is not set
785# CONFIG_BLK_DEV_IO_TRACE is not set
786# CONFIG_DMA_API_DEBUG is not set
685# CONFIG_SAMPLES is not set 787# CONFIG_SAMPLES is not set
686# CONFIG_KMEMCHECK is not set
687CONFIG_EARLY_PRINTK=y 788CONFIG_EARLY_PRINTK=y
688# CONFIG_HEART_BEAT is not set 789# CONFIG_HEART_BEAT is not set
689CONFIG_DEBUG_BOOTMEM=y 790CONFIG_DEBUG_BOOTMEM=y
@@ -694,7 +795,11 @@ CONFIG_DEBUG_BOOTMEM=y
694# CONFIG_KEYS is not set 795# CONFIG_KEYS is not set
695# CONFIG_SECURITY is not set 796# CONFIG_SECURITY is not set
696# CONFIG_SECURITYFS is not set 797# CONFIG_SECURITYFS is not set
697# CONFIG_SECURITY_FILE_CAPABILITIES is not set 798# CONFIG_DEFAULT_SECURITY_SELINUX is not set
799# CONFIG_DEFAULT_SECURITY_SMACK is not set
800# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
801CONFIG_DEFAULT_SECURITY_DAC=y
802CONFIG_DEFAULT_SECURITY=""
698CONFIG_CRYPTO=y 803CONFIG_CRYPTO=y
699 804
700# 805#
@@ -800,5 +905,6 @@ CONFIG_ZLIB_INFLATE=y
800CONFIG_DECOMPRESS_GZIP=y 905CONFIG_DECOMPRESS_GZIP=y
801CONFIG_HAS_IOMEM=y 906CONFIG_HAS_IOMEM=y
802CONFIG_HAS_IOPORT=y 907CONFIG_HAS_IOPORT=y
908CONFIG_HAS_DMA=y
803CONFIG_HAVE_LMB=y 909CONFIG_HAVE_LMB=y
804CONFIG_NLATTR=y 910CONFIG_NLATTR=y
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index adb839bab704..dd3a494257f4 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31 3# Linux kernel version: 2.6.34-rc6
4# Thu Sep 24 10:29:43 2009 4# Thu May 6 11:25:12 2010
5# 5#
6CONFIG_MICROBLAZE=y 6CONFIG_MICROBLAZE=y
7# CONFIG_SWAP is not set 7# CONFIG_SWAP is not set
@@ -19,8 +19,10 @@ CONFIG_GENERIC_CLOCKEVENTS=y
19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
20CONFIG_GENERIC_GPIO=y 20CONFIG_GENERIC_GPIO=y
21CONFIG_GENERIC_CSUM=y 21CONFIG_GENERIC_CSUM=y
22# CONFIG_PCI is not set 22CONFIG_STACKTRACE_SUPPORT=y
23CONFIG_NO_DMA=y 23CONFIG_LOCKDEP_SUPPORT=y
24CONFIG_HAVE_LATENCYTOP_SUPPORT=y
25CONFIG_DTC=y
24CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 26CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
25CONFIG_CONSTRUCTORS=y 27CONFIG_CONSTRUCTORS=y
26 28
@@ -46,6 +48,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
46# 48#
47CONFIG_TREE_RCU=y 49CONFIG_TREE_RCU=y
48# CONFIG_TREE_PREEMPT_RCU is not set 50# CONFIG_TREE_PREEMPT_RCU is not set
51# CONFIG_TINY_RCU is not set
49# CONFIG_RCU_TRACE is not set 52# CONFIG_RCU_TRACE is not set
50CONFIG_RCU_FANOUT=32 53CONFIG_RCU_FANOUT=32
51# CONFIG_RCU_FANOUT_EXACT is not set 54# CONFIG_RCU_FANOUT_EXACT is not set
@@ -53,7 +56,6 @@ CONFIG_RCU_FANOUT=32
53CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
54CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
55CONFIG_LOG_BUF_SHIFT=17 58CONFIG_LOG_BUF_SHIFT=17
56# CONFIG_GROUP_SCHED is not set
57# CONFIG_CGROUPS is not set 59# CONFIG_CGROUPS is not set
58CONFIG_SYSFS_DEPRECATED=y 60CONFIG_SYSFS_DEPRECATED=y
59CONFIG_SYSFS_DEPRECATED_V2=y 61CONFIG_SYSFS_DEPRECATED_V2=y
@@ -81,16 +83,18 @@ CONFIG_EVENTFD=y
81CONFIG_AIO=y 83CONFIG_AIO=y
82 84
83# 85#
84# Performance Counters 86# Kernel Performance Events And Counters
85# 87#
86CONFIG_VM_EVENT_COUNTERS=y 88CONFIG_VM_EVENT_COUNTERS=y
87# CONFIG_STRIP_ASM_SYMS is not set
88CONFIG_COMPAT_BRK=y 89CONFIG_COMPAT_BRK=y
89CONFIG_SLAB=y 90CONFIG_SLAB=y
90# CONFIG_SLUB is not set 91# CONFIG_SLUB is not set
91# CONFIG_SLOB is not set 92# CONFIG_SLOB is not set
93# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set
92# CONFIG_PROFILING is not set 94# CONFIG_PROFILING is not set
93# CONFIG_MARKERS is not set 95CONFIG_HAVE_OPROFILE=y
96CONFIG_HAVE_DMA_ATTRS=y
97CONFIG_HAVE_DMA_API_DEBUG=y
94 98
95# 99#
96# GCOV-based kernel profiling 100# GCOV-based kernel profiling
@@ -116,14 +120,41 @@ CONFIG_LBDAF=y
116# IO Schedulers 120# IO Schedulers
117# 121#
118CONFIG_IOSCHED_NOOP=y 122CONFIG_IOSCHED_NOOP=y
119CONFIG_IOSCHED_AS=y
120CONFIG_IOSCHED_DEADLINE=y 123CONFIG_IOSCHED_DEADLINE=y
121CONFIG_IOSCHED_CFQ=y 124CONFIG_IOSCHED_CFQ=y
122# CONFIG_DEFAULT_AS is not set
123# CONFIG_DEFAULT_DEADLINE is not set 125# CONFIG_DEFAULT_DEADLINE is not set
124CONFIG_DEFAULT_CFQ=y 126CONFIG_DEFAULT_CFQ=y
125# CONFIG_DEFAULT_NOOP is not set 127# CONFIG_DEFAULT_NOOP is not set
126CONFIG_DEFAULT_IOSCHED="cfq" 128CONFIG_DEFAULT_IOSCHED="cfq"
129# CONFIG_INLINE_SPIN_TRYLOCK is not set
130# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
131# CONFIG_INLINE_SPIN_LOCK is not set
132# CONFIG_INLINE_SPIN_LOCK_BH is not set
133# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
134# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
135CONFIG_INLINE_SPIN_UNLOCK=y
136# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
137CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
138# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
139# CONFIG_INLINE_READ_TRYLOCK is not set
140# CONFIG_INLINE_READ_LOCK is not set
141# CONFIG_INLINE_READ_LOCK_BH is not set
142# CONFIG_INLINE_READ_LOCK_IRQ is not set
143# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
144CONFIG_INLINE_READ_UNLOCK=y
145# CONFIG_INLINE_READ_UNLOCK_BH is not set
146CONFIG_INLINE_READ_UNLOCK_IRQ=y
147# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
148# CONFIG_INLINE_WRITE_TRYLOCK is not set
149# CONFIG_INLINE_WRITE_LOCK is not set
150# CONFIG_INLINE_WRITE_LOCK_BH is not set
151# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
152# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
153CONFIG_INLINE_WRITE_UNLOCK=y
154# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
155CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
156# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
157# CONFIG_MUTEX_SPIN_ON_OWNER is not set
127# CONFIG_FREEZER is not set 158# CONFIG_FREEZER is not set
128 159
129# 160#
@@ -132,7 +163,10 @@ CONFIG_DEFAULT_IOSCHED="cfq"
132CONFIG_PLATFORM_GENERIC=y 163CONFIG_PLATFORM_GENERIC=y
133# CONFIG_SELFMOD is not set 164# CONFIG_SELFMOD is not set
134# CONFIG_OPT_LIB_FUNCTION is not set 165# CONFIG_OPT_LIB_FUNCTION is not set
135# CONFIG_ALLOW_EDIT_AUTO is not set 166
167#
168# Definitions for MICROBLAZE0
169#
136CONFIG_KERNEL_BASE_ADDR=0x90000000 170CONFIG_KERNEL_BASE_ADDR=0x90000000
137CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5" 171CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5"
138CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 172CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
@@ -174,11 +208,14 @@ CONFIG_PROC_DEVICETREE=y
174# 208#
175# Advanced setup 209# Advanced setup
176# 210#
211# CONFIG_ADVANCED_OPTIONS is not set
177 212
178# 213#
179# Default settings for advanced configuration options are used 214# Default settings for advanced configuration options are used
180# 215#
216CONFIG_LOWMEM_SIZE=0x30000000
181CONFIG_KERNEL_START=0x90000000 217CONFIG_KERNEL_START=0x90000000
218CONFIG_TASK_SIZE=0x80000000
182CONFIG_SELECT_MEMORY_MODEL=y 219CONFIG_SELECT_MEMORY_MODEL=y
183CONFIG_FLATMEM_MANUAL=y 220CONFIG_FLATMEM_MANUAL=y
184# CONFIG_DISCONTIGMEM_MANUAL is not set 221# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -190,7 +227,6 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
190# CONFIG_PHYS_ADDR_T_64BIT is not set 227# CONFIG_PHYS_ADDR_T_64BIT is not set
191CONFIG_ZONE_DMA_FLAG=0 228CONFIG_ZONE_DMA_FLAG=0
192CONFIG_VIRT_TO_BUS=y 229CONFIG_VIRT_TO_BUS=y
193CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
194CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1 230CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
195 231
196# 232#
@@ -201,13 +237,20 @@ CONFIG_BINFMT_FLAT=y
201# CONFIG_BINFMT_SHARED_FLAT is not set 237# CONFIG_BINFMT_SHARED_FLAT is not set
202# CONFIG_HAVE_AOUT is not set 238# CONFIG_HAVE_AOUT is not set
203# CONFIG_BINFMT_MISC is not set 239# CONFIG_BINFMT_MISC is not set
240
241#
242# Bus Options
243#
244# CONFIG_PCI is not set
245# CONFIG_PCI_DOMAINS is not set
246# CONFIG_PCI_SYSCALL is not set
247# CONFIG_ARCH_SUPPORTS_MSI is not set
204CONFIG_NET=y 248CONFIG_NET=y
205 249
206# 250#
207# Networking options 251# Networking options
208# 252#
209CONFIG_PACKET=y 253CONFIG_PACKET=y
210# CONFIG_PACKET_MMAP is not set
211CONFIG_UNIX=y 254CONFIG_UNIX=y
212CONFIG_XFRM=y 255CONFIG_XFRM=y
213# CONFIG_XFRM_USER is not set 256# CONFIG_XFRM_USER is not set
@@ -274,9 +317,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
274# CONFIG_AF_RXRPC is not set 317# CONFIG_AF_RXRPC is not set
275CONFIG_WIRELESS=y 318CONFIG_WIRELESS=y
276# CONFIG_CFG80211 is not set 319# CONFIG_CFG80211 is not set
277CONFIG_CFG80211_DEFAULT_PS_VALUE=0
278CONFIG_WIRELESS_OLD_REGULATORY=y
279# CONFIG_WIRELESS_EXT is not set
280# CONFIG_LIB80211 is not set 320# CONFIG_LIB80211 is not set
281 321
282# 322#
@@ -301,9 +341,9 @@ CONFIG_STANDALONE=y
301# CONFIG_CONNECTOR is not set 341# CONFIG_CONNECTOR is not set
302CONFIG_MTD=y 342CONFIG_MTD=y
303# CONFIG_MTD_DEBUG is not set 343# CONFIG_MTD_DEBUG is not set
344# CONFIG_MTD_TESTS is not set
304CONFIG_MTD_CONCAT=y 345CONFIG_MTD_CONCAT=y
305CONFIG_MTD_PARTITIONS=y 346CONFIG_MTD_PARTITIONS=y
306# CONFIG_MTD_TESTS is not set
307# CONFIG_MTD_REDBOOT_PARTS is not set 347# CONFIG_MTD_REDBOOT_PARTS is not set
308CONFIG_MTD_CMDLINE_PARTS=y 348CONFIG_MTD_CMDLINE_PARTS=y
309# CONFIG_MTD_OF_PARTS is not set 349# CONFIG_MTD_OF_PARTS is not set
@@ -382,11 +422,16 @@ CONFIG_MTD_UCLINUX=y
382# UBI - Unsorted block images 422# UBI - Unsorted block images
383# 423#
384# CONFIG_MTD_UBI is not set 424# CONFIG_MTD_UBI is not set
425CONFIG_OF_FLATTREE=y
385CONFIG_OF_DEVICE=y 426CONFIG_OF_DEVICE=y
386# CONFIG_PARPORT is not set 427# CONFIG_PARPORT is not set
387CONFIG_BLK_DEV=y 428CONFIG_BLK_DEV=y
388# CONFIG_BLK_DEV_COW_COMMON is not set 429# CONFIG_BLK_DEV_COW_COMMON is not set
389# CONFIG_BLK_DEV_LOOP is not set 430# CONFIG_BLK_DEV_LOOP is not set
431
432#
433# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
434#
390CONFIG_BLK_DEV_NBD=y 435CONFIG_BLK_DEV_NBD=y
391CONFIG_BLK_DEV_RAM=y 436CONFIG_BLK_DEV_RAM=y
392CONFIG_BLK_DEV_RAM_COUNT=16 437CONFIG_BLK_DEV_RAM_COUNT=16
@@ -407,6 +452,7 @@ CONFIG_MISC_DEVICES=y
407# 452#
408# SCSI device support 453# SCSI device support
409# 454#
455CONFIG_SCSI_MOD=y
410# CONFIG_RAID_ATTRS is not set 456# CONFIG_RAID_ATTRS is not set
411# CONFIG_SCSI is not set 457# CONFIG_SCSI is not set
412# CONFIG_SCSI_DMA is not set 458# CONFIG_SCSI_DMA is not set
@@ -432,13 +478,14 @@ CONFIG_NET_ETHERNET=y
432# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set 478# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
433# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 479# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
434# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 480# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
481# CONFIG_B44 is not set
435# CONFIG_KS8842 is not set 482# CONFIG_KS8842 is not set
483# CONFIG_KS8851_MLL is not set
436# CONFIG_XILINX_EMACLITE is not set 484# CONFIG_XILINX_EMACLITE is not set
437CONFIG_NETDEV_1000=y 485CONFIG_NETDEV_1000=y
438CONFIG_NETDEV_10000=y 486CONFIG_NETDEV_10000=y
439CONFIG_WLAN=y 487CONFIG_WLAN=y
440# CONFIG_WLAN_PRE80211 is not set 488# CONFIG_HOSTAP is not set
441# CONFIG_WLAN_80211 is not set
442 489
443# 490#
444# Enable WiMAX (Networking options) to see the WiMAX drivers 491# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -482,6 +529,8 @@ CONFIG_SERIAL_UARTLITE=y
482CONFIG_SERIAL_UARTLITE_CONSOLE=y 529CONFIG_SERIAL_UARTLITE_CONSOLE=y
483CONFIG_SERIAL_CORE=y 530CONFIG_SERIAL_CORE=y
484CONFIG_SERIAL_CORE_CONSOLE=y 531CONFIG_SERIAL_CORE_CONSOLE=y
532# CONFIG_SERIAL_TIMBERDALE is not set
533# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
485CONFIG_UNIX98_PTYS=y 534CONFIG_UNIX98_PTYS=y
486# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 535# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
487CONFIG_LEGACY_PTYS=y 536CONFIG_LEGACY_PTYS=y
@@ -508,8 +557,13 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
508# CONFIG_POWER_SUPPLY is not set 557# CONFIG_POWER_SUPPLY is not set
509# CONFIG_HWMON is not set 558# CONFIG_HWMON is not set
510# CONFIG_THERMAL is not set 559# CONFIG_THERMAL is not set
511# CONFIG_THERMAL_HWMON is not set
512# CONFIG_WATCHDOG is not set 560# CONFIG_WATCHDOG is not set
561CONFIG_SSB_POSSIBLE=y
562
563#
564# Sonics Silicon Backplane
565#
566# CONFIG_SSB is not set
513 567
514# 568#
515# Multifunction device drivers 569# Multifunction device drivers
@@ -559,6 +613,7 @@ CONFIG_USB_ARCH_HAS_EHCI=y
559# CONFIG_NEW_LEDS is not set 613# CONFIG_NEW_LEDS is not set
560# CONFIG_ACCESSIBILITY is not set 614# CONFIG_ACCESSIBILITY is not set
561# CONFIG_RTC_CLASS is not set 615# CONFIG_RTC_CLASS is not set
616# CONFIG_DMADEVICES is not set
562# CONFIG_AUXDISPLAY is not set 617# CONFIG_AUXDISPLAY is not set
563# CONFIG_UIO is not set 618# CONFIG_UIO is not set
564 619
@@ -616,7 +671,6 @@ CONFIG_INOTIFY_USER=y
616CONFIG_PROC_FS=y 671CONFIG_PROC_FS=y
617CONFIG_PROC_SYSCTL=y 672CONFIG_PROC_SYSCTL=y
618CONFIG_SYSFS=y 673CONFIG_SYSFS=y
619# CONFIG_TMPFS is not set
620# CONFIG_HUGETLB_PAGE is not set 674# CONFIG_HUGETLB_PAGE is not set
621# CONFIG_CONFIGFS_FS is not set 675# CONFIG_CONFIGFS_FS is not set
622CONFIG_MISC_FILESYSTEMS=y 676CONFIG_MISC_FILESYSTEMS=y
@@ -628,6 +682,7 @@ CONFIG_MISC_FILESYSTEMS=y
628# CONFIG_BFS_FS is not set 682# CONFIG_BFS_FS is not set
629# CONFIG_EFS_FS is not set 683# CONFIG_EFS_FS is not set
630# CONFIG_JFFS2_FS is not set 684# CONFIG_JFFS2_FS is not set
685# CONFIG_LOGFS is not set
631CONFIG_CRAMFS=y 686CONFIG_CRAMFS=y
632# CONFIG_SQUASHFS is not set 687# CONFIG_SQUASHFS is not set
633# CONFIG_VXFS_FS is not set 688# CONFIG_VXFS_FS is not set
@@ -656,6 +711,7 @@ CONFIG_SUNRPC=y
656# CONFIG_RPCSEC_GSS_KRB5 is not set 711# CONFIG_RPCSEC_GSS_KRB5 is not set
657# CONFIG_RPCSEC_GSS_SPKM3 is not set 712# CONFIG_RPCSEC_GSS_SPKM3 is not set
658# CONFIG_SMB_FS is not set 713# CONFIG_SMB_FS is not set
714# CONFIG_CEPH_FS is not set
659# CONFIG_CIFS is not set 715# CONFIG_CIFS is not set
660# CONFIG_NCP_FS is not set 716# CONFIG_NCP_FS is not set
661# CONFIG_CODA_FS is not set 717# CONFIG_CODA_FS is not set
@@ -672,11 +728,13 @@ CONFIG_MSDOS_PARTITION=y
672# 728#
673# Kernel hacking 729# Kernel hacking
674# 730#
731CONFIG_TRACE_IRQFLAGS_SUPPORT=y
675# CONFIG_PRINTK_TIME is not set 732# CONFIG_PRINTK_TIME is not set
676CONFIG_ENABLE_WARN_DEPRECATED=y 733CONFIG_ENABLE_WARN_DEPRECATED=y
677CONFIG_ENABLE_MUST_CHECK=y 734CONFIG_ENABLE_MUST_CHECK=y
678CONFIG_FRAME_WARN=1024 735CONFIG_FRAME_WARN=1024
679# CONFIG_MAGIC_SYSRQ is not set 736# CONFIG_MAGIC_SYSRQ is not set
737# CONFIG_STRIP_ASM_SYMS is not set
680CONFIG_UNUSED_SYMBOLS=y 738CONFIG_UNUSED_SYMBOLS=y
681CONFIG_DEBUG_FS=y 739CONFIG_DEBUG_FS=y
682# CONFIG_HEADERS_CHECK is not set 740# CONFIG_HEADERS_CHECK is not set
@@ -695,12 +753,17 @@ CONFIG_DEBUG_OBJECTS=y
695CONFIG_DEBUG_OBJECTS_SELFTEST=y 753CONFIG_DEBUG_OBJECTS_SELFTEST=y
696CONFIG_DEBUG_OBJECTS_FREE=y 754CONFIG_DEBUG_OBJECTS_FREE=y
697CONFIG_DEBUG_OBJECTS_TIMERS=y 755CONFIG_DEBUG_OBJECTS_TIMERS=y
756# CONFIG_DEBUG_OBJECTS_WORK is not set
698CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 757CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
699# CONFIG_DEBUG_SLAB is not set 758# CONFIG_DEBUG_SLAB is not set
759# CONFIG_DEBUG_KMEMLEAK is not set
700# CONFIG_DEBUG_RT_MUTEXES is not set 760# CONFIG_DEBUG_RT_MUTEXES is not set
701# CONFIG_RT_MUTEX_TESTER is not set 761# CONFIG_RT_MUTEX_TESTER is not set
702# CONFIG_DEBUG_SPINLOCK is not set 762# CONFIG_DEBUG_SPINLOCK is not set
703# CONFIG_DEBUG_MUTEXES is not set 763# CONFIG_DEBUG_MUTEXES is not set
764# CONFIG_DEBUG_LOCK_ALLOC is not set
765# CONFIG_PROVE_LOCKING is not set
766# CONFIG_LOCK_STAT is not set
704# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 767# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
705# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 768# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
706# CONFIG_DEBUG_KOBJECT is not set 769# CONFIG_DEBUG_KOBJECT is not set
@@ -719,10 +782,32 @@ CONFIG_DEBUG_SG=y
719# CONFIG_BACKTRACE_SELF_TEST is not set 782# CONFIG_BACKTRACE_SELF_TEST is not set
720# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 783# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
721# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set 784# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
785# CONFIG_LKDTM is not set
722# CONFIG_FAULT_INJECTION is not set 786# CONFIG_FAULT_INJECTION is not set
787# CONFIG_LATENCYTOP is not set
723CONFIG_SYSCTL_SYSCALL_CHECK=y 788CONFIG_SYSCTL_SYSCALL_CHECK=y
724# CONFIG_PAGE_POISONING is not set 789# CONFIG_PAGE_POISONING is not set
790CONFIG_HAVE_FUNCTION_TRACER=y
791CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
792CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
793CONFIG_HAVE_DYNAMIC_FTRACE=y
794CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
795CONFIG_TRACING_SUPPORT=y
796CONFIG_FTRACE=y
797# CONFIG_FUNCTION_TRACER is not set
798# CONFIG_IRQSOFF_TRACER is not set
799# CONFIG_SCHED_TRACER is not set
800# CONFIG_ENABLE_DEFAULT_TRACERS is not set
801# CONFIG_BOOT_TRACER is not set
802CONFIG_BRANCH_PROFILE_NONE=y
803# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
804# CONFIG_PROFILE_ALL_BRANCHES is not set
805# CONFIG_STACK_TRACER is not set
806# CONFIG_KMEMTRACE is not set
807# CONFIG_WORKQUEUE_TRACER is not set
808# CONFIG_BLK_DEV_IO_TRACE is not set
725# CONFIG_DYNAMIC_DEBUG is not set 809# CONFIG_DYNAMIC_DEBUG is not set
810# CONFIG_DMA_API_DEBUG is not set
726# CONFIG_SAMPLES is not set 811# CONFIG_SAMPLES is not set
727CONFIG_EARLY_PRINTK=y 812CONFIG_EARLY_PRINTK=y
728# CONFIG_HEART_BEAT is not set 813# CONFIG_HEART_BEAT is not set
@@ -734,7 +819,11 @@ CONFIG_EARLY_PRINTK=y
734# CONFIG_KEYS is not set 819# CONFIG_KEYS is not set
735# CONFIG_SECURITY is not set 820# CONFIG_SECURITY is not set
736# CONFIG_SECURITYFS is not set 821# CONFIG_SECURITYFS is not set
737# CONFIG_SECURITY_FILE_CAPABILITIES is not set 822# CONFIG_DEFAULT_SECURITY_SELINUX is not set
823# CONFIG_DEFAULT_SECURITY_SMACK is not set
824# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
825CONFIG_DEFAULT_SECURITY_DAC=y
826CONFIG_DEFAULT_SECURITY=""
738CONFIG_CRYPTO=y 827CONFIG_CRYPTO=y
739 828
740# 829#
@@ -838,5 +927,6 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
838CONFIG_ZLIB_INFLATE=y 927CONFIG_ZLIB_INFLATE=y
839CONFIG_HAS_IOMEM=y 928CONFIG_HAS_IOMEM=y
840CONFIG_HAS_IOPORT=y 929CONFIG_HAS_IOPORT=y
930CONFIG_HAS_DMA=y
841CONFIG_HAVE_LMB=y 931CONFIG_HAVE_LMB=y
842CONFIG_NLATTR=y 932CONFIG_NLATTR=y
diff --git a/arch/microblaze/include/asm/asm-offsets.h b/arch/microblaze/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/microblaze/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
index c209c47509d5..4efe96a036f7 100644
--- a/arch/microblaze/include/asm/cache.h
+++ b/arch/microblaze/include/asm/cache.h
@@ -15,26 +15,10 @@
 
 #include <asm/registers.h>
 
-#define L1_CACHE_SHIFT 2
+#define L1_CACHE_SHIFT 5
 /* word-granular cache in microblaze */
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
-void _enable_icache(void);
-void _disable_icache(void);
-void _invalidate_icache(unsigned int addr);
-
-#define __enable_icache() _enable_icache()
-#define __disable_icache() _disable_icache()
-#define __invalidate_icache(addr) _invalidate_icache(addr)
-
-void _enable_dcache(void);
-void _disable_dcache(void);
-void _invalidate_dcache(unsigned int addr);
-
-#define __enable_dcache() _enable_dcache()
-#define __disable_dcache() _disable_dcache()
-#define __invalidate_dcache(addr) _invalidate_dcache(addr)
-
 #endif /* _ASM_MICROBLAZE_CACHE_H */
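
The cache.h hunk above raises L1_CACHE_SHIFT from 2 to 5, so L1_CACHE_BYTES grows from 4 to 32 bytes and anything padded or rounded to a cache line now works in 32-byte units. A minimal userspace sketch of that arithmetic (illustrative only; the two macro names mirror the header, the helper is invented):

/* Illustrative userspace sketch (not kernel code) of what the new
 * L1_CACHE_SHIFT value means in practice. */
#include <stdio.h>

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

/* Round a buffer size up to a whole number of cache lines, the usual
 * reason code cares about L1_CACHE_BYTES at all. */
static unsigned long cache_align(unsigned long size)
{
	return (size + L1_CACHE_BYTES - 1) & ~(unsigned long)(L1_CACHE_BYTES - 1);
}

int main(void)
{
	/* The old shift of 2 gave 4-byte lines; the new shift gives 32. */
	printf("line size: %d bytes\n", L1_CACHE_BYTES);
	printf("100 bytes rounds up to %lu\n", cache_align(100)); /* prints 128 */
	return 0;
}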
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index f989d6aad648..a6edd356cd08 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -18,6 +18,8 @@
18/* Somebody depends on this; sigh... */ 18/* Somebody depends on this; sigh... */
19#include <linux/mm.h> 19#include <linux/mm.h>
20 20
21/* Look at Documentation/cachetlb.txt */
22
21/* 23/*
22 * Cache handling functions. 24 * Cache handling functions.
23 * Microblaze has a write-through data cache, meaning that the data cache 25 * Microblaze has a write-through data cache, meaning that the data cache
@@ -27,77 +29,81 @@
27 * instruction cache to make sure we don't fetch old, bad code. 29 * instruction cache to make sure we don't fetch old, bad code.
28 */ 30 */
29 31
32/* struct cache, d=dcache, i=icache, fl = flush, iv = invalidate,
33 * suffix r = range */
34struct scache {
35 /* icache */
36 void (*ie)(void); /* enable */
37 void (*id)(void); /* disable */
38 void (*ifl)(void); /* flush */
39 void (*iflr)(unsigned long a, unsigned long b);
40 void (*iin)(void); /* invalidate */
41 void (*iinr)(unsigned long a, unsigned long b);
42 /* dcache */
43 void (*de)(void); /* enable */
44 void (*dd)(void); /* disable */
45 void (*dfl)(void); /* flush */
46 void (*dflr)(unsigned long a, unsigned long b);
47 void (*din)(void); /* invalidate */
48 void (*dinr)(unsigned long a, unsigned long b);
49};
50
51/* microblaze cache */
52extern struct scache *mbc;
53
54void microblaze_cache_init(void);
55
56#define enable_icache() mbc->ie();
57#define disable_icache() mbc->id();
58#define flush_icache() mbc->ifl();
59#define flush_icache_range(start, end) mbc->iflr(start, end);
60#define invalidate_icache() mbc->iin();
61#define invalidate_icache_range(start, end) mbc->iinr(start, end);
62
63
64#define flush_icache_user_range(vma, pg, adr, len) flush_icache();
65#define flush_icache_page(vma, pg) do { } while (0)
66
67#define enable_dcache() mbc->de();
68#define disable_dcache() mbc->dd();
30/* FIXME for LL-temac driver */ 69/* FIXME for LL-temac driver */
31#define invalidate_dcache_range(start, end) \ 70#define invalidate_dcache() mbc->din();
32 __invalidate_dcache_range(start, end) 71#define invalidate_dcache_range(start, end) mbc->dinr(start, end);
33 72#define flush_dcache() mbc->dfl();
34#define flush_cache_all() __invalidate_cache_all() 73#define flush_dcache_range(start, end) mbc->dflr(start, end);
35#define flush_cache_mm(mm) do { } while (0)
36#define flush_cache_range(vma, start, end) __invalidate_cache_all()
37#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
38 74
39#define flush_dcache_range(start, end) __invalidate_dcache_range(start, end) 75#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
76/* D-cache aliasing problem can't happen - cache is between MMU and ram */
40#define flush_dcache_page(page) do { } while (0) 77#define flush_dcache_page(page) do { } while (0)
41#define flush_dcache_mmap_lock(mapping) do { } while (0) 78#define flush_dcache_mmap_lock(mapping) do { } while (0)
42#define flush_dcache_mmap_unlock(mapping) do { } while (0) 79#define flush_dcache_mmap_unlock(mapping) do { } while (0)
43 80
44#define flush_icache_range(start, len) __invalidate_icache_range(start, len)
45#define flush_icache_page(vma, pg) do { } while (0)
46
47#ifndef CONFIG_MMU
48# define flush_icache_user_range(start, len) do { } while (0)
49#else
50# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
51
52# define flush_page_to_ram(page) do { } while (0)
53 81
54# define flush_icache() __invalidate_icache_all() 82#define flush_cache_dup_mm(mm) do { } while (0)
55# define flush_cache_sigtramp(vaddr) \ 83#define flush_cache_vmap(start, end) do { } while (0)
56 __invalidate_icache_range(vaddr, vaddr + 8) 84#define flush_cache_vunmap(start, end) do { } while (0)
57 85#define flush_cache_mm(mm) do { } while (0)
58# define flush_dcache_mmap_lock(mapping) do { } while (0) 86#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
59# define flush_dcache_mmap_unlock(mapping) do { } while (0)
60 87
61# define flush_cache_dup_mm(mm) do { } while (0) 88/* MS: kgdb code use this macro, wrong len with FLASH */
89#if 0
90#define flush_cache_range(vma, start, len) { \
91 flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \
92 flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \
93}
62#endif 94#endif
63 95
64#define flush_cache_vmap(start, end) do { } while (0) 96#define flush_cache_range(vma, start, len) do { } while (0)
65#define flush_cache_vunmap(start, end) do { } while (0)
66
67struct page;
68struct mm_struct;
69struct vm_area_struct;
70
71/* see arch/microblaze/kernel/cache.c */
72extern void __invalidate_icache_all(void);
73extern void __invalidate_icache_range(unsigned long start, unsigned long end);
74extern void __invalidate_icache_page(struct vm_area_struct *vma,
75 struct page *page);
76extern void __invalidate_icache_user_range(struct vm_area_struct *vma,
77 struct page *page,
78 unsigned long adr, int len);
79extern void __invalidate_cache_sigtramp(unsigned long addr);
80
81extern void __invalidate_dcache_all(void);
82extern void __invalidate_dcache_range(unsigned long start, unsigned long end);
83extern void __invalidate_dcache_page(struct vm_area_struct *vma,
84 struct page *page);
85extern void __invalidate_dcache_user_range(struct vm_area_struct *vma,
86 struct page *page,
87 unsigned long adr, int len);
88
89extern inline void __invalidate_cache_all(void)
90{
91 __invalidate_icache_all();
92 __invalidate_dcache_all();
93}
94 97
95#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
96do { memcpy((dst), (src), (len)); \
97 flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
98} while (0)
99
100#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
101 memcpy((dst), (src), (len))
98#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
99do { \
100 memcpy((dst), (src), (len)); \
101 flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
102} while (0)
103
104#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
105do { \
106 memcpy((dst), (src), (len)); \
107} while (0)
102 108
103#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */ 109#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
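
The rewritten header routes every cache primitive through the mbc function-pointer table that microblaze_cache_init() fills in at boot. A minimal usage sketch of the range macros defined above (hypothetical driver code, not part of this patch):

#include <asm/cacheflush.h>

/* Hypothetical helper: write back a buffer before a device reads it.
 * flush_dcache_range() expands to mbc->dflr(start, end), i.e. an indirect
 * call into whichever cache implementation cpuinfo selected at boot. */
static void example_sync_buffer_for_device(void *buf, unsigned long len)
{
	unsigned long start = (unsigned long)buf;

	flush_dcache_range(start, start + len);
}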
diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h
index 52f28f6dc4eb..b4f5ca33aebf 100644
--- a/arch/microblaze/include/asm/cpuinfo.h
+++ b/arch/microblaze/include/asm/cpuinfo.h
@@ -43,7 +43,7 @@ struct cpuinfo {
43 u32 use_icache; 43 u32 use_icache;
44 u32 icache_tagbits; 44 u32 icache_tagbits;
45 u32 icache_write; 45 u32 icache_write;
46 u32 icache_line; 46 u32 icache_line_length;
47 u32 icache_size; 47 u32 icache_size;
48 unsigned long icache_base; 48 unsigned long icache_base;
49 unsigned long icache_high; 49 unsigned long icache_high;
@@ -51,8 +51,9 @@ struct cpuinfo {
51 u32 use_dcache; 51 u32 use_dcache;
52 u32 dcache_tagbits; 52 u32 dcache_tagbits;
53 u32 dcache_write; 53 u32 dcache_write;
54 u32 dcache_line; 54 u32 dcache_line_length;
55 u32 dcache_size; 55 u32 dcache_size;
56 u32 dcache_wb;
56 unsigned long dcache_base; 57 unsigned long dcache_base;
57 unsigned long dcache_high; 58 unsigned long dcache_high;
58 59
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index 30286db27c1c..402b46e630f6 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -14,11 +14,27 @@ struct device_node;
14struct dev_archdata { 14struct dev_archdata {
15 /* Optional pointer to an OF device node */ 15 /* Optional pointer to an OF device node */
16 struct device_node *of_node; 16 struct device_node *of_node;
17
18 /* DMA operations on that device */
19 struct dma_map_ops *dma_ops;
20 void *dma_data;
17}; 21};
18 22
19struct pdev_archdata { 23struct pdev_archdata {
20}; 24};
21 25
26static inline void dev_archdata_set_node(struct dev_archdata *ad,
27 struct device_node *np)
28{
29 ad->of_node = np;
30}
31
32static inline struct device_node *
33dev_archdata_get_node(const struct dev_archdata *ad)
34{
35 return ad->of_node;
36}
37
22#endif /* _ASM_MICROBLAZE_DEVICE_H */ 38#endif /* _ASM_MICROBLAZE_DEVICE_H */
23 39
24 40
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index d00e40099165..18b3731c8509 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -1 +1,153 @@
1#include <asm-generic/dma-mapping-broken.h>
1/*
2 * Implements the generic device dma API for microblaze and the pci
3 *
4 * Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2009-2010 PetaLogix
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * This file is base on powerpc and x86 dma-mapping.h versions
12 * Copyright (C) 2004 IBM
13 */
14
15#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
16#define _ASM_MICROBLAZE_DMA_MAPPING_H
17
18/*
19 * See Documentation/PCI/PCI-DMA-mapping.txt and
20 * Documentation/DMA-API.txt for documentation.
21 */
22
23#include <linux/types.h>
24#include <linux/cache.h>
25#include <linux/mm.h>
26#include <linux/scatterlist.h>
27#include <linux/dma-debug.h>
28#include <linux/dma-attrs.h>
29#include <asm/io.h>
30#include <asm-generic/dma-coherent.h>
31
32#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
33
34#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
35#define __dma_free_coherent(size, addr) ((void)0)
36#define __dma_sync(addr, size, rw) ((void)0)
37
38static inline unsigned long device_to_mask(struct device *dev)
39{
40 if (dev->dma_mask && *dev->dma_mask)
41 return *dev->dma_mask;
42 /* Assume devices without mask can take 32 bit addresses */
43 return 0xfffffffful;
44}
45
46extern struct dma_map_ops *dma_ops;
47
48/*
49 * Available generic sets of operations
50 */
51extern struct dma_map_ops dma_direct_ops;
52
53static inline struct dma_map_ops *get_dma_ops(struct device *dev)
54{
55 /* We don't handle the NULL dev case for ISA for now. We could
56 * do it via an out of line call but it is not needed for now. The
57 * only ISA DMA device we support is the floppy and we have a hack
58 * in the floppy driver directly to get a device for us.
59 */
60 if (unlikely(!dev) || !dev->archdata.dma_ops)
61 return NULL;
62
63 return dev->archdata.dma_ops;
64}
65
66static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
67{
68 dev->archdata.dma_ops = ops;
69}
70
71static inline int dma_supported(struct device *dev, u64 mask)
72{
73 struct dma_map_ops *ops = get_dma_ops(dev);
74
75 if (unlikely(!ops))
76 return 0;
77 if (!ops->dma_supported)
78 return 1;
79 return ops->dma_supported(dev, mask);
80}
81
82#ifdef CONFIG_PCI
83/* We have our own implementation of pci_set_dma_mask() */
84#define HAVE_ARCH_PCI_SET_DMA_MASK
85
86#endif
87
88static inline int dma_set_mask(struct device *dev, u64 dma_mask)
89{
90 struct dma_map_ops *ops = get_dma_ops(dev);
91
92 if (unlikely(ops == NULL))
93 return -EIO;
94 if (ops->set_dma_mask)
95 return ops->set_dma_mask(dev, dma_mask);
96 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
97 return -EIO;
98 *dev->dma_mask = dma_mask;
99 return 0;
100}
101
102#include <asm-generic/dma-mapping-common.h>
103
104static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
105{
106 struct dma_map_ops *ops = get_dma_ops(dev);
107 if (ops->mapping_error)
108 return ops->mapping_error(dev, dma_addr);
109
110 return (dma_addr == DMA_ERROR_CODE);
111}
112
113#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
114#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
115#define dma_is_consistent(d, h) (1)
116
117static inline void *dma_alloc_coherent(struct device *dev, size_t size,
118 dma_addr_t *dma_handle, gfp_t flag)
119{
120 struct dma_map_ops *ops = get_dma_ops(dev);
121 void *memory;
122
123 BUG_ON(!ops);
124
125 memory = ops->alloc_coherent(dev, size, dma_handle, flag);
126
127 debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
128 return memory;
129}
130
131static inline void dma_free_coherent(struct device *dev, size_t size,
132 void *cpu_addr, dma_addr_t dma_handle)
133{
134 struct dma_map_ops *ops = get_dma_ops(dev);
135
136 BUG_ON(!ops);
137 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
138 ops->free_coherent(dev, size, cpu_addr, dma_handle);
139}
140
141static inline int dma_get_cache_alignment(void)
142{
143 return L1_CACHE_BYTES;
144}
145
146static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
147 enum dma_data_direction direction)
148{
149 BUG_ON(direction == DMA_NONE);
150 __dma_sync(vaddr, size, (int)direction);
151}
152
153#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
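
dma_alloc_coherent() and dma_free_coherent() above simply dispatch through get_dma_ops(dev). A hypothetical driver-side sketch of the allocation/free pairing (device, ring size and names are made up for illustration):

#include <linux/dma-mapping.h>

/* Hypothetical: allocate one page of coherent memory for a descriptor ring.
 * The call resolves to dev->archdata.dma_ops->alloc_coherent(). */
static void *example_alloc_ring(struct device *dev, dma_addr_t *ring_dma)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, void *ring,
			      dma_addr_t ring_dma)
{
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
}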
diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h
index 08c073badf19..0d73d0c6de37 100644
--- a/arch/microblaze/include/asm/dma.h
+++ b/arch/microblaze/include/asm/dma.h
@@ -18,4 +18,10 @@
18#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1) 18#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)
19#endif 19#endif
20 20
21#ifdef CONFIG_PCI
22extern int isa_dma_bridge_buggy;
23#else
24#define isa_dma_bridge_buggy (0)
25#endif
26
21#endif /* _ASM_MICROBLAZE_DMA_H */ 27#endif /* _ASM_MICROBLAZE_DMA_H */
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index f92fc0dda006..7d4acf2b278e 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -77,7 +77,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
77#define ELF_DATA ELFDATA2MSB 77#define ELF_DATA ELFDATA2MSB
78#endif 78#endif
79 79
80#define USE_ELF_CORE_DUMP
81#define ELF_EXEC_PAGESIZE 4096 80#define ELF_EXEC_PAGESIZE 4096
82 81
83 82
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index 61abbd232640..ec89f2ad0fe1 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -21,7 +21,7 @@
21 * places 21 * places
22 */ 22 */
23 23
24#define PER_CPU(var) per_cpu__##var 24#define PER_CPU(var) var
25 25
26# ifndef __ASSEMBLY__ 26# ifndef __ASSEMBLY__
27DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */ 27DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h
index 90731df9e574..4c7b5d037c88 100644
--- a/arch/microblaze/include/asm/exceptions.h
+++ b/arch/microblaze/include/asm/exceptions.h
@@ -64,12 +64,6 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
64void die(const char *str, struct pt_regs *fp, long err); 64void die(const char *str, struct pt_regs *fp, long err);
65void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr); 65void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);
66 66
67#ifdef CONFIG_MMU
68void __bug(const char *file, int line, void *data);
69int bad_trap(int trap_num, struct pt_regs *regs);
70int debug_trap(struct pt_regs *regs);
71#endif /* CONFIG_MMU */
72
73#if defined(CONFIG_KGDB) 67#if defined(CONFIG_KGDB)
74void (*debugger)(struct pt_regs *regs); 68void (*debugger)(struct pt_regs *regs);
75int (*debugger_bpt)(struct pt_regs *regs); 69int (*debugger_bpt)(struct pt_regs *regs);
diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h
index 8b137891791f..fd2fa2eca62f 100644
--- a/arch/microblaze/include/asm/ftrace.h
+++ b/arch/microblaze/include/asm/ftrace.h
@@ -1 +1,26 @@
1#ifndef _ASM_MICROBLAZE_FTRACE
2#define _ASM_MICROBLAZE_FTRACE
1 3
4#ifdef CONFIG_FUNCTION_TRACER
5
6#define MCOUNT_ADDR ((long)(_mcount))
7#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
8
9#ifndef __ASSEMBLY__
10extern void _mcount(void);
11extern void ftrace_call_graph(void);
12#endif
13
14#ifdef CONFIG_DYNAMIC_FTRACE
15/* reloction of mcount call site is the same as the address */
16static inline unsigned long ftrace_call_adjust(unsigned long addr)
17{
18 return addr;
19}
20
21struct dyn_arch_ftrace {
22};
23#endif /* CONFIG_DYNAMIC_FTRACE */
24
25#endif /* CONFIG_FUNCTION_TRACER */
26#endif /* _ASM_MICROBLAZE_FTRACE */
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 0b745828f42b..ad3fd61b2fe7 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -1 +1,126 @@
1#include <asm-generic/futex.h>
1#ifndef _ASM_MICROBLAZE_FUTEX_H
2#define _ASM_MICROBLAZE_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <linux/uaccess.h>
8#include <asm/errno.h>
9
10#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
11({ \
12 __asm__ __volatile__ ( \
13 "1: lwx %0, %2, r0; " \
14 insn \
15 "2: swx %1, %2, r0; \
16 addic %1, r0, 0; \
17 bnei %1, 1b; \
18 3: \
19 .section .fixup,\"ax\"; \
20 4: brid 3b; \
21 addik %1, r0, %3; \
22 .previous; \
23 .section __ex_table,\"a\"; \
24 .word 1b,4b,2b,4b; \
25 .previous;" \
26 : "=&r" (oldval), "=&r" (ret) \
27 : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
28 ); \
29})
30
31static inline int
32futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
33{
34 int op = (encoded_op >> 28) & 7;
35 int cmp = (encoded_op >> 24) & 15;
36 int oparg = (encoded_op << 8) >> 20;
37 int cmparg = (encoded_op << 20) >> 20;
38 int oldval = 0, ret;
39 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
40 oparg = 1 << oparg;
41
42 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
43 return -EFAULT;
44
45 pagefault_disable();
46
47 switch (op) {
48 case FUTEX_OP_SET:
49 __futex_atomic_op("or %1,%4,%4;", ret, oldval, uaddr, oparg);
50 break;
51 case FUTEX_OP_ADD:
52 __futex_atomic_op("add %1,%0,%4;", ret, oldval, uaddr, oparg);
53 break;
54 case FUTEX_OP_OR:
55 __futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg);
56 break;
57 case FUTEX_OP_ANDN:
58 __futex_atomic_op("andn %1,%0,%4;", ret, oldval, uaddr, oparg);
59 break;
60 case FUTEX_OP_XOR:
61 __futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg);
62 break;
63 default:
64 ret = -ENOSYS;
65 }
66
67 pagefault_enable();
68
69 if (!ret) {
70 switch (cmp) {
71 case FUTEX_OP_CMP_EQ:
72 ret = (oldval == cmparg);
73 break;
74 case FUTEX_OP_CMP_NE:
75 ret = (oldval != cmparg);
76 break;
77 case FUTEX_OP_CMP_LT:
78 ret = (oldval < cmparg);
79 break;
80 case FUTEX_OP_CMP_GE:
81 ret = (oldval >= cmparg);
82 break;
83 case FUTEX_OP_CMP_LE:
84 ret = (oldval <= cmparg);
85 break;
86 case FUTEX_OP_CMP_GT:
87 ret = (oldval > cmparg);
88 break;
89 default:
90 ret = -ENOSYS;
91 }
92 }
93 return ret;
94}
95
96static inline int
97futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
98{
99 int prev, cmp;
100
101 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
102 return -EFAULT;
103
104 __asm__ __volatile__ ("1: lwx %0, %2, r0; \
105 cmp %1, %0, %3; \
106 beqi %1, 3f; \
107 2: swx %4, %2, r0; \
108 addic %1, r0, 0; \
109 bnei %1, 1b; \
110 3: \
111 .section .fixup,\"ax\"; \
112 4: brid 3b; \
113 addik %0, r0, %5; \
114 .previous; \
115 .section __ex_table,\"a\"; \
116 .word 1b,4b,2b,4b; \
117 .previous;" \
118 : "=&r" (prev), "=&r"(cmp) \
119 : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT));
120
121 return prev;
122}
123
124#endif /* __KERNEL__ */
125
126#endif
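
The lwx/swx sequence in __futex_atomic_op() is a load-reservation/store-conditional retry loop. A user-space C analogue of the FUTEX_OP_ADD case, for illustration only, using GCC's CAS builtin in place of MicroBlaze reservations:

#include <stdint.h>

static int futex_op_add_analogue(int *uaddr, int oparg)
{
	int old, new;

	do {
		old = *uaddr;			/* lwx: load with reservation */
		new = old + oparg;		/* the "add %1,%0,%4" step */
	} while (!__sync_bool_compare_and_swap(uaddr, old, new));
						/* swx + addic/bnei retry */
	return old;				/* caller compares against cmparg */
}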
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index fc9997b73c09..00b5398d08c7 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -15,7 +15,23 @@
15#include <asm/page.h> 15#include <asm/page.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/mm.h> /* Get struct page {...} */ 17#include <linux/mm.h> /* Get struct page {...} */
18#include <asm-generic/iomap.h>
18 19
20#ifndef CONFIG_PCI
21#define _IO_BASE 0
22#define _ISA_MEM_BASE 0
23#define PCI_DRAM_OFFSET 0
24#else
25#define _IO_BASE isa_io_base
26#define _ISA_MEM_BASE isa_mem_base
27#define PCI_DRAM_OFFSET pci_dram_offset
28#endif
29
30extern unsigned long isa_io_base;
31extern unsigned long pci_io_base;
32extern unsigned long pci_dram_offset;
33
34extern resource_size_t isa_mem_base;
19 35
20#define IO_SPACE_LIMIT (0xFFFFFFFF) 36#define IO_SPACE_LIMIT (0xFFFFFFFF)
21 37
@@ -92,6 +108,11 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
92#define iowrite16(v, addr) __raw_writew((u16)(v), (u16 *)(addr)) 108#define iowrite16(v, addr) __raw_writew((u16)(v), (u16 *)(addr))
93#define iowrite32(v, addr) __raw_writel((u32)(v), (u32 *)(addr)) 109#define iowrite32(v, addr) __raw_writel((u32)(v), (u32 *)(addr))
94 110
111#define ioread16be(addr) __raw_readw((u16 *)(addr))
112#define ioread32be(addr) __raw_readl((u32 *)(addr))
113#define iowrite16be(v, addr) __raw_writew((u16)(v), (u16 *)(addr))
114#define iowrite32be(v, addr) __raw_writel((u32)(v), (u32 *)(addr))
115
95/* These are the definitions for the x86 IO instructions 116/* These are the definitions for the x86 IO instructions
96 * inb/inw/inl/outb/outw/outl, the "string" versions 117 * inb/inw/inl/outb/outw/outl, the "string" versions
97 * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions 118 * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
@@ -118,15 +139,10 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
118 139
119#ifdef CONFIG_MMU 140#ifdef CONFIG_MMU
120 141
121#define mm_ptov(addr) ((void *)__phys_to_virt(addr))
122#define mm_vtop(addr) ((unsigned long)__virt_to_phys(addr))
123#define phys_to_virt(addr) ((void *)__phys_to_virt(addr)) 142#define phys_to_virt(addr) ((void *)__phys_to_virt(addr))
124#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) 143#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr))
125#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) 144#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr))
126 145
127#define __page_address(page) \
128 (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
129#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
130#define page_to_bus(page) (page_to_phys(page)) 146#define page_to_bus(page) (page_to_phys(page))
131#define bus_to_virt(addr) (phys_to_virt(addr)) 147#define bus_to_virt(addr) (phys_to_virt(addr))
132 148
@@ -217,7 +233,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
217 * Little endian 233 * Little endian
218 */ 234 */
219 235
220#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a)); 236#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
221#define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a)) 237#define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
222 238
223#define in_le32(a) __le32_to_cpu(__raw_readl(a)) 239#define in_le32(a) __le32_to_cpu(__raw_readl(a))
@@ -227,15 +243,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
227#define out_8(a, v) __raw_writeb((v), (a)) 243#define out_8(a, v) __raw_writeb((v), (a))
228#define in_8(a) __raw_readb(a) 244#define in_8(a) __raw_readb(a)
229 245
230/* FIXME */
231static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
232{
233 return (void __iomem *) (port);
234}
235
236static inline void ioport_unmap(void __iomem *addr)
237{
238 /* Nothing to do */
239}
240
246#define ioport_map(port, nr) ((void __iomem *)(port))
247#define ioport_unmap(addr)
248
241#endif /* _ASM_MICROBLAZE_IO_H */ 249#endif /* _ASM_MICROBLAZE_IO_H */
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index 90f050535ebe..31a35c33df63 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -14,6 +14,12 @@
14 14
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16 16
17/* This type is the placeholder for a hardware interrupt number. It has to
18 * be big enough to enclose whatever representation is used by a given
19 * platform.
20 */
21typedef unsigned long irq_hw_number_t;
22
17extern unsigned int nr_irq; 23extern unsigned int nr_irq;
18 24
19#define NO_IRQ (-1) 25#define NO_IRQ (-1)
@@ -21,7 +27,8 @@ extern unsigned int nr_irq;
21struct pt_regs; 27struct pt_regs;
22extern void do_IRQ(struct pt_regs *regs); 28extern void do_IRQ(struct pt_regs *regs);
23 29
24/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space 30/**
31 * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
25 * @device: Device node of the device whose interrupt is to be mapped 32 * @device: Device node of the device whose interrupt is to be mapped
26 * @index: Index of the interrupt to map 33 * @index: Index of the interrupt to map
27 * 34 *
@@ -40,4 +47,32 @@ static inline void irq_dispose_mapping(unsigned int virq)
40 return; 47 return;
41} 48}
42 49
50struct irq_host;
51
52/**
53 * irq_create_mapping - Map a hardware interrupt into linux virq space
54 * @host: host owning this hardware interrupt or NULL for default host
55 * @hwirq: hardware irq number in that host space
56 *
57 * Only one mapping per hardware interrupt is permitted. Returns a linux
58 * virq number.
59 * If the sense/trigger is to be specified, set_irq_type() should be called
60 * on the number returned from that call.
61 */
62extern unsigned int irq_create_mapping(struct irq_host *host,
63 irq_hw_number_t hwirq);
64
65/**
66 * irq_create_of_mapping - Map a hardware interrupt into linux virq space
67 * @controller: Device node of the interrupt controller
68 * @inspec: Interrupt specifier from the device-tree
69 * @intsize: Size of the interrupt specifier from the device-tree
70 *
71 * This function is identical to irq_create_mapping except that it takes
72 * as input informations straight from the device-tree (typically the results
73 * of the of_irq_map_*() functions.
74 */
75extern unsigned int irq_create_of_mapping(struct device_node *controller,
76 u32 *intspec, unsigned int intsize);
77
43#endif /* _ASM_MICROBLAZE_IRQ_H */ 78#endif /* _ASM_MICROBLAZE_IRQ_H */
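
A hypothetical probe-time sketch of how the OF interrupt helpers declared above are typically used (the device node, handler and name are illustrative):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/prom.h>

static int example_attach_irq(struct device_node *np, void *priv,
			      irq_handler_t handler)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (virq == (unsigned int)NO_IRQ)
		return -ENODEV;

	return request_irq(virq, handler, 0, "example-device", priv);
}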
diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h
index dea65645a4f8..2c38c6d80176 100644
--- a/arch/microblaze/include/asm/irqflags.h
+++ b/arch/microblaze/include/asm/irqflags.h
@@ -10,78 +10,73 @@
10#define _ASM_MICROBLAZE_IRQFLAGS_H 10#define _ASM_MICROBLAZE_IRQFLAGS_H
11 11
12#include <linux/irqflags.h> 12#include <linux/irqflags.h>
13#include <asm/registers.h>
13 14
14# if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 15# if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
15 16
16# define local_irq_save(flags) \ 17# define raw_local_irq_save(flags) \
17 do { \ 18 do { \
18 asm volatile ("# local_irq_save \n\t" \ 19 asm volatile (" msrclr %0, %1; \
19 "msrclr %0, %1 \n\t" \ 20 nop;" \
20 "nop \n\t" \
21 : "=r"(flags) \ 21 : "=r"(flags) \
22 : "i"(MSR_IE) \ 22 : "i"(MSR_IE) \
23 : "memory"); \ 23 : "memory"); \
24 } while (0) 24 } while (0)
25 25
26# define local_irq_disable() \ 26# define raw_local_irq_disable() \
27 do { \ 27 do { \
28 asm volatile ("# local_irq_disable \n\t" \ 28 asm volatile (" msrclr r0, %0; \
29 "msrclr r0, %0 \n\t" \ 29 nop;" \
30 "nop \n\t" \ 30 : \
31 : \ 31 : "i"(MSR_IE) \
32 : "i"(MSR_IE) \ 32 : "memory"); \
33 : "memory"); \
34 } while (0) 33 } while (0)
35 34
36# define local_irq_enable() \ 35# define raw_local_irq_enable() \
37 do { \ 36 do { \
38 asm volatile ("# local_irq_enable \n\t" \ 37 asm volatile (" msrset r0, %0; \
39 "msrset r0, %0 \n\t" \ 38 nop;" \
40 "nop \n\t" \ 39 : \
41 : \ 40 : "i"(MSR_IE) \
42 : "i"(MSR_IE) \ 41 : "memory"); \
43 : "memory"); \
44 } while (0) 42 } while (0)
45 43
46# else /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR == 0 */ 44# else /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR == 0 */
47 45
48# define local_irq_save(flags) \ 46# define raw_local_irq_save(flags) \
49 do { \ 47 do { \
50 register unsigned tmp; \ 48 register unsigned tmp; \
51 asm volatile ("# local_irq_save \n\t" \ 49 asm volatile (" mfs %0, rmsr; \
52 "mfs %0, rmsr \n\t" \ 50 nop; \
53 "nop \n\t" \ 51 andi %1, %0, %2; \
54 "andi %1, %0, %2 \n\t" \ 52 mts rmsr, %1; \
55 "mts rmsr, %1 \n\t" \ 53 nop;" \
56 "nop \n\t" \
57 : "=r"(flags), "=r" (tmp) \ 54 : "=r"(flags), "=r" (tmp) \
58 : "i"(~MSR_IE) \ 55 : "i"(~MSR_IE) \
59 : "memory"); \ 56 : "memory"); \
60 } while (0) 57 } while (0)
61 58
62# define local_irq_disable() \ 59# define raw_local_irq_disable() \
63 do { \ 60 do { \
64 register unsigned tmp; \ 61 register unsigned tmp; \
65 asm volatile ("# local_irq_disable \n\t" \ 62 asm volatile (" mfs %0, rmsr; \
66 "mfs %0, rmsr \n\t" \ 63 nop; \
67 "nop \n\t" \ 64 andi %0, %0, %1; \
68 "andi %0, %0, %1 \n\t" \ 65 mts rmsr, %0; \
69 "mts rmsr, %0 \n\t" \ 66 nop;" \
70 "nop \n\t" \
71 : "=r"(tmp) \ 67 : "=r"(tmp) \
72 : "i"(~MSR_IE) \ 68 : "i"(~MSR_IE) \
73 : "memory"); \ 69 : "memory"); \
74 } while (0) 70 } while (0)
75 71
76# define local_irq_enable() \ 72# define raw_local_irq_enable() \
77 do { \ 73 do { \
78 register unsigned tmp; \ 74 register unsigned tmp; \
79 asm volatile ("# local_irq_enable \n\t" \ 75 asm volatile (" mfs %0, rmsr; \
80 "mfs %0, rmsr \n\t" \ 76 nop; \
81 "nop \n\t" \ 77 ori %0, %0, %1; \
82 "ori %0, %0, %1 \n\t" \ 78 mts rmsr, %0; \
83 "mts rmsr, %0 \n\t" \ 79 nop;" \
84 "nop \n\t" \
85 : "=r"(tmp) \ 80 : "=r"(tmp) \
86 : "i"(MSR_IE) \ 81 : "i"(MSR_IE) \
87 : "memory"); \ 82 : "memory"); \
@@ -89,35 +84,28 @@
89 84
90# endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ 85# endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */
91 86
92#define local_save_flags(flags) \
93 do { \
94 asm volatile ("# local_save_flags \n\t" \
95 "mfs %0, rmsr \n\t" \
96 "nop \n\t" \
97 : "=r"(flags) \
98 : \
99 : "memory"); \
100 } while (0)
101
102#define local_irq_restore(flags) \
103 do { \
104 asm volatile ("# local_irq_restore \n\t"\
105 "mts rmsr, %0 \n\t" \
106 "nop \n\t" \
107 : \
108 : "r"(flags) \
109 : "memory"); \
110 } while (0)
111
112static inline int irqs_disabled(void)
113{
114 unsigned long flags;
115
116 local_save_flags(flags);
117 return ((flags & MSR_IE) == 0);
118}
119
120#define raw_irqs_disabled irqs_disabled
121#define raw_irqs_disabled_flags(flags) ((flags) == 0)
122
87#define raw_local_irq_restore(flags) \
88 do { \
89 asm volatile (" mts rmsr, %0; \
90 nop;" \
91 : \
92 : "r"(flags) \
93 : "memory"); \
94 } while (0)
95
96static inline unsigned long get_msr(void)
97{
98 unsigned long flags;
99 asm volatile (" mfs %0, rmsr; \
100 nop;" \
101 : "=r"(flags) \
102 : \
103 : "memory"); \
104 return flags;
105}
106
107#define raw_local_save_flags(flags) ((flags) = get_msr())
108#define raw_irqs_disabled() ((get_msr() & MSR_IE) == 0)
109#define raw_irqs_disabled_flags(flags) ((flags & MSR_IE) == 0)
110
123#endif /* _ASM_MICROBLAZE_IRQFLAGS_H */ 111#endif /* _ASM_MICROBLAZE_IRQFLAGS_H */
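
The generic local_irq_save()/local_irq_restore() wrappers in linux/irqflags.h expand to the raw_* macros above (plus tracing), so a typical critical section looks like this sketch:

#include <linux/irqflags.h>

/* Hypothetical: update a counter with local interrupts masked.
 * local_irq_save() ends up in raw_local_irq_save() (msrclr or mfs/mts),
 * local_irq_restore() in raw_local_irq_restore() (mts rmsr). */
static void example_increment_locked(unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);
	(*counter)++;
	local_irq_restore(flags);
}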
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 880c988c2237..de493f86d28f 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -31,6 +31,9 @@
31 31
32#ifndef __ASSEMBLY__ 32#ifndef __ASSEMBLY__
33 33
34/* MS be sure that SLAB allocates aligned objects */
35#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
36
34#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) 37#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
35#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) 38#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
36 39
@@ -62,12 +65,6 @@ extern unsigned int __page_offset;
62#define PAGE_OFFSET CONFIG_KERNEL_START 65#define PAGE_OFFSET CONFIG_KERNEL_START
63 66
64/* 67/*
65 * MAP_NR -- given an address, calculate the index of the page struct which
66 * points to the address's page.
67 */
68#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
69
70/*
71 * The basic type of a PTE - 32 bit physical addressing. 68 * The basic type of a PTE - 32 bit physical addressing.
72 */ 69 */
73typedef unsigned long pte_basic_t; 70typedef unsigned long pte_basic_t;
@@ -76,14 +73,7 @@ typedef unsigned long pte_basic_t;
76 73
77#endif /* CONFIG_MMU */ 74#endif /* CONFIG_MMU */
78 75
79# ifndef CONFIG_MMU
80# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
81# define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
82# define free_user_page(page, addr) free_page(addr)
83# else /* CONFIG_MMU */
84extern void copy_page(void *to, void *from);
85# endif /* CONFIG_MMU */
86
76# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
87# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) 77# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
88 78
89# define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) 79# define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
@@ -154,7 +144,11 @@ extern int page_is_ram(unsigned long pfn);
154# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) 144# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
155 145
156# ifdef CONFIG_MMU 146# ifdef CONFIG_MMU
157# define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr)) 147
148# define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
149# define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
150# define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
151
158# else /* CONFIG_MMU */ 152# else /* CONFIG_MMU */
159# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) 153# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
160# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) 154# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
@@ -164,7 +158,8 @@ extern int page_is_ram(unsigned long pfn);
164# endif /* CONFIG_MMU */ 158# endif /* CONFIG_MMU */
165 159
166# ifndef CONFIG_MMU 160# ifndef CONFIG_MMU
167# define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
161# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && \
162 ((pfn) <= (min_low_pfn + max_mapnr)))
168# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) 163# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
169# else /* CONFIG_MMU */ 164# else /* CONFIG_MMU */
170# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT) 165# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h
index 7ad28f6f5f1a..0c77cda9f5d8 100644
--- a/arch/microblaze/include/asm/pci-bridge.h
+++ b/arch/microblaze/include/asm/pci-bridge.h
@@ -1 +1,196 @@
1#ifndef _ASM_MICROBLAZE_PCI_BRIDGE_H
2#define _ASM_MICROBLAZE_PCI_BRIDGE_H
3#ifdef __KERNEL__
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
1#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/list.h>
12#include <linux/ioport.h>
13
14struct device_node;
15
16enum {
17 /* Force re-assigning all resources (ignore firmware
18 * setup completely)
19 */
20 PCI_REASSIGN_ALL_RSRC = 0x00000001,
21
22 /* Re-assign all bus numbers */
23 PCI_REASSIGN_ALL_BUS = 0x00000002,
24
25 /* Do not try to assign, just use existing setup */
26 PCI_PROBE_ONLY = 0x00000004,
27
28 /* Don't bother with ISA alignment unless the bridge has
29 * ISA forwarding enabled
30 */
31 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
32
33 /* Enable domain numbers in /proc */
34 PCI_ENABLE_PROC_DOMAINS = 0x00000010,
35 /* ... except for domain 0 */
36 PCI_COMPAT_DOMAIN_0 = 0x00000020,
37};
38
39/*
40 * Structure of a PCI controller (host bridge)
41 */
42struct pci_controller {
43 struct pci_bus *bus;
44 char is_dynamic;
45 struct device_node *dn;
46 struct list_head list_node;
47 struct device *parent;
48
49 int first_busno;
50 int last_busno;
51
52 int self_busno;
53
54 void __iomem *io_base_virt;
55 resource_size_t io_base_phys;
56
57 resource_size_t pci_io_size;
58
59 /* Some machines (PReP) have a non 1:1 mapping of
60 * the PCI memory space in the CPU bus space
61 */
62 resource_size_t pci_mem_offset;
63
64 /* Some machines have a special region to forward the ISA
65 * "memory" cycles such as VGA memory regions. Left to 0
66 * if unsupported
67 */
68 resource_size_t isa_mem_phys;
69 resource_size_t isa_mem_size;
70
71 struct pci_ops *ops;
72 unsigned int __iomem *cfg_addr;
73 void __iomem *cfg_data;
74
75 /*
76 * Used for variants of PCI indirect handling and possible quirks:
77 * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
78 * EXT_REG - provides access to PCI-e extended registers
79 * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS
80 * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
81 * to determine which bus number to match on when generating type0
82 * config cycles
83 * NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
84 * hanging if we don't have link and try to do config cycles to
85 * anything but the PHB. Only allow talking to the PHB if this is
86 * set.
87 * BIG_ENDIAN - cfg_addr is a big endian register
88 * BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs
89 * on the PLB4. Effectively disable MRM commands by setting this.
90 */
91#define INDIRECT_TYPE_SET_CFG_TYPE 0x00000001
92#define INDIRECT_TYPE_EXT_REG 0x00000002
93#define INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004
94#define INDIRECT_TYPE_NO_PCIE_LINK 0x00000008
95#define INDIRECT_TYPE_BIG_ENDIAN 0x00000010
96#define INDIRECT_TYPE_BROKEN_MRM 0x00000020
97 u32 indirect_type;
98
99 /* Currently, we limit ourselves to 1 IO range and 3 mem
100 * ranges since the common pci_bus structure can't handle more
101 */
102 struct resource io_resource;
103 struct resource mem_resources[3];
104 int global_number; /* PCI domain number */
105};
106
107static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
108{
109 return bus->sysdata;
110}
111
112static inline int isa_vaddr_is_ioport(void __iomem *address)
113{
114 /* No specific ISA handling on ppc32 at this stage, it
115 * all goes through PCI
116 */
117 return 0;
118}
119
120/* These are used for config access before all the PCI probing
121 has been done. */
122extern int early_read_config_byte(struct pci_controller *hose, int bus,
123 int dev_fn, int where, u8 *val);
124extern int early_read_config_word(struct pci_controller *hose, int bus,
125 int dev_fn, int where, u16 *val);
126extern int early_read_config_dword(struct pci_controller *hose, int bus,
127 int dev_fn, int where, u32 *val);
128extern int early_write_config_byte(struct pci_controller *hose, int bus,
129 int dev_fn, int where, u8 val);
130extern int early_write_config_word(struct pci_controller *hose, int bus,
131 int dev_fn, int where, u16 val);
132extern int early_write_config_dword(struct pci_controller *hose, int bus,
133 int dev_fn, int where, u32 val);
134
135extern int early_find_capability(struct pci_controller *hose, int bus,
136 int dev_fn, int cap);
137
138extern void setup_indirect_pci(struct pci_controller *hose,
139 resource_size_t cfg_addr,
140 resource_size_t cfg_data, u32 flags);
141
142/* Get the PCI host controller for an OF device */
143extern struct pci_controller *pci_find_hose_for_OF_device(
144 struct device_node *node);
145
146/* Fill up host controller resources from the OF node */
147extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
148 struct device_node *dev, int primary);
149
150/* Allocate & free a PCI host bridge structure */
151extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
152extern void pcibios_free_controller(struct pci_controller *phb);
153extern void pcibios_setup_phb_resources(struct pci_controller *hose);
154
155#ifdef CONFIG_PCI
156extern unsigned int pci_flags;
157
158static inline void pci_set_flags(int flags)
159{
160 pci_flags = flags;
161}
162
163static inline void pci_add_flags(int flags)
164{
165 pci_flags |= flags;
166}
167
168static inline int pci_has_flag(int flag)
169{
170 return pci_flags & flag;
171}
172
173extern struct list_head hose_list;
174
175extern unsigned long pci_address_to_pio(phys_addr_t address);
176extern int pcibios_vaddr_is_ioport(void __iomem *address);
177#else
178static inline unsigned long pci_address_to_pio(phys_addr_t address)
179{
180 return (unsigned long)-1;
181}
182static inline int pcibios_vaddr_is_ioport(void __iomem *address)
183{
184 return 0;
185}
186
187static inline void pci_set_flags(int flags) { }
188static inline void pci_add_flags(int flags) { }
189static inline int pci_has_flag(int flag)
190{
191 return 0;
192}
193#endif /* CONFIG_PCI */
194
195#endif /* __KERNEL__ */
196#endif /* _ASM_MICROBLAZE_PCI_BRIDGE_H */
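
pci_bus_to_host() above recovers the struct pci_controller stored in bus->sysdata; a hypothetical helper reading one of its fields (names are illustrative):

#include <linux/pci.h>
#include <asm/pci-bridge.h>

static resource_size_t example_phb_io_size(const struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose ? hose->pci_io_size : 0;
}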
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 9f0df5faf2c8..5a388eeeb28f 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -1 +1,169 @@
1#include <asm-generic/pci.h>
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Based on powerpc version
8 */
9
10#ifndef __ASM_MICROBLAZE_PCI_H
11#define __ASM_MICROBLAZE_PCI_H
12#ifdef __KERNEL__
13
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/string.h>
17#include <linux/dma-mapping.h>
18#include <linux/pci.h>
19
20#include <asm/scatterlist.h>
21#include <asm/io.h>
22#include <asm/prom.h>
23#include <asm/pci-bridge.h>
24
25#define PCIBIOS_MIN_IO 0x1000
26#define PCIBIOS_MIN_MEM 0x10000000
27
28struct pci_dev;
29
30/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
31#define IOBASE_BRIDGE_NUMBER 0
32#define IOBASE_MEMORY 1
33#define IOBASE_IO 2
34#define IOBASE_ISA_IO 3
35#define IOBASE_ISA_MEM 4
36
37#define pcibios_scan_all_fns(a, b) 0
38
39/*
40 * Set this to 1 if you want the kernel to re-assign all PCI
41 * bus numbers (don't do that on ppc64 yet !)
42 */
43#define pcibios_assign_all_busses() \
44 (pci_has_flag(PCI_REASSIGN_ALL_BUS))
45
46static inline void pcibios_set_master(struct pci_dev *dev)
47{
48 /* No special bus mastering setup handling */
49}
50
51static inline void pcibios_penalize_isa_irq(int irq, int active)
52{
53 /* We don't do dynamic PCI IRQ allocation */
54}
55
56#ifdef CONFIG_PCI
57extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
58extern struct dma_map_ops *get_pci_dma_ops(void);
59#else /* CONFIG_PCI */
60#define set_pci_dma_ops(d)
61#define get_pci_dma_ops() NULL
62#endif
63
64#ifdef CONFIG_PCI
65static inline void pci_dma_burst_advice(struct pci_dev *pdev,
66 enum pci_dma_burst_strategy *strat,
67 unsigned long *strategy_parameter)
68{
69 *strat = PCI_DMA_BURST_INFINITY;
70 *strategy_parameter = ~0UL;
71}
72#endif
73
74extern int pci_domain_nr(struct pci_bus *bus);
75
76/* Decide whether to display the domain number in /proc */
77extern int pci_proc_domain(struct pci_bus *bus);
78
79struct vm_area_struct;
80/* Map a range of PCI memory or I/O space for a device into user space */
81int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
82 enum pci_mmap_state mmap_state, int write_combine);
83
84/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
85#define HAVE_PCI_MMAP 1
86
87extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
88 size_t count);
89extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
90 size_t count);
91extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
92 struct vm_area_struct *vma,
93 enum pci_mmap_state mmap_state);
94
95#define HAVE_PCI_LEGACY 1
96
97/* The PCI address space does equal the physical memory
98 * address space (no IOMMU). The IDE and SCSI device layers use
99 * this boolean for bounce buffer decisions.
100 */
101#define PCI_DMA_BUS_IS_PHYS (1)
102
103extern void pcibios_resource_to_bus(struct pci_dev *dev,
104 struct pci_bus_region *region,
105 struct resource *res);
106
107extern void pcibios_bus_to_resource(struct pci_dev *dev,
108 struct resource *res,
109 struct pci_bus_region *region);
110
111static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
112 struct resource *res)
113{
114 struct resource *root = NULL;
115
116 if (res->flags & IORESOURCE_IO)
117 root = &ioport_resource;
118 if (res->flags & IORESOURCE_MEM)
119 root = &iomem_resource;
120
121 return root;
122}
123
124extern void pcibios_claim_one_bus(struct pci_bus *b);
125
126extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
127
128extern void pcibios_resource_survey(void);
129
130extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
131extern int remove_phb_dynamic(struct pci_controller *phb);
132
133extern struct pci_dev *of_create_pci_dev(struct device_node *node,
134 struct pci_bus *bus, int devfn);
135
136extern void of_scan_pci_bridge(struct device_node *node,
137 struct pci_dev *dev);
138
139extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
140extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
141
142extern int pci_read_irq_line(struct pci_dev *dev);
143
144extern int pci_bus_find_capability(struct pci_bus *bus,
145 unsigned int devfn, int cap);
146
147struct file;
148extern pgprot_t pci_phys_mem_access_prot(struct file *file,
149 unsigned long pfn,
150 unsigned long size,
151 pgprot_t prot);
152
153#define HAVE_ARCH_PCI_RESOURCE_TO_USER
154extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
155 const struct resource *rsrc,
156 resource_size_t *start, resource_size_t *end);
157
158extern void pcibios_setup_bus_devices(struct pci_bus *bus);
159extern void pcibios_setup_bus_self(struct pci_bus *bus);
160
161/* This part of code was originaly in xilinx-pci.h */
162#ifdef CONFIG_PCI_XILINX
163extern void __init xilinx_pci_init(void);
164#else
165static inline void __init xilinx_pci_init(void) { return; }
166#endif
167
168#endif /* __KERNEL__ */
169#endif /* __ASM_MICROBLAZE_PCI_H */
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index b0131da1387b..c614a893f8a3 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -19,6 +19,7 @@
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/cache.h> 21#include <asm/cache.h>
22#include <asm/pgtable.h>
22 23
23#define PGDIR_ORDER 0 24#define PGDIR_ORDER 0
24 25
@@ -106,26 +107,8 @@ extern inline void free_pgd_slow(pgd_t *pgd)
106 */ 107 */
107#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) 108#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
108#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) 109#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
109/* FIXME two definition - look below */
110#define pmd_free(mm, x) do { } while (0)
111#define pgd_populate(mm, pmd, pte) BUG()
112 110
113static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 111extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
114 unsigned long address)
115{
116 pte_t *pte;
117 extern int mem_init_done;
118 extern void *early_get_page(void);
119 if (mem_init_done) {
120 pte = (pte_t *)__get_free_page(GFP_KERNEL |
121 __GFP_REPEAT | __GFP_ZERO);
122 } else {
123 pte = (pte_t *)early_get_page();
124 if (pte)
125 clear_page(pte);
126 }
127 return pte;
128}
129 112
130static inline struct page *pte_alloc_one(struct mm_struct *mm, 113static inline struct page *pte_alloc_one(struct mm_struct *mm,
131 unsigned long address) 114 unsigned long address)
@@ -192,14 +175,14 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
192 * the pgd will always be present.. 175 * the pgd will always be present..
193 */ 176 */
194#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) 177#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
195/*#define pmd_free(mm, x) do { } while (0)*/ 178#define pmd_free(mm, x) do { } while (0)
196#define __pmd_free_tlb(tlb, x, addr) do { } while (0) 179#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
197#define pgd_populate(mm, pmd, pte) BUG() 180#define pgd_populate(mm, pmd, pte) BUG()
198 181
199extern int do_check_pgt_cache(int, int); 182extern int do_check_pgt_cache(int, int);
200 183
201#endif /* CONFIG_MMU */ 184#endif /* CONFIG_MMU */
202 185
203#define check_pgt_cache() do {} while (0) 186#define check_pgt_cache() do { } while (0)
204 187
205#endif /* _ASM_MICROBLAZE_PGALLOC_H */ 188#endif /* _ASM_MICROBLAZE_PGALLOC_H */
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index cc3a4dfc3eaa..ca2d92871545 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -16,6 +16,10 @@
16#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 16#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
17 remap_pfn_range(vma, vaddr, pfn, size, prot) 17 remap_pfn_range(vma, vaddr, pfn, size, prot)
18 18
19#ifndef __ASSEMBLY__
20extern int mem_init_done;
21#endif
22
19#ifndef CONFIG_MMU 23#ifndef CONFIG_MMU
20 24
21#define pgd_present(pgd) (1) /* pages are always present on non MMU */ 25#define pgd_present(pgd) (1) /* pages are always present on non MMU */
@@ -51,6 +55,8 @@ static inline int pte_file(pte_t pte) { return 0; }
51 55
52#define arch_enter_lazy_cpu_mode() do {} while (0) 56#define arch_enter_lazy_cpu_mode() do {} while (0)
53 57
58#define pgprot_noncached_wc(prot) prot
59
54#else /* CONFIG_MMU */ 60#else /* CONFIG_MMU */
55 61
56#include <asm-generic/4level-fixup.h> 62#include <asm-generic/4level-fixup.h>
@@ -68,7 +74,6 @@ static inline int pte_file(pte_t pte) { return 0; }
68 74
69extern unsigned long va_to_phys(unsigned long address); 75extern unsigned long va_to_phys(unsigned long address);
70extern pte_t *va_to_pte(unsigned long address); 76extern pte_t *va_to_pte(unsigned long address);
71extern unsigned long ioremap_bot, ioremap_base;
72 77
73/* 78/*
74 * The following only work if pte_present() is true. 79 * The following only work if pte_present() is true.
@@ -85,11 +90,25 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
85#define VMALLOC_START (CONFIG_KERNEL_START + \ 90#define VMALLOC_START (CONFIG_KERNEL_START + \
86 max(32 * 1024 * 1024UL, memory_size)) 91 max(32 * 1024 * 1024UL, memory_size))
87#define VMALLOC_END ioremap_bot 92#define VMALLOC_END ioremap_bot
88#define VMALLOC_VMADDR(x) ((unsigned long)(x))
89 93
90#endif /* __ASSEMBLY__ */ 94#endif /* __ASSEMBLY__ */
91 95
92/* 96/*
97 * Macro to mark a page protection value as "uncacheable".
98 */
99
100#define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \
101 _PAGE_WRITETHRU)
102
103#define pgprot_noncached(prot) \
104 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
105 _PAGE_NO_CACHE | _PAGE_GUARDED))
106
107#define pgprot_noncached_wc(prot) \
108 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
109 _PAGE_NO_CACHE))
110
111/*
93 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash 112 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
94 * table containing PTEs, together with a set of 16 segment registers, to 113 * table containing PTEs, together with a set of 16 segment registers, to
95 * define the virtual to physical address mapping. 114 * define the virtual to physical address mapping.
@@ -397,7 +416,7 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
397 mts rmsr, %2\n\ 416 mts rmsr, %2\n\
398 nop" 417 nop"
399 : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p) 418 : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
400 : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p) 419 : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
401 : "cc"); 420 : "cc");
402 421
403 return old; 422 return old;
@@ -493,15 +512,6 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
493extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 512extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
494 513
495/* 514/*
496 * When flushing the tlb entry for a page, we also need to flush the hash
497 * table entry. flush_hash_page is assembler (for speed) in hashtable.S.
498 */
499extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);
500
501/* Add an HPTE to the hash table */
502extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
503
504/*
505 * Encode and decode a swap entry. 515 * Encode and decode a swap entry.
506 * Note that the bits we use in a PTE for representing a swap entry 516 * Note that the bits we use in a PTE for representing a swap entry
507 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit 517 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
@@ -514,15 +524,7 @@ extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
514#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 }) 524#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
515#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 }) 525#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })
516 526
517
518/* CONFIG_APUS */
519/* For virtual address to physical address conversion */
520extern void cache_clear(__u32 addr, int length);
521extern void cache_push(__u32 addr, int length);
522extern int mm_end_of_chunk(unsigned long addr, int len);
523extern unsigned long iopa(unsigned long addr); 527extern unsigned long iopa(unsigned long addr);
524/* extern unsigned long mm_ptov(unsigned long addr) \
525 __attribute__ ((const)); TBD */
526 528
527/* Values for nocacheflag and cmode */ 529/* Values for nocacheflag and cmode */
528/* These are not used by the APUS kernel_map, but prevents 530/* These are not used by the APUS kernel_map, but prevents
@@ -533,18 +535,6 @@ extern unsigned long iopa(unsigned long addr);
533#define IOMAP_NOCACHE_NONSER 2 535#define IOMAP_NOCACHE_NONSER 2
534#define IOMAP_NO_COPYBACK 3 536#define IOMAP_NO_COPYBACK 3
535 537
536/*
537 * Map some physical address range into the kernel address space.
538 */
539extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
540 int nocacheflag, unsigned long *memavailp);
541
542/*
543 * Set cache mode of (kernel space) address range.
544 */
545extern void kernel_set_cachemode(unsigned long address, unsigned long size,
546 unsigned int cmode);
547
548/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ 538/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
549#define kern_addr_valid(addr) (1) 539#define kern_addr_valid(addr) (1)
550 540
@@ -558,26 +548,15 @@ extern void kernel_set_cachemode(unsigned long address, unsigned long size,
558void do_page_fault(struct pt_regs *regs, unsigned long address, 548void do_page_fault(struct pt_regs *regs, unsigned long address,
559 unsigned long error_code); 549 unsigned long error_code);
560 550
561void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
562 unsigned int size, int flags);
563
564void __init adjust_total_lowmem(void);
565void mapin_ram(void); 551void mapin_ram(void);
566int map_page(unsigned long va, phys_addr_t pa, int flags); 552int map_page(unsigned long va, phys_addr_t pa, int flags);
567 553
568extern int mem_init_done; 554extern int mem_init_done;
569extern unsigned long ioremap_base;
570extern unsigned long ioremap_bot;
571 555
572asmlinkage void __init mmu_init(void); 556asmlinkage void __init mmu_init(void);
573 557
574void __init *early_get_page(void); 558void __init *early_get_page(void);
575 559
576void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
577void consistent_free(void *vaddr);
578void consistent_sync(void *vaddr, size_t size, int direction);
579void consistent_sync_page(struct page *page, unsigned long offset,
580 size_t size, int direction);
581#endif /* __ASSEMBLY__ */ 560#endif /* __ASSEMBLY__ */
582#endif /* __KERNEL__ */ 561#endif /* __KERNEL__ */
583 562
@@ -586,6 +565,14 @@ void consistent_sync_page(struct page *page, unsigned long offset,
586#ifndef __ASSEMBLY__ 565#ifndef __ASSEMBLY__
587#include <asm-generic/pgtable.h> 566#include <asm-generic/pgtable.h>
588 567
568extern unsigned long ioremap_bot, ioremap_base;
569
570void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
571void consistent_free(size_t size, void *vaddr);
572void consistent_sync(void *vaddr, size_t size, int direction);
573void consistent_sync_page(struct page *page, unsigned long offset,
574 size_t size, int direction);
575
589void setup_memory(void); 576void setup_memory(void);
590#endif /* __ASSEMBLY__ */ 577#endif /* __ASSEMBLY__ */
591 578
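
pgprot_noncached() above masks _PAGE_CACHE_CTL and sets _PAGE_NO_CACHE | _PAGE_GUARDED; a hypothetical driver mmap() sketch showing where such a protection is applied (pfn taken from vm_pgoff purely for illustration):

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Map device registers uncached into user space. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}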
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 563c6b9453f0..8eeb09211ece 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -14,7 +14,6 @@
14#include <asm/ptrace.h> 14#include <asm/ptrace.h>
15#include <asm/setup.h> 15#include <asm/setup.h>
16#include <asm/registers.h> 16#include <asm/registers.h>
17#include <asm/segment.h>
18#include <asm/entry.h> 17#include <asm/entry.h>
19#include <asm/current.h> 18#include <asm/current.h>
20 19
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 37e6f305a68e..e7d67a329bd7 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -12,172 +12,44 @@
12 * 2 of the License, or (at your option) any later version. 12 * 2 of the License, or (at your option) any later version.
13 */ 13 */
14 14
15#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
16
15#ifndef _ASM_MICROBLAZE_PROM_H 17#ifndef _ASM_MICROBLAZE_PROM_H
16#define _ASM_MICROBLAZE_PROM_H 18#define _ASM_MICROBLAZE_PROM_H
17#ifdef __KERNEL__ 19#ifdef __KERNEL__
18
19/* Definitions used by the flattened device tree */
20#define OF_DT_HEADER 0xd00dfeed /* marker */
21#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
22#define OF_DT_END_NODE 0x2 /* End node */
23#define OF_DT_PROP 0x3 /* Property: name off, size, content */
24#define OF_DT_NOP 0x4 /* nop */
25#define OF_DT_END 0x9
26
27#define OF_DT_VERSION 0x10
28
29#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
30 21
31#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/of_fdt.h>
32#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
33#include <linux/platform_device.h> 25#include <linux/platform_device.h>
34#include <asm/irq.h> 26#include <asm/irq.h>
35#include <asm/atomic.h> 27#include <asm/atomic.h>
36 28
37#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
38#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
39
40#define of_compat_cmp(s1, s2, l) strncasecmp((s1), (s2), (l))
41#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
42#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
43
44/*
45 * This is what gets passed to the kernel by prom_init or kexec
46 *
47 * The dt struct contains the device tree structure, full pathes and
48 * property contents. The dt strings contain a separate block with just
49 * the strings for the property names, and is fully page aligned and
50 * self contained in a page, so that it can be kept around by the kernel,
51 * each property name appears only once in this page (cheap compression)
52 *
53 * the mem_rsvmap contains a map of reserved ranges of physical memory,
54 * passing it here instead of in the device-tree itself greatly simplifies
55 * the job of everybody. It's just a list of u64 pairs (base/size) that
56 * ends when size is 0
57 */
58struct boot_param_header {
59 u32 magic; /* magic word OF_DT_HEADER */
60 u32 totalsize; /* total size of DT block */
61 u32 off_dt_struct; /* offset to structure */
62 u32 off_dt_strings; /* offset to strings */
63 u32 off_mem_rsvmap; /* offset to memory reserve map */
64 u32 version; /* format version */
65 u32 last_comp_version; /* last compatible version */
66 /* version 2 fields below */
67 u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
68 /* version 3 fields below */
69 u32 dt_strings_size; /* size of the DT strings block */
70 /* version 17 fields below */
71 u32 dt_struct_size; /* size of the DT structure block */
72};
73
74typedef u32 phandle;
75typedef u32 ihandle;
76
77struct property {
78 char *name;
79 int length;
80 void *value;
81 struct property *next;
82};
83
84struct device_node {
85 const char *name;
86 const char *type;
87 phandle node;
88 phandle linux_phandle;
89 char *full_name;
90
91 struct property *properties;
92 struct property *deadprops; /* removed properties */
93 struct device_node *parent;
94 struct device_node *child;
95 struct device_node *sibling;
96 struct device_node *next; /* next device of same type */
97 struct device_node *allnext; /* next in list of all nodes */
98 struct proc_dir_entry *pde; /* this node's proc directory */
99 struct kref kref;
100 unsigned long _flags;
101 void *data;
102};
103
104extern struct device_node *of_chosen;
105
106static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
107{
108 return test_bit(flag, &n->_flags);
109}
110
111static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
112{
113 set_bit(flag, &n->_flags);
114}
115
116#define HAVE_ARCH_DEVTREE_FIXUPS 29#define HAVE_ARCH_DEVTREE_FIXUPS
117 30
118static inline void set_node_proc_entry(struct device_node *dn,
119 struct proc_dir_entry *de)
120{
121 dn->pde = de;
122}
123
124extern struct device_node *allnodes; /* temporary while merging */
125extern rwlock_t devtree_lock; /* temporary while merging */
126
127extern struct device_node *of_find_all_nodes(struct device_node *prev);
128extern struct device_node *of_node_get(struct device_node *node);
129extern void of_node_put(struct device_node *node);
130
131/* For scanning the flat device-tree at boot time */
132extern int __init of_scan_flat_dt(int (*it)(unsigned long node,
133 const char *uname, int depth,
134 void *data),
135 void *data);
136extern void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
137 unsigned long *size);
138extern int __init
139 of_flat_dt_is_compatible(unsigned long node, const char *name);
140extern unsigned long __init of_get_flat_dt_root(void);
141
142/* For updating the device tree at runtime */
143extern void of_attach_node(struct device_node *);
144extern void of_detach_node(struct device_node *);
145
146/* Other Prototypes */ 31/* Other Prototypes */
147extern void finish_device_tree(void);
148extern void unflatten_device_tree(void);
149extern int early_uartlite_console(void); 32extern int early_uartlite_console(void);
150extern void early_init_devtree(void *);
151extern int machine_is_compatible(const char *compat);
152extern void print_properties(struct device_node *node);
153extern int prom_n_intr_cells(struct device_node *np);
154extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
155extern int prom_add_property(struct device_node *np, struct property *prop);
156extern int prom_remove_property(struct device_node *np, struct property *prop);
157extern int prom_update_property(struct device_node *np,
158 struct property *newprop,
159 struct property *oldprop);
160 33
161extern struct resource *request_OF_resource(struct device_node *node, 34#ifdef CONFIG_PCI
162 int index, const char *name_postfix); 35/*
163extern int release_OF_resource(struct device_node *node, int index); 36 * PCI <-> OF matching functions
37 * (XXX should these be here?)
38 */
39struct pci_bus;
40struct pci_dev;
41extern int pci_device_from_OF_node(struct device_node *node,
42 u8 *bus, u8 *devfn);
43extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus,
44 int devfn);
45extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
46extern void pci_create_OF_bus_map(void);
47#endif
164 48
165/* 49/*
166 * OF address retrieval & translation 50 * OF address retrieval & translation
167 */ 51 */
168 52
169/* Helper to read a big number; size is in cells (not bytes) */
170static inline u64 of_read_number(const u32 *cell, int size)
171{
172 u64 r = 0;
173 while (size--)
174 r = (r << 32) | *(cell++);
175 return r;
176}
177
178/* Like of_read_number, but we want an unsigned long result */
179#define of_read_ulong(cell, size) of_read_number(cell, size)
180
181/* Translate an OF address block into a CPU physical address 53/* Translate an OF address block into a CPU physical address
182 */ 54 */
183extern u64 of_translate_address(struct device_node *np, const u32 *addr); 55extern u64 of_translate_address(struct device_node *np, const u32 *addr);
@@ -305,12 +177,6 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
305 */ 177 */
306extern void __iomem *of_iomap(struct device_node *device, int index); 178extern void __iomem *of_iomap(struct device_node *device, int index);
307 179
308/*
309 * NB: This is here while we transition from using asm/prom.h
310 * to linux/of.h
311 */
312#include <linux/of.h>
313
314#endif /* __ASSEMBLY__ */ 180#endif /* __ASSEMBLY__ */
315#endif /* __KERNEL__ */ 181#endif /* __KERNEL__ */
316#endif /* _ASM_MICROBLAZE_PROM_H */ 182#endif /* _ASM_MICROBLAZE_PROM_H */
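The prom.h hunk above drops the flattened-device-tree declarations in favour of <linux/of_fdt.h>; of_scan_flat_dt() and of_get_flat_dt_prop() keep the signatures shown in the removed lines. A minimal sketch of an early-boot scan callback using them follows — the callback name and the copy into data are illustrative, not part of this commit:

#include <linux/init.h>
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <asm/setup.h>          /* COMMAND_LINE_SIZE */

static int __init early_scan_chosen(unsigned long node, const char *uname,
                                    int depth, void *data)
{
        unsigned long size;
        const char *prop;

        if (depth != 1 || strcmp(uname, "chosen") != 0)
                return 0;                       /* keep scanning */

        prop = of_get_flat_dt_prop(node, "bootargs", &size);
        if (prop != NULL && size > 0)
                strlcpy((char *)data, prop, COMMAND_LINE_SIZE);
        return 1;                               /* found it, stop the scan */
}

/* typically invoked from early_init_devtree():
 *      of_scan_flat_dt(early_scan_chosen, cmd_line);
 */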
diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h
index a917dc517736..d74dbfb92c04 100644
--- a/arch/microblaze/include/asm/ptrace.h
+++ b/arch/microblaze/include/asm/ptrace.h
@@ -54,6 +54,7 @@ struct pt_regs {
54 int pt_mode; 54 int pt_mode;
55}; 55};
56 56
57#ifdef __KERNEL__
57#define kernel_mode(regs) ((regs)->pt_mode) 58#define kernel_mode(regs) ((regs)->pt_mode)
58#define user_mode(regs) (!kernel_mode(regs)) 59#define user_mode(regs) (!kernel_mode(regs))
59 60
@@ -62,6 +63,19 @@ struct pt_regs {
62 63
63void show_regs(struct pt_regs *); 64void show_regs(struct pt_regs *);
64 65
66#else /* __KERNEL__ */
67
68/* pt_regs offsets used by gdbserver etc in ptrace syscalls */
69#define PT_GPR(n) ((n) * sizeof(microblaze_reg_t))
70#define PT_PC (32 * sizeof(microblaze_reg_t))
71#define PT_MSR (33 * sizeof(microblaze_reg_t))
72#define PT_EAR (34 * sizeof(microblaze_reg_t))
73#define PT_ESR (35 * sizeof(microblaze_reg_t))
74#define PT_FSR (36 * sizeof(microblaze_reg_t))
75#define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t))
76
77#endif /* __KERNEL */
78
65#endif /* __ASSEMBLY__ */ 79#endif /* __ASSEMBLY__ */
66 80
67#endif /* _ASM_MICROBLAZE_PTRACE_H */ 81#endif /* _ASM_MICROBLAZE_PTRACE_H */
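The new user-space branch of ptrace.h exports pt_regs offsets so debuggers can address registers by byte offset. A hedged sketch of how a gdbserver-style tracer might read the program counter with PTRACE_PEEKUSER; the attach/wait handling is assumed to happen elsewhere:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>         /* PT_PC, PT_MSR, ... as defined above */

static unsigned long read_pc(pid_t pid)
{
        /* PTRACE_PEEKUSER returns the word stored at the given offset
         * in the traced process's user area (here: the saved PC) */
        return (unsigned long)ptrace(PTRACE_PEEKUSER, pid,
                                     (void *)PT_PC, NULL);
}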
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 66f1b30dd097..e38abc7714b6 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -76,20 +76,23 @@ struct pvr_s {
76#define PVR3_FSL_LINKS_MASK 0x00000380 76#define PVR3_FSL_LINKS_MASK 0x00000380
77 77
78/* ICache config PVR masks */ 78/* ICache config PVR masks */
79#define PVR4_USE_ICACHE_MASK 0x80000000 79#define PVR4_USE_ICACHE_MASK 0x80000000 /* ICU */
80#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000 80#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000 /* ICTS */
81#define PVR4_ICACHE_USE_FSL_MASK 0x02000000 81#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000 /* ICW */
82#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000 82#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000 /* ICLL */
83#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000 83#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000 /* ICBS */
84#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000 84#define PVR4_ICACHE_ALWAYS_USED 0x00008000 /* IAU */
85#define PVR4_ICACHE_INTERFACE 0x00002000 /* ICI */
85 86
86/* DCache config PVR masks */ 87/* DCache config PVR masks */
87#define PVR5_USE_DCACHE_MASK 0x80000000 88#define PVR5_USE_DCACHE_MASK 0x80000000 /* DCU */
88#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000 89#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000 /* DCTS */
89#define PVR5_DCACHE_USE_FSL_MASK 0x02000000 90#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000 /* DCW */
90#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000 91#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000 /* DCLL */
91#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000 92#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000 /* DCBS */
92#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000 93#define PVR5_DCACHE_ALWAYS_USED 0x00008000 /* DAU */
94#define PVR5_DCACHE_USE_WRITEBACK 0x00004000 /* DWB */
95#define PVR5_DCACHE_INTERFACE 0x00002000 /* DCI */
93 96
94/* ICache base address PVR mask */ 97/* ICache base address PVR mask */
95#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF 98#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF
@@ -178,11 +181,14 @@ struct pvr_s {
178 ((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26) 181 ((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
179#define PVR_DCACHE_USE_FSL(pvr) (pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK) 182#define PVR_DCACHE_USE_FSL(pvr) (pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
180#define PVR_DCACHE_ALLOW_WR(pvr) (pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK) 183#define PVR_DCACHE_ALLOW_WR(pvr) (pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
184/* FIXME two shifts on one line need a comment */
181#define PVR_DCACHE_LINE_LEN(pvr) \ 185#define PVR_DCACHE_LINE_LEN(pvr) \
182 (1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21)) 186 (1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
183#define PVR_DCACHE_BYTE_SIZE(pvr) \ 187#define PVR_DCACHE_BYTE_SIZE(pvr) \
184 (1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16)) 188 (1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
185 189
190#define PVR_DCACHE_USE_WRITEBACK(pvr) \
191 ((pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
186 192
187#define PVR_ICACHE_BASEADDR(pvr) (pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK) 193#define PVR_ICACHE_BASEADDR(pvr) (pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
188#define PVR_ICACHE_HIGHADDR(pvr) (pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK) 194#define PVR_ICACHE_HIGHADDR(pvr) (pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
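The added PVR5 bits expose the data-cache write-back capability (DWB) through the processor version registers. A small sketch of how CPU-detection code might test it with the new accessor; get_pvr() is assumed to be the existing PVR read helper, and the wrapper function here is hypothetical:

#include <asm/pvr.h>

static int dcache_is_writeback(void)
{
        struct pvr_s pvr;

        get_pvr(&pvr);
        /* PVR_DCACHE_USE_WRITEBACK() isolates the DWB bit added above */
        return PVR_DCACHE_USE_WRITEBACK(pvr) != 0;
}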
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
deleted file mode 100644
index 0e7102c3fb11..000000000000
--- a/arch/microblaze/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _ASM_MICROBLAZE_SEGMENT_H
12#define _ASM_MICROBLAZE_SEGMENT_H
13
14# ifndef __ASSEMBLY__
15
16typedef struct {
17 unsigned long seg;
18} mm_segment_t;
19
20/*
21 * On Microblaze the fs value is actually the top of the corresponding
22 * address space.
23 *
24 * The fs value determines whether argument validity checking should be
25 * performed or not. If get_fs() == USER_DS, checking is performed, with
26 * get_fs() == KERNEL_DS, checking is bypassed.
27 *
28 * For historical reasons, these macros are grossly misnamed.
29 *
30 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
31 */
32# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
33
34# ifndef CONFIG_MMU
35# define KERNEL_DS MAKE_MM_SEG(0)
36# define USER_DS KERNEL_DS
37# else
38# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
39# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
40# endif
41
42# define get_ds() (KERNEL_DS)
43# define get_fs() (current_thread_info()->addr_limit)
44# define set_fs(val) (current_thread_info()->addr_limit = (val))
45
46# define segment_eq(a, b) ((a).seg == (b).seg)
47
48# endif /* __ASSEMBLY__ */
49#endif /* _ASM_MICROBLAZE_SEGMENT_H */
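With asm/segment.h gone, mm_segment_t moves into thread_info.h and the KERNEL_DS/USER_DS definitions move into uaccess.h (both appear later in this diff). For context, the usual pattern that relies on these symbols looks roughly like the sketch below; the function and its use of vfs_read() are illustrative, not taken from the commit:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t read_into_kernel_buf(struct file *file, char *kbuf,
                                    size_t len, loff_t *pos)
{
        mm_segment_t old_fs = get_fs();
        ssize_t ret;

        /* lift the limit so a kernel buffer passes access_ok() checks */
        set_fs(KERNEL_DS);
        ret = vfs_read(file, (char __user *)kbuf, len, pos);
        set_fs(old_fs);

        return ret;
}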
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index ed67c9ed15b8..7f31394985e0 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -35,6 +35,8 @@ extern void mmu_reset(void);
35extern void early_console_reg_tlb_alloc(unsigned int addr); 35extern void early_console_reg_tlb_alloc(unsigned int addr);
36# endif /* CONFIG_MMU */ 36# endif /* CONFIG_MMU */
37 37
38extern void of_platform_reset_gpio_probe(void);
39
38void time_init(void); 40void time_init(void);
39void init_IRQ(void); 41void init_IRQ(void);
40void machine_early_init(const char *cmdline, unsigned int ram, 42void machine_early_init(const char *cmdline, unsigned int ram,
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index b1ed61590660..59efb3fef957 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -16,6 +16,8 @@
16#include <asm-generic/cmpxchg.h> 16#include <asm-generic/cmpxchg.h>
17#include <asm-generic/cmpxchg-local.h> 17#include <asm-generic/cmpxchg-local.h>
18 18
19#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
20
19struct task_struct; 21struct task_struct;
20struct thread_info; 22struct thread_info;
21 23
@@ -85,6 +87,9 @@ void free_initmem(void);
85extern char *klimit; 87extern char *klimit;
86extern void ret_from_fork(void); 88extern void ret_from_fork(void);
87 89
90extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
91extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
92
88#ifdef CONFIG_DEBUG_FS 93#ifdef CONFIG_DEBUG_FS
89extern struct dentry *of_debugfs_root; 94extern struct dentry *of_debugfs_root;
90#endif 95#endif
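alloc_maybe_bootmem()/zalloc_maybe_bootmem() cover allocations that may be requested before the slab allocator is up; early callers fall back to bootmem, later ones get normal kernel memory. A minimal sketch of a caller — allocating a struct device_node here is purely illustrative:

#include <linux/gfp.h>
#include <asm/prom.h>           /* struct device_node */
#include <asm/system.h>         /* zalloc_maybe_bootmem() prototype above */

static struct device_node *alloc_early_node(void)
{
        /* zeroed allocation that works both before and after slab init */
        return zalloc_maybe_bootmem(sizeof(struct device_node), GFP_KERNEL);
}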
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 6e92885d381a..b2ca80f64640 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -19,7 +19,6 @@
19#ifndef __ASSEMBLY__ 19#ifndef __ASSEMBLY__
20# include <linux/types.h> 20# include <linux/types.h>
21# include <asm/processor.h> 21# include <asm/processor.h>
22# include <asm/segment.h>
23 22
24/* 23/*
25 * low level task data that entry.S needs immediate access to 24 * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
60 __u32 fsr; 59 __u32 fsr;
61}; 60};
62 61
62typedef struct {
63 unsigned long seg;
64} mm_segment_t;
65
63struct thread_info { 66struct thread_info {
64 struct task_struct *task; /* main task structure */ 67 struct task_struct *task; /* main task structure */
65 struct exec_domain *exec_domain; /* execution domain */ 68 struct exec_domain *exec_domain; /* execution domain */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index eb31a0e8a772..2e1353c2d18d 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -23,7 +23,8 @@
23extern void _tlbie(unsigned long address); 23extern void _tlbie(unsigned long address);
24extern void _tlbia(void); 24extern void _tlbia(void);
25 25
26#define __tlbia() _tlbia() 26#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
27#define __tlbie(x) { _tlbie(x); }
27 28
28static inline void local_flush_tlb_all(void) 29static inline void local_flush_tlb_all(void)
29 { __tlbia(); } 30 { __tlbia(); }
@@ -31,14 +32,14 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
31 { __tlbia(); } 32 { __tlbia(); }
32static inline void local_flush_tlb_page(struct vm_area_struct *vma, 33static inline void local_flush_tlb_page(struct vm_area_struct *vma,
33 unsigned long vmaddr) 34 unsigned long vmaddr)
34 { _tlbie(vmaddr); } 35 { __tlbie(vmaddr); }
35static inline void local_flush_tlb_range(struct vm_area_struct *vma, 36static inline void local_flush_tlb_range(struct vm_area_struct *vma,
36 unsigned long start, unsigned long end) 37 unsigned long start, unsigned long end)
37 { __tlbia(); } 38 { __tlbia(); }
38 39
39#define flush_tlb_kernel_range(start, end) do { } while (0) 40#define flush_tlb_kernel_range(start, end) do { } while (0)
40 41
41#define update_mmu_cache(vma, addr, pte) do { } while (0) 42#define update_mmu_cache(vma, addr, ptep) do { } while (0)
42 43
43#define flush_tlb_all local_flush_tlb_all 44#define flush_tlb_all local_flush_tlb_all
44#define flush_tlb_mm local_flush_tlb_mm 45#define flush_tlb_mm local_flush_tlb_mm
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 5431b4631a7a..26460d15b338 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -22,101 +22,73 @@
22#include <asm/mmu.h> 22#include <asm/mmu.h>
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
25#include <asm/segment.h>
26#include <linux/string.h> 25#include <linux/string.h>
27 26
28#define VERIFY_READ 0 27#define VERIFY_READ 0
29#define VERIFY_WRITE 1 28#define VERIFY_WRITE 1
30 29
31#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0) 30/*
32 31 * On Microblaze the fs value is actually the top of the corresponding
33#ifndef CONFIG_MMU 32 * address space.
34 33 *
35extern int ___range_ok(unsigned long addr, unsigned long size); 34 * The fs value determines whether argument validity checking should be
36 35 * performed or not. If get_fs() == USER_DS, checking is performed, with
37#define __range_ok(addr, size) \ 36 * get_fs() == KERNEL_DS, checking is bypassed.
38 ___range_ok((unsigned long)(addr), (unsigned long)(size)) 37 *
39 38 * For historical reasons, these macros are grossly misnamed.
40#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) 39 *
41#define __access_ok(add, size) (__range_ok((addr), (size)) == 0) 40 * For a non-MMU arch like Microblaze, KERNEL_DS and USER_DS are equal.
42 41 */
43/* Undefined function to trigger linker error */ 42# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
44extern int bad_user_access_length(void);
45
46/* FIXME this is a function for optimization -> memcpy */
47#define __get_user(var, ptr) \
48({ \
49 int __gu_err = 0; \
50 switch (sizeof(*(ptr))) { \
51 case 1: \
52 case 2: \
53 case 4: \
54 (var) = *(ptr); \
55 break; \
56 case 8: \
57 memcpy((void *) &(var), (ptr), 8); \
58 break; \
59 default: \
60 (var) = 0; \
61 __gu_err = __get_user_bad(); \
62 break; \
63 } \
64 __gu_err; \
65})
66 43
67#define __get_user_bad() (bad_user_access_length(), (-EFAULT)) 44# ifndef CONFIG_MMU
45# define KERNEL_DS MAKE_MM_SEG(0)
46# define USER_DS KERNEL_DS
47# else
48# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
49# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
50# endif
68 51
69/* FIXME is not there defined __pu_val */ 52# define get_ds() (KERNEL_DS)
70#define __put_user(var, ptr) \ 53# define get_fs() (current_thread_info()->addr_limit)
71({ \ 54# define set_fs(val) (current_thread_info()->addr_limit = (val))
72 int __pu_err = 0; \
73 switch (sizeof(*(ptr))) { \
74 case 1: \
75 case 2: \
76 case 4: \
77 *(ptr) = (var); \
78 break; \
79 case 8: { \
80 typeof(*(ptr)) __pu_val = (var); \
81 memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
82 } \
83 break; \
84 default: \
85 __pu_err = __put_user_bad(); \
86 break; \
87 } \
88 __pu_err; \
89})
90 55
91#define __put_user_bad() (bad_user_access_length(), (-EFAULT)) 56# define segment_eq(a, b) ((a).seg == (b).seg)
92 57
93#define put_user(x, ptr) __put_user((x), (ptr)) 58/*
94#define get_user(x, ptr) __get_user((x), (ptr)) 59 * The exception table consists of pairs of addresses: the first is the
60 * address of an instruction that is allowed to fault, and the second is
61 * the address at which the program should continue. No registers are
62 * modified, so it is entirely up to the continuation code to figure out
63 * what to do.
64 *
65 * All the routines below use bits of fixup code that are out of line
66 * with the main instruction path. This means when everything is well,
67 * we don't even have to jump over them. Further, they do not intrude
68 * on our cache or tlb entries.
69 */
70struct exception_table_entry {
71 unsigned long insn, fixup;
72};
95 73
96#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0) 74/* Returns 0 if exception not found and fixup otherwise. */
97#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0) 75extern unsigned long search_exception_table(unsigned long);
98 76
99#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n))) 77#ifndef CONFIG_MMU
100#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
101#define __copy_to_user_inatomic(to, from, n) \
102 (__copy_to_user((to), (from), (n)))
103#define __copy_from_user_inatomic(to, from, n) \
104 (__copy_from_user((to), (from), (n)))
105 78
106static inline unsigned long clear_user(void *addr, unsigned long size) 79/* Check against bounds of physical memory */
80static inline int ___range_ok(unsigned long addr, unsigned long size)
107{ 81{
108 if (access_ok(VERIFY_WRITE, addr, size)) 82 return ((addr < memory_start) ||
109 size = __clear_user(addr, size); 83 ((addr + size) > memory_end));
110 return size;
111} 84}
112 85
113/* Returns 0 if exception not found and fixup otherwise. */ 86#define __range_ok(addr, size) \
114extern unsigned long search_exception_table(unsigned long); 87 ___range_ok((unsigned long)(addr), (unsigned long)(size))
115 88
116extern long strncpy_from_user(char *dst, const char *src, long count); 89#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
117extern long strnlen_user(const char *src, long count);
118 90
119#else /* CONFIG_MMU */ 91#else
120 92
121/* 93/*
122 * Address is valid if: 94 * Address is valid if:
@@ -129,22 +101,119 @@ extern long strnlen_user(const char *src, long count);
129/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", 101/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
130 type?"WRITE":"READ",addr,size,get_fs().seg)) */ 102 type?"WRITE":"READ",addr,size,get_fs().seg)) */
131 103
132/* 104#endif
133 * All the __XXX versions macros/functions below do not perform
134 * access checking. It is assumed that the necessary checks have been
135 * already performed before the function (macro) is called.
136 */
137 105
138#define get_user(x, ptr) \ 106#ifdef CONFIG_MMU
139({ \ 107# define __FIXUP_SECTION ".section .fixup,\"ax\"\n"
140 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \ 108# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
141 ? __get_user((x), (ptr)) : -EFAULT; \ 109#else
110# define __FIXUP_SECTION ".section .discard,\"ax\"\n"
111# define __EX_TABLE_SECTION ".section .discard,\"a\"\n"
112#endif
113
114extern unsigned long __copy_tofrom_user(void __user *to,
115 const void __user *from, unsigned long size);
116
117/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
118static inline unsigned long __must_check __clear_user(void __user *to,
119 unsigned long n)
120{
121 /* normal memset with two words to __ex_table */
122 __asm__ __volatile__ ( \
123 "1: sb r0, %2, r0;" \
124 " addik %0, %0, -1;" \
125 " bneid %0, 1b;" \
126 " addik %2, %2, 1;" \
127 "2: " \
128 __EX_TABLE_SECTION \
129 ".word 1b,2b;" \
130 ".previous;" \
131 : "=r"(n) \
132 : "0"(n), "r"(to)
133 );
134 return n;
135}
136
137static inline unsigned long __must_check clear_user(void __user *to,
138 unsigned long n)
139{
140 might_sleep();
141 if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
142 return n;
143
144 return __clear_user(to, n);
145}
146
147/* put_user and get_user macros */
148extern long __user_bad(void);
149
150#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
151({ \
152 __asm__ __volatile__ ( \
153 "1:" insn " %1, %2, r0;" \
154 " addk %0, r0, r0;" \
155 "2: " \
156 __FIXUP_SECTION \
157 "3: brid 2b;" \
158 " addik %0, r0, %3;" \
159 ".previous;" \
160 __EX_TABLE_SECTION \
161 ".word 1b,3b;" \
162 ".previous;" \
163 : "=&r"(__gu_err), "=r"(__gu_val) \
164 : "r"(__gu_ptr), "i"(-EFAULT) \
165 ); \
142}) 166})
143 167
144#define put_user(x, ptr) \ 168/**
169 * get_user: - Get a simple variable from user space.
170 * @x: Variable to store result.
171 * @ptr: Source address, in user space.
172 *
173 * Context: User context only. This function may sleep.
174 *
175 * This macro copies a single simple variable from user space to kernel
176 * space. It supports simple types like char and int, but not larger
177 * data types like structures or arrays.
178 *
179 * @ptr must have pointer-to-simple-variable type, and the result of
180 * dereferencing @ptr must be assignable to @x without a cast.
181 *
182 * Returns zero on success, or -EFAULT on error.
183 * On error, the variable @x is set to zero.
184 */
185#define get_user(x, ptr) \
186 __get_user_check((x), (ptr), sizeof(*(ptr)))
187
188#define __get_user_check(x, ptr, size) \
145({ \ 189({ \
146 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \ 190 unsigned long __gu_val = 0; \
147 ? __put_user((x), (ptr)) : -EFAULT; \ 191 const typeof(*(ptr)) __user *__gu_addr = (ptr); \
192 int __gu_err = 0; \
193 \
194 if (access_ok(VERIFY_READ, __gu_addr, size)) { \
195 switch (size) { \
196 case 1: \
197 __get_user_asm("lbu", __gu_addr, __gu_val, \
198 __gu_err); \
199 break; \
200 case 2: \
201 __get_user_asm("lhu", __gu_addr, __gu_val, \
202 __gu_err); \
203 break; \
204 case 4: \
205 __get_user_asm("lw", __gu_addr, __gu_val, \
206 __gu_err); \
207 break; \
208 default: \
209 __gu_err = __user_bad(); \
210 break; \
211 } \
212 } else { \
213 __gu_err = -EFAULT; \
214 } \
215 x = (typeof(*(ptr)))__gu_val; \
216 __gu_err; \
148}) 217})
149 218
150#define __get_user(x, ptr) \ 219#define __get_user(x, ptr) \
@@ -163,28 +232,102 @@ extern long strnlen_user(const char *src, long count);
163 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ 232 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
164 break; \ 233 break; \
165 default: \ 234 default: \
166 __gu_val = 0; __gu_err = -EINVAL; \ 235 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
167 } \ 236 } \
168 x = (__typeof__(*(ptr))) __gu_val; \ 237 x = (__typeof__(*(ptr))) __gu_val; \
169 __gu_err; \ 238 __gu_err; \
170}) 239})
171 240
172#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ 241
242#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
243({ \
244 __asm__ __volatile__ ( \
245 "1:" insn " %1, %2, r0;" \
246 " addk %0, r0, r0;" \
247 "2: " \
248 __FIXUP_SECTION \
249 "3: brid 2b;" \
250 " addik %0, r0, %3;" \
251 ".previous;" \
252 __EX_TABLE_SECTION \
253 ".word 1b,3b;" \
254 ".previous;" \
255 : "=&r"(__gu_err) \
256 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
257 ); \
258})
259
260#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
261({ \
262 __asm__ __volatile__ (" lwi %0, %1, 0;" \
263 "1: swi %0, %2, 0;" \
264 " lwi %0, %1, 4;" \
265 "2: swi %0, %2, 4;" \
266 " addk %0, r0, r0;" \
267 "3: " \
268 __FIXUP_SECTION \
269 "4: brid 3b;" \
270 " addik %0, r0, %3;" \
271 ".previous;" \
272 __EX_TABLE_SECTION \
273 ".word 1b,4b,2b,4b;" \
274 ".previous;" \
275 : "=&r"(__gu_err) \
276 : "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
277 ); \
278})
279
280/**
281 * put_user: - Write a simple value into user space.
282 * @x: Value to copy to user space.
283 * @ptr: Destination address, in user space.
284 *
285 * Context: User context only. This function may sleep.
286 *
287 * This macro copies a single simple value from kernel space to user
288 * space. It supports simple types like char and int, but not larger
289 * data types like structures or arrays.
290 *
291 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
292 * to the result of dereferencing @ptr.
293 *
294 * Returns zero on success, or -EFAULT on error.
295 */
296#define put_user(x, ptr) \
297 __put_user_check((x), (ptr), sizeof(*(ptr)))
298
299#define __put_user_check(x, ptr, size) \
173({ \ 300({ \
174 __asm__ __volatile__ ( \ 301 typeof(*(ptr)) __pu_val; \
175 "1:" insn " %1, %2, r0; \ 302 typeof(*(ptr)) __user *__pu_addr = (ptr); \
176 addk %0, r0, r0; \ 303 int __pu_err = 0; \
177 2: \ 304 \
178 .section .fixup,\"ax\"; \ 305 __pu_val = (x); \
179 3: brid 2b; \ 306 if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \
180 addik %0, r0, %3; \ 307 switch (size) { \
181 .previous; \ 308 case 1: \
182 .section __ex_table,\"a\"; \ 309 __put_user_asm("sb", __pu_addr, __pu_val, \
183 .word 1b,3b; \ 310 __pu_err); \
184 .previous;" \ 311 break; \
185 : "=r"(__gu_err), "=r"(__gu_val) \ 312 case 2: \
186 : "r"(__gu_ptr), "i"(-EFAULT) \ 313 __put_user_asm("sh", __pu_addr, __pu_val, \
187 ); \ 314 __pu_err); \
315 break; \
316 case 4: \
317 __put_user_asm("sw", __pu_addr, __pu_val, \
318 __pu_err); \
319 break; \
320 case 8: \
321 __put_user_asm_8(__pu_addr, __pu_val, __pu_err);\
322 break; \
323 default: \
324 __pu_err = __user_bad(); \
325 break; \
326 } \
327 } else { \
328 __pu_err = -EFAULT; \
329 } \
330 __pu_err; \
188}) 331})
189 332
190#define __put_user(x, ptr) \ 333#define __put_user(x, ptr) \
@@ -195,7 +338,7 @@ extern long strnlen_user(const char *src, long count);
195 case 1: \ 338 case 1: \
196 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \ 339 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \
197 break; \ 340 break; \
198 case 2: \ 341 case 2: \
199 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \ 342 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \
200 break; \ 343 break; \
201 case 4: \ 344 case 4: \
@@ -205,121 +348,70 @@ extern long strnlen_user(const char *src, long count);
205 __put_user_asm_8((ptr), __gu_val, __gu_err); \ 348 __put_user_asm_8((ptr), __gu_val, __gu_err); \
206 break; \ 349 break; \
207 default: \ 350 default: \
208 __gu_err = -EINVAL; \ 351 /*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \
209 } \ 352 } \
210 __gu_err; \ 353 __gu_err; \
211}) 354})
212 355
213#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
214({ \
215__asm__ __volatile__ (" lwi %0, %1, 0; \
216 1: swi %0, %2, 0; \
217 lwi %0, %1, 4; \
218 2: swi %0, %2, 4; \
219 addk %0,r0,r0; \
220 3: \
221 .section .fixup,\"ax\"; \
222 4: brid 3b; \
223 addik %0, r0, %3; \
224 .previous; \
225 .section __ex_table,\"a\"; \
226 .word 1b,4b,2b,4b; \
227 .previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(&__gu_val), \
230 "r"(__gu_ptr), "i"(-EFAULT) \
231 ); \
232})
233 356
234#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ 357/* copy_to_from_user */
235({ \ 358#define __copy_from_user(to, from, n) \
236 __asm__ __volatile__ ( \ 359 __copy_tofrom_user((__force void __user *)(to), \
237 "1:" insn " %1, %2, r0; \ 360 (void __user *)(from), (n))
238 addk %0, r0, r0; \ 361#define __copy_from_user_inatomic(to, from, n) \
239 2: \ 362 copy_from_user((to), (from), (n))
240 .section .fixup,\"ax\"; \
241 3: brid 2b; \
242 addik %0, r0, %3; \
243 .previous; \
244 .section __ex_table,\"a\"; \
245 .word 1b,3b; \
246 .previous;" \
247 : "=r"(__gu_err) \
248 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
249 ); \
250})
251 363
252/* 364static inline long copy_from_user(void *to,
253 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. 365 const void __user *from, unsigned long n)
254 */
255static inline int clear_user(char *to, int size)
256{ 366{
257 if (size && access_ok(VERIFY_WRITE, to, size)) { 367 might_sleep();
258 __asm__ __volatile__ (" \ 368 if (access_ok(VERIFY_READ, from, n))
259 1: \ 369 return __copy_from_user(to, from, n);
260 sb r0, %2, r0; \ 370 return n;
261 addik %0, %0, -1; \
262 bneid %0, 1b; \
263 addik %2, %2, 1; \
264 2: \
265 .section __ex_table,\"a\"; \
266 .word 1b,2b; \
267 .section .text;" \
268 : "=r"(size) \
269 : "0"(size), "r"(to)
270 );
271 }
272 return size;
273} 371}
274 372
275extern unsigned long __copy_tofrom_user(void __user *to, 373#define __copy_to_user(to, from, n) \
276 const void __user *from, unsigned long size); 374 __copy_tofrom_user((void __user *)(to), \
277 375 (__force const void __user *)(from), (n))
278#define copy_to_user(to, from, n) \
279 (access_ok(VERIFY_WRITE, (to), (n)) ? \
280 __copy_tofrom_user((void __user *)(to), \
281 (__force const void __user *)(from), (n)) \
282 : -EFAULT)
283
284#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n))
285#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) 376#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
286 377
287#define copy_from_user(to, from, n) \ 378static inline long copy_to_user(void __user *to,
288 (access_ok(VERIFY_READ, (from), (n)) ? \ 379 const void *from, unsigned long n)
289 __copy_tofrom_user((__force void __user *)(to), \ 380{
290 (void __user *)(from), (n)) \ 381 might_sleep();
291 : -EFAULT) 382 if (access_ok(VERIFY_WRITE, to, n))
292 383 return __copy_to_user(to, from, n);
293#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n)) 384 return n;
294#define __copy_from_user_inatomic(to, from, n) \ 385}
295 copy_from_user((to), (from), (n))
296 386
387/*
388 * Copy a null terminated string from userspace.
389 */
297extern int __strncpy_user(char *to, const char __user *from, int len); 390extern int __strncpy_user(char *to, const char __user *from, int len);
298extern int __strnlen_user(const char __user *sstr, int len);
299 391
300#define strncpy_from_user(to, from, len) \ 392#define __strncpy_from_user __strncpy_user
301 (access_ok(VERIFY_READ, from, 1) ? \
302 __strncpy_user(to, from, len) : -EFAULT)
303#define strnlen_user(str, len) \
304 (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
305 393
306#endif /* CONFIG_MMU */ 394static inline long
395strncpy_from_user(char *dst, const char __user *src, long count)
396{
397 if (!access_ok(VERIFY_READ, src, 1))
398 return -EFAULT;
399 return __strncpy_from_user(dst, src, count);
400}
307 401
308/* 402/*
309 * The exception table consists of pairs of addresses: the first is the 403 * Return the size of a string (including the ending 0)
310 * address of an instruction that is allowed to fault, and the second is
311 * the address at which the program should continue. No registers are
312 * modified, so it is entirely up to the continuation code to figure out
313 * what to do.
314 * 404 *
315 * All the routines below use bits of fixup code that are out of line 405 * Return 0 on exception, a value greater than N if too long
316 * with the main instruction path. This means when everything is well,
317 * we don't even have to jump over them. Further, they do not intrude
318 * on our cache or tlb entries.
319 */ 406 */
320struct exception_table_entry { 407extern int __strnlen_user(const char __user *sstr, int len);
321 unsigned long insn, fixup; 408
322}; 409static inline long strnlen_user(const char __user *src, long n)
410{
411 if (!access_ok(VERIFY_READ, src, 1))
412 return 0;
413 return __strnlen_user(src, n);
414}
323 415
324#endif /* __ASSEMBLY__ */ 416#endif /* __ASSEMBLY__ */
325#endif /* __KERNEL__ */ 417#endif /* __KERNEL__ */
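The reworked uaccess.h turns get_user()/put_user() into access-checked wrappers and gives copy_to_user()/copy_from_user() the conventional "bytes not copied" return value. A short, hypothetical ioctl-style helper showing how callers are expected to use them:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct sample_req {                     /* hypothetical request layout */
        u32 cmd;
        u32 arg;
};

static long handle_req(struct sample_req __user *ureq)
{
        struct sample_req req;
        u32 old;

        if (copy_from_user(&req, ureq, sizeof(req)))
                return -EFAULT;         /* non-zero: bytes not copied */

        if (get_user(old, &ureq->arg))  /* single access via the asm fixups */
                return -EFAULT;

        return put_user(old + req.cmd, &ureq->arg);     /* 0 or -EFAULT */
}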
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index cb05a07e55e9..2b67e92a773c 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -377,13 +377,14 @@
377#define __NR_shutdown 359 /* new */ 377#define __NR_shutdown 359 /* new */
378#define __NR_sendmsg 360 /* new */ 378#define __NR_sendmsg 360 /* new */
379#define __NR_recvmsg 361 /* new */ 379#define __NR_recvmsg 361 /* new */
380#define __NR_accept04 362 /* new */ 380#define __NR_accept4 362 /* new */
381#define __NR_preadv 363 /* new */ 381#define __NR_preadv 363 /* new */
382#define __NR_pwritev 364 /* new */ 382#define __NR_pwritev 364 /* new */
383#define __NR_rt_tgsigqueueinfo 365 /* new */ 383#define __NR_rt_tgsigqueueinfo 365 /* new */
384#define __NR_perf_event_open 366 /* new */ 384#define __NR_perf_event_open 366 /* new */
385#define __NR_recvmmsg 367 /* new */
385 386
386#define __NR_syscalls 367 387#define __NR_syscalls 368
387 388
388#ifdef __KERNEL__ 389#ifdef __KERNEL__
389#ifndef __ASSEMBLY__ 390#ifndef __ASSEMBLY__
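unistd.h fixes the accept4 name and adds __NR_recvmmsg (367), bumping __NR_syscalls to 368. On a libc that does not wrap the call yet, user space can reach it through syscall(); this sketch assumes the libc headers already provide struct mmsghdr, otherwise it has to be declared locally:

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/socket.h>
#include <time.h>
#include <unistd.h>

static int my_recvmmsg(int fd, struct mmsghdr *vec, unsigned int vlen,
                       unsigned int flags, struct timespec *timeout)
{
        /* __NR_recvmmsg is 367 on microblaze after this change */
        return syscall(__NR_recvmmsg, fd, vec, vlen, flags, timeout);
}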
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index d487729683de..e51bc1520825 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -2,12 +2,22 @@
2# Makefile 2# Makefile
3# 3#
4 4
5ifdef CONFIG_FUNCTION_TRACER
6# Do not trace early boot code and low level code
7CFLAGS_REMOVE_timer.o = -pg
8CFLAGS_REMOVE_intc.o = -pg
9CFLAGS_REMOVE_early_printk.o = -pg
10CFLAGS_REMOVE_selfmod.o = -pg
11CFLAGS_REMOVE_heartbeat.o = -pg
12CFLAGS_REMOVE_ftrace.o = -pg
13endif
14
5extra-y := head.o vmlinux.lds 15extra-y := head.o vmlinux.lds
6 16
7obj-y += exceptions.o \ 17obj-y += dma.o exceptions.o \
8 hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ 18 hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
9 of_platform.o process.o prom.o prom_parse.o ptrace.o \ 19 of_platform.o process.o prom.o prom_parse.o ptrace.o \
10 setup.o signal.o sys_microblaze.o timer.o traps.o 20 setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
11 21
12obj-y += cpu/ 22obj-y += cpu/
13 23
@@ -16,5 +26,7 @@ obj-$(CONFIG_SELFMOD) += selfmod.o
16obj-$(CONFIG_HEART_BEAT) += heartbeat.o 26obj-$(CONFIG_HEART_BEAT) += heartbeat.o
17obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o 27obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
18obj-$(CONFIG_MMU) += misc.o 28obj-$(CONFIG_MMU) += misc.o
29obj-$(CONFIG_STACKTRACE) += stacktrace.o
30obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
19 31
20obj-y += entry$(MMU).o 32obj-y += entry$(MMU).o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 7bc7b68f97db..c1b459c97571 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
16#include <linux/hardirq.h> 16#include <linux/hardirq.h>
17#include <linux/thread_info.h> 17#include <linux/thread_info.h>
18#include <linux/kbuild.h> 18#include <linux/kbuild.h>
19#include <asm/cpuinfo.h>
19 20
20int main(int argc, char *argv[]) 21int main(int argc, char *argv[])
21{ 22{
@@ -90,6 +91,7 @@ int main(int argc, char *argv[])
90 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 91 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
91 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); 92 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
92 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); 93 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
94 DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
93 BLANK(); 95 BLANK();
94 96
95 /* struct cpu_context */ 97 /* struct cpu_context */
diff --git a/arch/microblaze/kernel/cpu/Makefile b/arch/microblaze/kernel/cpu/Makefile
index 20646e549271..59cc7bceaf8c 100644
--- a/arch/microblaze/kernel/cpu/Makefile
+++ b/arch/microblaze/kernel/cpu/Makefile
@@ -2,6 +2,10 @@
2# Build the appropriate CPU version support 2# Build the appropriate CPU version support
3# 3#
4 4
5ifdef CONFIG_FUNCTION_TRACER
6CFLAGS_REMOVE_cache.o = -pg
7endif
8
5EXTRA_CFLAGS += -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \ 9EXTRA_CFLAGS += -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \
6 -DCPU_REV=$(CPU_REV) 10 -DCPU_REV=$(CPU_REV)
7 11
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index af866a450125..109876e8d643 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007-2009 PetaLogix 5 * Copyright (C) 2007-2009 PetaLogix
6 * Copyright (C) 2007 John Williams <john.williams@petalogix.com> 6 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General 8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this 9 * Public License. See the file COPYING in the main directory of this
@@ -13,243 +13,655 @@
13#include <asm/cacheflush.h> 13#include <asm/cacheflush.h>
14#include <linux/cache.h> 14#include <linux/cache.h>
15#include <asm/cpuinfo.h> 15#include <asm/cpuinfo.h>
16#include <asm/pvr.h>
16 17
17/* Exported functions */ 18static inline void __enable_icache_msr(void)
19{
20 __asm__ __volatile__ (" msrset r0, %0; \
21 nop; " \
22 : : "i" (MSR_ICE) : "memory");
23}
24
25static inline void __disable_icache_msr(void)
26{
27 __asm__ __volatile__ (" msrclr r0, %0; \
28 nop; " \
29 : : "i" (MSR_ICE) : "memory");
30}
18 31
19void _enable_icache(void) 32static inline void __enable_dcache_msr(void)
20{ 33{
21 if (cpuinfo.use_icache) { 34 __asm__ __volatile__ (" msrset r0, %0; \
22#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 35 nop; " \
23 __asm__ __volatile__ (" \ 36 : \
24 msrset r0, %0; \ 37 : "i" (MSR_DCE) \
25 nop; " \
26 : \
27 : "i" (MSR_ICE) \
28 : "memory"); 38 : "memory");
29#else
30 __asm__ __volatile__ (" \
31 mfs r12, rmsr; \
32 nop; \
33 ori r12, r12, %0; \
34 mts rmsr, r12; \
35 nop; " \
36 : \
37 : "i" (MSR_ICE) \
38 : "memory", "r12");
39#endif
40 }
41} 39}
42 40
43void _disable_icache(void) 41static inline void __disable_dcache_msr(void)
44{ 42{
45 if (cpuinfo.use_icache) { 43 __asm__ __volatile__ (" msrclr r0, %0; \
46#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 44 nop; " \
47 __asm__ __volatile__ (" \ 45 : \
48 msrclr r0, %0; \ 46 : "i" (MSR_DCE) \
49 nop; " \
50 : \
51 : "i" (MSR_ICE) \
52 : "memory"); 47 : "memory");
53#else 48}
54 __asm__ __volatile__ (" \ 49
55 mfs r12, rmsr; \ 50static inline void __enable_icache_nomsr(void)
56 nop; \ 51{
57 andi r12, r12, ~%0; \ 52 __asm__ __volatile__ (" mfs r12, rmsr; \
58 mts rmsr, r12; \ 53 nop; \
59 nop; " \ 54 ori r12, r12, %0; \
60 : \ 55 mts rmsr, r12; \
61 : "i" (MSR_ICE) \ 56 nop; " \
57 : \
58 : "i" (MSR_ICE) \
62 : "memory", "r12"); 59 : "memory", "r12");
63#endif
64 }
65} 60}
66 61
67void _invalidate_icache(unsigned int addr) 62static inline void __disable_icache_nomsr(void)
68{ 63{
69 if (cpuinfo.use_icache) { 64 __asm__ __volatile__ (" mfs r12, rmsr; \
70 __asm__ __volatile__ (" \ 65 nop; \
71 wic %0, r0" \ 66 andi r12, r12, ~%0; \
72 : \ 67 mts rmsr, r12; \
73 : "r" (addr)); 68 nop; " \
74 } 69 : \
70 : "i" (MSR_ICE) \
71 : "memory", "r12");
75} 72}
76 73
77void _enable_dcache(void) 74static inline void __enable_dcache_nomsr(void)
78{ 75{
79 if (cpuinfo.use_dcache) { 76 __asm__ __volatile__ (" mfs r12, rmsr; \
80#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 77 nop; \
81 __asm__ __volatile__ (" \ 78 ori r12, r12, %0; \
82 msrset r0, %0; \ 79 mts rmsr, r12; \
83 nop; " \ 80 nop; " \
84 : \ 81 : \
85 : "i" (MSR_DCE) \ 82 : "i" (MSR_DCE) \
86 : "memory"); 83 : "memory", "r12");
87#else 84}
88 __asm__ __volatile__ (" \ 85
89 mfs r12, rmsr; \ 86static inline void __disable_dcache_nomsr(void)
90 nop; \ 87{
91 ori r12, r12, %0; \ 88 __asm__ __volatile__ (" mfs r12, rmsr; \
92 mts rmsr, r12; \ 89 nop; \
93 nop; " \ 90 andi r12, r12, ~%0; \
94 : \ 91 mts rmsr, r12; \
95 : "i" (MSR_DCE) \ 92 nop; " \
93 : \
94 : "i" (MSR_DCE) \
96 : "memory", "r12"); 95 : "memory", "r12");
96}
97
98
99/* Helper macro for computing the limits of cache range loops
100 *
101 * End address can be unaligned which is OK for C implementation.
102 * The ASM implementation aligns it in the ASM macros
103 */
104#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
105do { \
106 int align = ~(cache_line_length - 1); \
107 end = min(start + cache_size, end); \
108 start &= align; \
109} while (0);
110
111/*
112 * Helper macro to loop over the specified cache_size/line_length and
113 * execute 'op' on that cacheline
114 */
115#define CACHE_ALL_LOOP(cache_size, line_length, op) \
116do { \
117 unsigned int len = cache_size - line_length; \
118 int step = -line_length; \
119 WARN_ON(step >= 0); \
120 \
121 __asm__ __volatile__ (" 1: " #op " %0, r0; \
122 bgtid %0, 1b; \
123 addk %0, %0, %1; \
124 " : : "r" (len), "r" (step) \
125 : "memory"); \
126} while (0);
127
128/* Used for wdc.flush/clear which can use rB for offset which is not possible
129 * to use for simple wdc or wic.
130 *
131 * start address is cache aligned
132 * end address is not aligned; if end is aligned then I have to subtract
133 * the cacheline length because I can't flush/invalidate the next cacheline.
134 * If it is not, I align it because I will flush/invalidate the whole line.
135 */
136#define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
137do { \
138 int step = -line_length; \
139 int align = ~(line_length - 1); \
140 int count; \
141 end = ((end & align) == end) ? end - line_length : end & align; \
142 count = end - start; \
143 WARN_ON(count < 0); \
144 \
145 __asm__ __volatile__ (" 1: " #op " %0, %1; \
146 bgtid %1, 1b; \
147 addk %1, %1, %2; \
148 " : : "r" (start), "r" (count), \
149 "r" (step) : "memory"); \
150} while (0);
151
152/* Only the first parameter is used for OP - for wic, wdc */
153#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
154do { \
155 int volatile temp; \
156 int align = ~(line_length - 1); \
157 end = ((end & align) == end) ? end - line_length : end & align; \
158 WARN_ON(end - start < 0); \
159 \
160 __asm__ __volatile__ (" 1: " #op " %1, r0; \
161 cmpu %0, %1, %2; \
162 bgtid %0, 1b; \
163 addk %1, %1, %3; \
164 " : : "r" (temp), "r" (start), "r" (end),\
165 "r" (line_length) : "memory"); \
166} while (0);
167
168#define ASM_LOOP
169
170static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
171{
172 unsigned long flags;
173#ifndef ASM_LOOP
174 int i;
97#endif 175#endif
98 } 176 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
177 (unsigned int)start, (unsigned int) end);
178
179 CACHE_LOOP_LIMITS(start, end,
180 cpuinfo.icache_line_length, cpuinfo.icache_size);
181
182 local_irq_save(flags);
183 __disable_icache_msr();
184
185#ifdef ASM_LOOP
186 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
187#else
188 for (i = start; i < end; i += cpuinfo.icache_line_length)
189 __asm__ __volatile__ ("wic %0, r0;" \
190 : : "r" (i));
191#endif
192 __enable_icache_msr();
193 local_irq_restore(flags);
99} 194}
100 195
101void _disable_dcache(void) 196static void __flush_icache_range_nomsr_irq(unsigned long start,
197 unsigned long end)
102{ 198{
103#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 199 unsigned long flags;
104 __asm__ __volatile__ (" \ 200#ifndef ASM_LOOP
105 msrclr r0, %0; \ 201 int i;
106 nop; " \ 202#endif
107 : \ 203 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
108 : "i" (MSR_DCE) \ 204 (unsigned int)start, (unsigned int) end);
109 : "memory"); 205
206 CACHE_LOOP_LIMITS(start, end,
207 cpuinfo.icache_line_length, cpuinfo.icache_size);
208
209 local_irq_save(flags);
210 __disable_icache_nomsr();
211
212#ifdef ASM_LOOP
213 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
110#else 214#else
111 __asm__ __volatile__ (" \ 215 for (i = start; i < end; i += cpuinfo.icache_line_length)
112 mfs r12, rmsr; \ 216 __asm__ __volatile__ ("wic %0, r0;" \
113 nop; \ 217 : : "r" (i));
114 andi r12, r12, ~%0; \
115 mts rmsr, r12; \
116 nop; " \
117 : \
118 : "i" (MSR_DCE) \
119 : "memory", "r12");
120#endif 218#endif
219
220 __enable_icache_nomsr();
221 local_irq_restore(flags);
121} 222}
122 223
123void _invalidate_dcache(unsigned int addr) 224static void __flush_icache_range_noirq(unsigned long start,
225 unsigned long end)
124{ 226{
125 __asm__ __volatile__ (" \ 227#ifndef ASM_LOOP
126 wdc %0, r0" \ 228 int i;
127 : \ 229#endif
128 : "r" (addr)); 230 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
231 (unsigned int)start, (unsigned int) end);
232
233 CACHE_LOOP_LIMITS(start, end,
234 cpuinfo.icache_line_length, cpuinfo.icache_size);
235#ifdef ASM_LOOP
236 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
237#else
238 for (i = start; i < end; i += cpuinfo.icache_line_length)
239 __asm__ __volatile__ ("wic %0, r0;" \
240 : : "r" (i));
241#endif
129} 242}
130 243
131void __invalidate_icache_all(void) 244static void __flush_icache_all_msr_irq(void)
132{ 245{
133 unsigned int i; 246 unsigned long flags;
134 unsigned flags; 247#ifndef ASM_LOOP
248 int i;
249#endif
250 pr_debug("%s\n", __func__);
135 251
136 if (cpuinfo.use_icache) { 252 local_irq_save(flags);
137 local_irq_save(flags); 253 __disable_icache_msr();
138 __disable_icache(); 254#ifdef ASM_LOOP
255 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
256#else
257 for (i = 0; i < cpuinfo.icache_size;
258 i += cpuinfo.icache_line_length)
259 __asm__ __volatile__ ("wic %0, r0;" \
260 : : "r" (i));
261#endif
262 __enable_icache_msr();
263 local_irq_restore(flags);
264}
139 265
140 /* Just loop through cache size and invalidate, no need to add 266static void __flush_icache_all_nomsr_irq(void)
141 CACHE_BASE address */ 267{
142 for (i = 0; i < cpuinfo.icache_size; 268 unsigned long flags;
143 i += cpuinfo.icache_line) 269#ifndef ASM_LOOP
144 __invalidate_icache(i); 270 int i;
271#endif
272 pr_debug("%s\n", __func__);
145 273
146 __enable_icache(); 274 local_irq_save(flags);
147 local_irq_restore(flags); 275 __disable_icache_nomsr();
148 } 276#ifdef ASM_LOOP
277 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
278#else
279 for (i = 0; i < cpuinfo.icache_size;
280 i += cpuinfo.icache_line_length)
281 __asm__ __volatile__ ("wic %0, r0;" \
282 : : "r" (i));
283#endif
284 __enable_icache_nomsr();
285 local_irq_restore(flags);
149} 286}
150 287
151void __invalidate_icache_range(unsigned long start, unsigned long end) 288static void __flush_icache_all_noirq(void)
152{ 289{
153 unsigned int i; 290#ifndef ASM_LOOP
154 unsigned flags; 291 int i;
155 unsigned int align; 292#endif
293 pr_debug("%s\n", __func__);
294#ifdef ASM_LOOP
295 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
296#else
297 for (i = 0; i < cpuinfo.icache_size;
298 i += cpuinfo.icache_line_length)
299 __asm__ __volatile__ ("wic %0, r0;" \
300 : : "r" (i));
301#endif
302}
156 303
157 if (cpuinfo.use_icache) { 304static void __invalidate_dcache_all_msr_irq(void)
158 /* 305{
159 * No need to cover entire cache range, 306 unsigned long flags;
160 * just cover cache footprint 307#ifndef ASM_LOOP
161 */ 308 int i;
162 end = min(start + cpuinfo.icache_size, end); 309#endif
163 align = ~(cpuinfo.icache_line - 1); 310 pr_debug("%s\n", __func__);
164 start &= align; /* Make sure we are aligned */ 311
165 /* Push end up to the next cache line */ 312 local_irq_save(flags);
166 end = ((end & align) + cpuinfo.icache_line); 313 __disable_dcache_msr();
314#ifdef ASM_LOOP
315 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
316#else
317 for (i = 0; i < cpuinfo.dcache_size;
318 i += cpuinfo.dcache_line_length)
319 __asm__ __volatile__ ("wdc %0, r0;" \
320 : : "r" (i));
321#endif
322 __enable_dcache_msr();
323 local_irq_restore(flags);
324}
167 325
168 local_irq_save(flags); 326static void __invalidate_dcache_all_nomsr_irq(void)
169 __disable_icache(); 327{
328 unsigned long flags;
329#ifndef ASM_LOOP
330 int i;
331#endif
332 pr_debug("%s\n", __func__);
170 333
171 for (i = start; i < end; i += cpuinfo.icache_line) 334 local_irq_save(flags);
172 __invalidate_icache(i); 335 __disable_dcache_nomsr();
336#ifdef ASM_LOOP
337 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
338#else
339 for (i = 0; i < cpuinfo.dcache_size;
340 i += cpuinfo.dcache_line_length)
341 __asm__ __volatile__ ("wdc %0, r0;" \
342 : : "r" (i));
343#endif
344 __enable_dcache_nomsr();
345 local_irq_restore(flags);
346}
173 347
174 __enable_icache(); 348static void __invalidate_dcache_all_noirq_wt(void)
175 local_irq_restore(flags); 349{
176 } 350#ifndef ASM_LOOP
351 int i;
352#endif
353 pr_debug("%s\n", __func__);
354#ifdef ASM_LOOP
355 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
356#else
357 for (i = 0; i < cpuinfo.dcache_size;
358 i += cpuinfo.dcache_line_length)
359 __asm__ __volatile__ ("wdc %0, r0;" \
360 : : "r" (i));
361#endif
177} 362}
178 363
179void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page) 364/* FIXME This is blind invalidation, as expected,
365 * but it can't be called on noMMU in microblaze_cache_init below
366 *
367 * MS: noMMU kernel won't boot if simple wdc is used
368 * The reason should be that there is discarded data which the kernel needs
369 */
370static void __invalidate_dcache_all_wb(void)
180{ 371{
181 __invalidate_icache_all(); 372#ifndef ASM_LOOP
373 int i;
374#endif
375 pr_debug("%s\n", __func__);
376#ifdef ASM_LOOP
377 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
378 wdc)
379#else
380 for (i = 0; i < cpuinfo.dcache_size;
381 i += cpuinfo.dcache_line_length)
382 __asm__ __volatile__ ("wdc %0, r0;" \
383 : : "r" (i));
384#endif
182} 385}
183 386
184void __invalidate_icache_user_range(struct vm_area_struct *vma, 387static void __invalidate_dcache_range_wb(unsigned long start,
185 struct page *page, unsigned long adr, 388 unsigned long end)
186 int len)
187{ 389{
188 __invalidate_icache_all(); 390#ifndef ASM_LOOP
391 int i;
392#endif
393 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
394 (unsigned int)start, (unsigned int) end);
395
396 CACHE_LOOP_LIMITS(start, end,
397 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
398#ifdef ASM_LOOP
399 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
400#else
401 for (i = start; i < end; i += cpuinfo.dcache_line_length)
402 __asm__ __volatile__ ("wdc.clear %0, r0;" \
403 : : "r" (i));
404#endif
189} 405}
190 406
191void __invalidate_cache_sigtramp(unsigned long addr) 407static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
408 unsigned long end)
192{ 409{
193 __invalidate_icache_range(addr, addr + 8); 410#ifndef ASM_LOOP
411 int i;
412#endif
413 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
414 (unsigned int)start, (unsigned int) end);
415 CACHE_LOOP_LIMITS(start, end,
416 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
417
418#ifdef ASM_LOOP
419 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
420#else
421 for (i = start; i < end; i += cpuinfo.dcache_line_length)
422 __asm__ __volatile__ ("wdc %0, r0;" \
423 : : "r" (i));
424#endif
194} 425}
195 426
196void __invalidate_dcache_all(void) 427static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
428 unsigned long end)
197{ 429{
198 unsigned int i; 430 unsigned long flags;
199 unsigned flags; 431#ifndef ASM_LOOP
432 int i;
433#endif
434 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
435 (unsigned int)start, (unsigned int) end);
436 CACHE_LOOP_LIMITS(start, end,
437 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
200 438
201 if (cpuinfo.use_dcache) { 439 local_irq_save(flags);
202 local_irq_save(flags); 440 __disable_dcache_msr();
203 __disable_dcache();
204 441
205 /* 442#ifdef ASM_LOOP
206 * Just loop through cache size and invalidate, 443 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
207 * no need to add CACHE_BASE address 444#else
208 */ 445 for (i = start; i < end; i += cpuinfo.dcache_line_length)
209 for (i = 0; i < cpuinfo.dcache_size; 446 __asm__ __volatile__ ("wdc %0, r0;" \
210 i += cpuinfo.dcache_line) 447 : : "r" (i));
211 __invalidate_dcache(i); 448#endif
212 449
213 __enable_dcache(); 450 __enable_dcache_msr();
214 local_irq_restore(flags); 451 local_irq_restore(flags);
215 }
216} 452}
217 453
218void __invalidate_dcache_range(unsigned long start, unsigned long end) 454static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
455 unsigned long end)
219{ 456{
220 unsigned int i; 457 unsigned long flags;
221 unsigned flags; 458#ifndef ASM_LOOP
222 unsigned int align; 459 int i;
460#endif
461 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
462 (unsigned int)start, (unsigned int) end);
223 463
224 if (cpuinfo.use_dcache) { 464 CACHE_LOOP_LIMITS(start, end,
225 /* 465 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
226 * No need to cover entire cache range,
227 * just cover cache footprint
228 */
229 end = min(start + cpuinfo.dcache_size, end);
230 align = ~(cpuinfo.dcache_line - 1);
231 start &= align; /* Make sure we are aligned */
232 /* Push end up to the next cache line */
233 end = ((end & align) + cpuinfo.dcache_line);
234 local_irq_save(flags);
235 __disable_dcache();
236 466
237 for (i = start; i < end; i += cpuinfo.dcache_line) 467 local_irq_save(flags);
238 __invalidate_dcache(i); 468 __disable_dcache_nomsr();
239 469
240 __enable_dcache(); 470#ifdef ASM_LOOP
241 local_irq_restore(flags); 471 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
242 } 472#else
473 for (i = start; i < end; i += cpuinfo.dcache_line_length)
474 __asm__ __volatile__ ("wdc %0, r0;" \
475 : : "r" (i));
476#endif
477
478 __enable_dcache_nomsr();
479 local_irq_restore(flags);
480}
481
482static void __flush_dcache_all_wb(void)
483{
484#ifndef ASM_LOOP
485 int i;
486#endif
487 pr_debug("%s\n", __func__);
488#ifdef ASM_LOOP
489 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
490 wdc.flush);
491#else
492 for (i = 0; i < cpuinfo.dcache_size;
493 i += cpuinfo.dcache_line_length)
494 __asm__ __volatile__ ("wdc.flush %0, r0;" \
495 : : "r" (i));
496#endif
243} 497}
244 498
245void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page) 499static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
246{ 500{
247 __invalidate_dcache_all(); 501#ifndef ASM_LOOP
502 int i;
503#endif
504 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
505 (unsigned int)start, (unsigned int) end);
506
507 CACHE_LOOP_LIMITS(start, end,
508 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
509#ifdef ASM_LOOP
510 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
511#else
512 for (i = start; i < end; i += cpuinfo.dcache_line_length)
513 __asm__ __volatile__ ("wdc.flush %0, r0;" \
514 : : "r" (i));
515#endif
248} 516}
249 517
250void __invalidate_dcache_user_range(struct vm_area_struct *vma, 518/* struct for wb caches and for wt caches */
251 struct page *page, unsigned long adr, 519struct scache *mbc;
252 int len) 520
521/* new wb cache model */
522const struct scache wb_msr = {
523 .ie = __enable_icache_msr,
524 .id = __disable_icache_msr,
525 .ifl = __flush_icache_all_noirq,
526 .iflr = __flush_icache_range_noirq,
527 .iin = __flush_icache_all_noirq,
528 .iinr = __flush_icache_range_noirq,
529 .de = __enable_dcache_msr,
530 .dd = __disable_dcache_msr,
531 .dfl = __flush_dcache_all_wb,
532 .dflr = __flush_dcache_range_wb,
533 .din = __invalidate_dcache_all_wb,
534 .dinr = __invalidate_dcache_range_wb,
535};
536
537/* The only difference is in the ie, id, de, dd functions */
538const struct scache wb_nomsr = {
539 .ie = __enable_icache_nomsr,
540 .id = __disable_icache_nomsr,
541 .ifl = __flush_icache_all_noirq,
542 .iflr = __flush_icache_range_noirq,
543 .iin = __flush_icache_all_noirq,
544 .iinr = __flush_icache_range_noirq,
545 .de = __enable_dcache_nomsr,
546 .dd = __disable_dcache_nomsr,
547 .dfl = __flush_dcache_all_wb,
548 .dflr = __flush_dcache_range_wb,
549 .din = __invalidate_dcache_all_wb,
550 .dinr = __invalidate_dcache_range_wb,
551};
552
553/* Old wt cache model: disable irqs and turn the cache off */
554const struct scache wt_msr = {
555 .ie = __enable_icache_msr,
556 .id = __disable_icache_msr,
557 .ifl = __flush_icache_all_msr_irq,
558 .iflr = __flush_icache_range_msr_irq,
559 .iin = __flush_icache_all_msr_irq,
560 .iinr = __flush_icache_range_msr_irq,
561 .de = __enable_dcache_msr,
562 .dd = __disable_dcache_msr,
563 .dfl = __invalidate_dcache_all_msr_irq,
564 .dflr = __invalidate_dcache_range_msr_irq_wt,
565 .din = __invalidate_dcache_all_msr_irq,
566 .dinr = __invalidate_dcache_range_msr_irq_wt,
567};
568
569const struct scache wt_nomsr = {
570 .ie = __enable_icache_nomsr,
571 .id = __disable_icache_nomsr,
572 .ifl = __flush_icache_all_nomsr_irq,
573 .iflr = __flush_icache_range_nomsr_irq,
574 .iin = __flush_icache_all_nomsr_irq,
575 .iinr = __flush_icache_range_nomsr_irq,
576 .de = __enable_dcache_nomsr,
577 .dd = __disable_dcache_nomsr,
578 .dfl = __invalidate_dcache_all_nomsr_irq,
579 .dflr = __invalidate_dcache_range_nomsr_irq,
580 .din = __invalidate_dcache_all_nomsr_irq,
581 .dinr = __invalidate_dcache_range_nomsr_irq,
582};
583
584/* New wt cache model for newer Microblaze versions */
585const struct scache wt_msr_noirq = {
586 .ie = __enable_icache_msr,
587 .id = __disable_icache_msr,
588 .ifl = __flush_icache_all_noirq,
589 .iflr = __flush_icache_range_noirq,
590 .iin = __flush_icache_all_noirq,
591 .iinr = __flush_icache_range_noirq,
592 .de = __enable_dcache_msr,
593 .dd = __disable_dcache_msr,
594 .dfl = __invalidate_dcache_all_noirq_wt,
595 .dflr = __invalidate_dcache_range_nomsr_wt,
596 .din = __invalidate_dcache_all_noirq_wt,
597 .dinr = __invalidate_dcache_range_nomsr_wt,
598};
599
600const struct scache wt_nomsr_noirq = {
601 .ie = __enable_icache_nomsr,
602 .id = __disable_icache_nomsr,
603 .ifl = __flush_icache_all_noirq,
604 .iflr = __flush_icache_range_noirq,
605 .iin = __flush_icache_all_noirq,
606 .iinr = __flush_icache_range_noirq,
607 .de = __enable_dcache_nomsr,
608 .dd = __disable_dcache_nomsr,
609 .dfl = __invalidate_dcache_all_noirq_wt,
610 .dflr = __invalidate_dcache_range_nomsr_wt,
611 .din = __invalidate_dcache_all_noirq_wt,
612 .dinr = __invalidate_dcache_range_nomsr_wt,
613};
614
615/* CPU version codes for 7.20.a and 7.20.d - see arch/microblaze/kernel/cpu/cpuinfo.c */
616#define CPUVER_7_20_A 0x0c
617#define CPUVER_7_20_D 0x0f
618
619#define INFO(s) printk(KERN_INFO "cache: " s "\n");
620
621void microblaze_cache_init(void)
253{ 622{
254 __invalidate_dcache_all(); 623 if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
624 if (cpuinfo.dcache_wb) {
625 INFO("wb_msr");
626 mbc = (struct scache *)&wb_msr;
627 if (cpuinfo.ver_code < CPUVER_7_20_D) {
628 /* MS: problem with signal handling - hw bug */
629 INFO("WB won't work properly");
630 }
631 } else {
632 if (cpuinfo.ver_code >= CPUVER_7_20_A) {
633 INFO("wt_msr_noirq");
634 mbc = (struct scache *)&wt_msr_noirq;
635 } else {
636 INFO("wt_msr");
637 mbc = (struct scache *)&wt_msr;
638 }
639 }
640 } else {
641 if (cpuinfo.dcache_wb) {
642 INFO("wb_nomsr");
643 mbc = (struct scache *)&wb_nomsr;
644 if (cpuinfo.ver_code < CPUVER_7_20_D) {
645 /* MS: problem with signal handling - hw bug */
646 INFO("WB won't work properly");
647 }
648 } else {
649 if (cpuinfo.ver_code >= CPUVER_7_20_A) {
650 INFO("wt_nomsr_noirq");
651 mbc = (struct scache *)&wt_nomsr_noirq;
652 } else {
653 INFO("wt_nomsr");
654 mbc = (struct scache *)&wt_nomsr;
655 }
656 }
657 }
658/* FIXME Invalidation is done in U-BOOT
659 * WT cache: Data is already written to main memory
660 * WB cache: Discarding the data on noMMU caused the kernel not to boot
661 */
662 /* invalidate_dcache(); */
663 enable_dcache();
664
665 invalidate_icache();
666 enable_icache();
255} 667}
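For readers skimming the new cache.c above: microblaze_cache_init() picks one of the scache operation tables (wb_msr, wb_nomsr, wt_msr, wt_nomsr, wt_msr_noirq, wt_nomsr_noirq) once at boot and stores it in mbc; every later flush or invalidate goes through that pointer. The stand-alone C sketch below shows the same ops-table dispatch in miniature; cache_ops, pick_ops and the toy flush functions are invented for illustration and are not part of the commit.

#include <stdio.h>

/* simplified stand-ins for the kernel's struct scache and cpuinfo fields */
struct cache_ops {
	const char *name;
	void (*dflush_all)(void);	/* corresponds to .dfl */
	void (*dinval_all)(void);	/* corresponds to .din */
};

static void wb_flush(void) { puts("wdc.flush over the whole dcache"); }
static void wb_inval(void) { puts("wdc.clear over the whole dcache"); }
static void wt_inval(void) { puts("wdc over the whole dcache"); }

static const struct cache_ops wb_ops = { "wb", wb_flush, wb_inval };
static const struct cache_ops wt_ops = { "wt", wt_inval, wt_inval };

/* write-back caches need a real flush; write-through only needs invalidate */
static const struct cache_ops *pick_ops(int dcache_wb)
{
	return dcache_wb ? &wb_ops : &wt_ops;
}

int main(void)
{
	const struct cache_ops *mbc = pick_ops(1);	/* like microblaze_cache_init */

	printf("selected %s model\n", mbc->name);
	mbc->dflush_all();	/* callers only ever go through the table */
	return 0;
}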
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index c259786e7faa..f72dbd66c844 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -21,8 +21,14 @@
21 */ 21 */
22 22
23#define CI(c, p) { ci->c = PVR_##p(pvr); } 23#define CI(c, p) { ci->c = PVR_##p(pvr); }
24
25#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
24#define err_printk(x) \ 26#define err_printk(x) \
25 early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n"); 27 early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
28#else
29#define err_printk(x) \
30 printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n");
31#endif
26 32
27void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) 33void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
28{ 34{
@@ -70,7 +76,7 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
70 CI(use_icache, USE_ICACHE); 76 CI(use_icache, USE_ICACHE);
71 CI(icache_tagbits, ICACHE_ADDR_TAG_BITS); 77 CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
72 CI(icache_write, ICACHE_ALLOW_WR); 78 CI(icache_write, ICACHE_ALLOW_WR);
73 CI(icache_line, ICACHE_LINE_LEN); 79 ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2;
74 CI(icache_size, ICACHE_BYTE_SIZE); 80 CI(icache_size, ICACHE_BYTE_SIZE);
75 CI(icache_base, ICACHE_BASEADDR); 81 CI(icache_base, ICACHE_BASEADDR);
76 CI(icache_high, ICACHE_HIGHADDR); 82 CI(icache_high, ICACHE_HIGHADDR);
@@ -78,11 +84,16 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
78 CI(use_dcache, USE_DCACHE); 84 CI(use_dcache, USE_DCACHE);
79 CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS); 85 CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
80 CI(dcache_write, DCACHE_ALLOW_WR); 86 CI(dcache_write, DCACHE_ALLOW_WR);
81 CI(dcache_line, DCACHE_LINE_LEN); 87 ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2;
82 CI(dcache_size, DCACHE_BYTE_SIZE); 88 CI(dcache_size, DCACHE_BYTE_SIZE);
83 CI(dcache_base, DCACHE_BASEADDR); 89 CI(dcache_base, DCACHE_BASEADDR);
84 CI(dcache_high, DCACHE_HIGHADDR); 90 CI(dcache_high, DCACHE_HIGHADDR);
85 91
92 temp = PVR_DCACHE_USE_WRITEBACK(pvr);
93 if (ci->dcache_wb != temp)
94 err_printk("DCACHE WB");
95 ci->dcache_wb = temp;
96
86 CI(use_dopb, D_OPB); 97 CI(use_dopb, D_OPB);
87 CI(use_iopb, I_OPB); 98 CI(use_iopb, I_OPB);
88 CI(use_dlmb, D_LMB); 99 CI(use_dlmb, D_LMB);
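A note on the two hunks above: the PVR line-length field is evidently in 32-bit words (hence the "<< 2" to get bytes), and the write-back flag read from the PVR is checked against the device-tree value. A small, hypothetical C sketch of that conversion and mismatch check; the values are examples only.

#include <stdio.h>

/* line length comes out of the PVR in words; the kernel keeps it in bytes */
static unsigned int words_to_bytes(unsigned int words)
{
	return words << 2;		/* one word = 4 bytes */
}

int main(void)
{
	unsigned int pvr_line_words = 8;	/* example PVR value */
	unsigned int dts_wb = 1, pvr_wb = 0;	/* example mismatch  */

	printf("dcache line length: %u bytes\n", words_to_bytes(pvr_line_words));
	if (dts_wb != pvr_wb)
		printf("ERROR: Microblaze DCACHE WB-different for PVR and DTS\n");
	return 0;
}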
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index adb448f93d5f..6095aa6b5c88 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -72,12 +72,12 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
72 ci->use_icache = fcpu(cpu, "xlnx,use-icache"); 72 ci->use_icache = fcpu(cpu, "xlnx,use-icache");
73 ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits"); 73 ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits");
74 ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr"); 74 ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr");
75 ci->icache_line = fcpu(cpu, "xlnx,icache-line-len") << 2; 75 ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2;
76 if (!ci->icache_line) { 76 if (!ci->icache_line_length) {
77 if (fcpu(cpu, "xlnx,icache-use-fsl")) 77 if (fcpu(cpu, "xlnx,icache-use-fsl"))
78 ci->icache_line = 4 << 2; 78 ci->icache_line_length = 4 << 2;
79 else 79 else
80 ci->icache_line = 1 << 2; 80 ci->icache_line_length = 1 << 2;
81 } 81 }
82 ci->icache_size = fcpu(cpu, "i-cache-size"); 82 ci->icache_size = fcpu(cpu, "i-cache-size");
83 ci->icache_base = fcpu(cpu, "i-cache-baseaddr"); 83 ci->icache_base = fcpu(cpu, "i-cache-baseaddr");
@@ -86,16 +86,17 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
86 ci->use_dcache = fcpu(cpu, "xlnx,use-dcache"); 86 ci->use_dcache = fcpu(cpu, "xlnx,use-dcache");
87 ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag"); 87 ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag");
88 ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr"); 88 ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr");
89 ci->dcache_line = fcpu(cpu, "xlnx,dcache-line-len") << 2; 89 ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2;
90 if (!ci->dcache_line) { 90 if (!ci->dcache_line_length) {
91 if (fcpu(cpu, "xlnx,dcache-use-fsl")) 91 if (fcpu(cpu, "xlnx,dcache-use-fsl"))
92 ci->dcache_line = 4 << 2; 92 ci->dcache_line_length = 4 << 2;
93 else 93 else
94 ci->dcache_line = 1 << 2; 94 ci->dcache_line_length = 1 << 2;
95 } 95 }
96 ci->dcache_size = fcpu(cpu, "d-cache-size"); 96 ci->dcache_size = fcpu(cpu, "d-cache-size");
97 ci->dcache_base = fcpu(cpu, "d-cache-baseaddr"); 97 ci->dcache_base = fcpu(cpu, "d-cache-baseaddr");
98 ci->dcache_high = fcpu(cpu, "d-cache-highaddr"); 98 ci->dcache_high = fcpu(cpu, "d-cache-highaddr");
99 ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback");
99 100
100 ci->use_dopb = fcpu(cpu, "xlnx,d-opb"); 101 ci->use_dopb = fcpu(cpu, "xlnx,d-opb");
101 ci->use_iopb = fcpu(cpu, "xlnx,i-opb"); 102 ci->use_iopb = fcpu(cpu, "xlnx,i-opb");
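The static (device-tree only) path above uses the same words-to-bytes convention and falls back to a default line length when the property is absent: 4 words with an FSL link, otherwise 1 word. A minimal C sketch of that fallback, with invented names:

#include <stdio.h>

/* mirrors the fallback in set_cpuinfo_static(): pick a default when the
 * device tree does not give a line length, then convert words to bytes */
static unsigned int line_length_bytes(unsigned int dt_words, int use_fsl)
{
	if (!dt_words)
		dt_words = use_fsl ? 4 : 1;
	return dt_words << 2;
}

int main(void)
{
	printf("%u\n", line_length_bytes(0, 1));	/* missing, FSL  -> 16 */
	printf("%u\n", line_length_bytes(0, 0));	/* missing       ->  4 */
	printf("%u\n", line_length_bytes(8, 0));	/* 8 words in DT -> 32 */
	return 0;
}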
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 3539babc1c18..255ef880351e 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -9,7 +9,6 @@
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/slab.h>
13#include <asm/cpuinfo.h> 12#include <asm/cpuinfo.h>
14#include <asm/pvr.h> 13#include <asm/pvr.h>
15 14
@@ -29,11 +28,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
29 {"7.20.a", 0x0c}, 28 {"7.20.a", 0x0c},
30 {"7.20.b", 0x0d}, 29 {"7.20.b", 0x0d},
31 {"7.20.c", 0x0e}, 30 {"7.20.c", 0x0e},
32 /* FIXME There is no keycode defined in MBV for these versions */ 31 {"7.20.d", 0x0f},
33 {"2.10.a", 0x10}, 32 {"7.30.a", 0x10},
34 {"3.00.a", 0x20},
35 {"4.00.a", 0x30},
36 {"4.00.b", 0x40},
37 {NULL, 0}, 33 {NULL, 0},
38}; 34};
39 35
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 4dcfccdbc364..4216eb1eaa32 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -98,16 +98,22 @@ static int show_cpuinfo(struct seq_file *m, void *v)
98 98
99 if (cpuinfo.use_icache) 99 if (cpuinfo.use_icache)
100 count += seq_printf(m, 100 count += seq_printf(m,
101 "Icache:\t\t%ukB\n", 101 "Icache:\t\t%ukB\tline length:\t%dB\n",
102 cpuinfo.icache_size >> 10); 102 cpuinfo.icache_size >> 10,
103 cpuinfo.icache_line_length);
103 else 104 else
104 count += seq_printf(m, "Icache:\t\tno\n"); 105 count += seq_printf(m, "Icache:\t\tno\n");
105 106
106 if (cpuinfo.use_dcache) 107 if (cpuinfo.use_dcache) {
107 count += seq_printf(m, 108 count += seq_printf(m,
108 "Dcache:\t\t%ukB\n", 109 "Dcache:\t\t%ukB\tline length:\t%dB\n",
109 cpuinfo.dcache_size >> 10); 110 cpuinfo.dcache_size >> 10,
110 else 111 cpuinfo.dcache_line_length);
112 if (cpuinfo.dcache_wb)
113 count += seq_printf(m, "\t\twrite-back\n");
114 else
115 count += seq_printf(m, "\t\twrite-through\n");
116 } else
111 count += seq_printf(m, "Dcache:\t\tno\n"); 117 count += seq_printf(m, "Dcache:\t\tno\n");
112 118
113 count += seq_printf(m, 119 count += seq_printf(m,
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
index c9a4340ddd53..9bee9382bf74 100644
--- a/arch/microblaze/kernel/cpu/pvr.c
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -45,7 +45,7 @@
45 45
46int cpu_has_pvr(void) 46int cpu_has_pvr(void)
47{ 47{
48 unsigned flags; 48 unsigned long flags;
49 unsigned pvr0; 49 unsigned pvr0;
50 50
51 local_save_flags(flags); 51 local_save_flags(flags);
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 000000000000..9dcd90b5df55
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright (C) 2009-2010 PetaLogix
3 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
4 *
5 * Provide default implementations of the DMA mapping callbacks for
6 * directly mapped busses.
7 */
8
9#include <linux/device.h>
10#include <linux/dma-mapping.h>
11#include <linux/gfp.h>
12#include <linux/dma-debug.h>
13#include <asm/bug.h>
14#include <asm/cacheflush.h>
15
16/*
17 * Generic direct DMA implementation
18 *
19 * This implementation supports a per-device offset that can be applied if
20 * the address at which memory is visible to devices is not 0. Platform code
21 * can set archdata.dma_data to an unsigned long holding the offset. By
22 * default the offset is PCI_DRAM_OFFSET.
23 */
24static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
25 size_t size, enum dma_data_direction direction)
26{
27 switch (direction) {
28 case DMA_TO_DEVICE:
29 flush_dcache_range(paddr + offset, paddr + offset + size);
30 break;
31 case DMA_FROM_DEVICE:
32 invalidate_dcache_range(paddr + offset, paddr + offset + size);
33 break;
34 default:
35 BUG();
36 }
37}
38
39static unsigned long get_dma_direct_offset(struct device *dev)
40{
41 if (likely(dev))
42 return (unsigned long)dev->archdata.dma_data;
43
 44 return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
45}
46
47#define NOT_COHERENT_CACHE
48
49static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
50 dma_addr_t *dma_handle, gfp_t flag)
51{
52#ifdef NOT_COHERENT_CACHE
53 return consistent_alloc(flag, size, dma_handle);
54#else
55 void *ret;
56 struct page *page;
57 int node = dev_to_node(dev);
58
59 /* ignore region specifiers */
60 flag &= ~(__GFP_HIGHMEM);
61
62 page = alloc_pages_node(node, flag, get_order(size));
63 if (page == NULL)
64 return NULL;
65 ret = page_address(page);
66 memset(ret, 0, size);
67 *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
68
69 return ret;
70#endif
71}
72
73static void dma_direct_free_coherent(struct device *dev, size_t size,
74 void *vaddr, dma_addr_t dma_handle)
75{
76#ifdef NOT_COHERENT_CACHE
77 consistent_free(size, vaddr);
78#else
79 free_pages((unsigned long)vaddr, get_order(size));
80#endif
81}
82
83static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
84 int nents, enum dma_data_direction direction,
85 struct dma_attrs *attrs)
86{
87 struct scatterlist *sg;
88 int i;
89
90 /* FIXME this part of code is untested */
91 for_each_sg(sgl, sg, nents, i) {
92 sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
93 sg->dma_length = sg->length;
94 __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
95 sg->length, direction);
96 }
97
98 return nents;
99}
100
101static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
102 int nents, enum dma_data_direction direction,
103 struct dma_attrs *attrs)
104{
105}
106
107static int dma_direct_dma_supported(struct device *dev, u64 mask)
108{
109 return 1;
110}
111
112static inline dma_addr_t dma_direct_map_page(struct device *dev,
113 struct page *page,
114 unsigned long offset,
115 size_t size,
116 enum dma_data_direction direction,
117 struct dma_attrs *attrs)
118{
119 __dma_sync_page(page_to_phys(page), offset, size, direction);
120 return page_to_phys(page) + offset + get_dma_direct_offset(dev);
121}
122
123static inline void dma_direct_unmap_page(struct device *dev,
124 dma_addr_t dma_address,
125 size_t size,
126 enum dma_data_direction direction,
127 struct dma_attrs *attrs)
128{
 129/* It is not necessary to do cache cleanup here.
 130 *
 131 * phys_to_virt is here because __dma_sync_page uses __virt_to_phys and
 132 * dma_address is a physical address
133 */
134 __dma_sync_page(dma_address, 0 , size, direction);
135}
136
137struct dma_map_ops dma_direct_ops = {
138 .alloc_coherent = dma_direct_alloc_coherent,
139 .free_coherent = dma_direct_free_coherent,
140 .map_sg = dma_direct_map_sg,
141 .unmap_sg = dma_direct_unmap_sg,
142 .dma_supported = dma_direct_dma_supported,
143 .map_page = dma_direct_map_page,
144 .unmap_page = dma_direct_unmap_page,
145};
146EXPORT_SYMBOL(dma_direct_ops);
147
148/* Number of entries preallocated for DMA-API debugging */
149#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
150
151static int __init dma_init(void)
152{
153 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
154
155 return 0;
156}
157fs_initcall(dma_init);
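To make the new dma.c easier to follow: a bus address is just the physical address plus an optional per-device offset, and the cache maintenance depends only on the transfer direction (flush for DMA_TO_DEVICE, invalidate for DMA_FROM_DEVICE). The user-space C sketch below models that decision; map_single and the printed actions are illustrative stand-ins, not kernel API.

#include <stdio.h>

enum dir { TO_DEVICE, FROM_DEVICE };

/* models __dma_sync_page() + get_dma_direct_offset(): pick the cache
 * operation from the transfer direction and add the per-device offset */
static unsigned long map_single(unsigned long paddr, unsigned long dev_offset,
				unsigned long size, enum dir d)
{
	if (d == TO_DEVICE)
		printf("flush dcache [%#lx, %#lx)\n", paddr, paddr + size);
	else
		printf("invalidate dcache [%#lx, %#lx)\n", paddr, paddr + size);

	return paddr + dev_offset;	/* bus address seen by the device */
}

int main(void)
{
	unsigned long bus = map_single(0x48000000UL, 0x0UL, 0x1000UL, TO_DEVICE);

	printf("bus address: %#lx\n", bus);
	return 0;
}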
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 9083d85376a4..8cc18cd2cce6 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -122,7 +122,7 @@ ENTRY(_interrupt)
122 122
123ret_from_intr: 123ret_from_intr:
124 lwi r11, r1, PT_MODE 124 lwi r11, r1, PT_MODE
125 bneid r11, 3f 125 bneid r11, no_intr_resched
126 126
127 lwi r6, r31, TS_THREAD_INFO /* get thread info */ 127 lwi r6, r31, TS_THREAD_INFO /* get thread info */
128 lwi r19, r6, TI_FLAGS /* get flags in thread info */ 128 lwi r19, r6, TI_FLAGS /* get flags in thread info */
@@ -133,16 +133,18 @@ ret_from_intr:
133 bralid r15, schedule 133 bralid r15, schedule
134 nop 134 nop
1351: andi r11, r19, _TIF_SIGPENDING 1351: andi r11, r19, _TIF_SIGPENDING
136 beqid r11, no_intr_reshed 136 beqid r11, no_intr_resched
137 addk r5, r1, r0 137 addk r5, r1, r0
138 addk r7, r0, r0 138 addk r7, r0, r0
139 bralid r15, do_signal 139 bralid r15, do_signal
140 addk r6, r0, r0 140 addk r6, r0, r0
141 141
142no_intr_reshed: 142no_intr_resched:
143 /* Disable interrupts, we are now committed to the state restore */
144 disable_irq
145
143 /* save mode indicator */ 146 /* save mode indicator */
144 lwi r11, r1, PT_MODE 147 lwi r11, r1, PT_MODE
1453:
146 swi r11, r0, PER_CPU(KM) 148 swi r11, r0, PER_CPU(KM)
147 149
148 /* save r31 */ 150 /* save r31 */
@@ -208,8 +210,6 @@ ENTRY(_user_exception)
208 lwi r1, r1, TS_THREAD_INFO /* get the thread info */ 210 lwi r1, r1, TS_THREAD_INFO /* get the thread info */
209 /* calculate kernel stack pointer */ 211 /* calculate kernel stack pointer */
210 addik r1, r1, THREAD_SIZE - PT_SIZE 212 addik r1, r1, THREAD_SIZE - PT_SIZE
211 swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
212 lwi r11, r0, PER_CPU(KM) /* load mode indicator */
2132: 2132:
214 swi r11, r1, PT_MODE /* store the mode */ 214 swi r11, r1, PT_MODE /* store the mode */
215 lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */ 215 lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
@@ -476,6 +476,8 @@ ENTRY(ret_from_fork)
476 nop 476 nop
477 477
478work_pending: 478work_pending:
479 enable_irq
480
479 andi r11, r19, _TIF_NEED_RESCHED 481 andi r11, r19, _TIF_NEED_RESCHED
480 beqi r11, 1f 482 beqi r11, 1f
481 bralid r15, schedule 483 bralid r15, schedule
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index e3ecb36dd554..c0ede25c5b99 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -31,6 +31,8 @@
31#include <linux/errno.h> 31#include <linux/errno.h>
32#include <asm/signal.h> 32#include <asm/signal.h>
33 33
34#undef DEBUG
35
34/* The size of a state save frame. */ 36/* The size of a state save frame. */
35#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE) 37#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
36 38
@@ -303,7 +305,7 @@ C_ENTRY(_user_exception):
303 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 305 swi r11, r1, PTO+PT_R1; /* Store user SP. */
304 addi r11, r0, 1; 306 addi r11, r0, 1;
305 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
3062: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 3082: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
307 /* Save away the syscall number. */ 309 /* Save away the syscall number. */
308 swi r12, r1, PTO+PT_R0; 310 swi r12, r1, PTO+PT_R0;
309 tovirt(r1,r1) 311 tovirt(r1,r1)
@@ -320,8 +322,7 @@ C_ENTRY(_user_exception):
320 rtid r11, 0 322 rtid r11, 0
321 nop 323 nop
3223: 3243:
323 add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */ 325 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
324 lwi r11, r11, TS_THREAD_INFO /* get thread info */
325 lwi r11, r11, TI_FLAGS /* get flags in thread info */ 326 lwi r11, r11, TI_FLAGS /* get flags in thread info */
326 andi r11, r11, _TIF_WORK_SYSCALL_MASK 327 andi r11, r11, _TIF_WORK_SYSCALL_MASK
327 beqi r11, 4f 328 beqi r11, 4f
@@ -352,10 +353,12 @@ C_ENTRY(_user_exception):
352 add r12, r12, r12; /* convert num -> ptr */ 353 add r12, r12, r12; /* convert num -> ptr */
353 add r12, r12, r12; 354 add r12, r12, r12;
354 355
356#ifdef DEBUG
 355 /* Track syscalls and store them to r0_ram */ 357 /* Track syscalls and store them to r0_ram */
356 lwi r3, r12, 0x400 + r0_ram 358 lwi r3, r12, 0x400 + r0_ram
357 addi r3, r3, 1 359 addi r3, r3, 1
358 swi r3, r12, 0x400 + r0_ram 360 swi r3, r12, 0x400 + r0_ram
361#endif
359 362
360 # Find and jump into the syscall handler. 363 # Find and jump into the syscall handler.
361 lwi r12, r12, sys_call_table 364 lwi r12, r12, sys_call_table
@@ -378,60 +381,50 @@ C_ENTRY(ret_from_trap):
378/* See if returning to kernel mode, if so, skip resched &c. */ 381/* See if returning to kernel mode, if so, skip resched &c. */
379 bnei r11, 2f; 382 bnei r11, 2f;
380 383
384 swi r3, r1, PTO + PT_R3
385 swi r4, r1, PTO + PT_R4
386
381 /* We're returning to user mode, so check for various conditions that 387 /* We're returning to user mode, so check for various conditions that
382 * trigger rescheduling. */ 388 * trigger rescheduling. */
383 # FIXME: Restructure all these flag checks. 389 /* FIXME: Restructure all these flag checks. */
384 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 390 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
385 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
386 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 391 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
387 andi r11, r11, _TIF_WORK_SYSCALL_MASK 392 andi r11, r11, _TIF_WORK_SYSCALL_MASK
388 beqi r11, 1f 393 beqi r11, 1f
389 394
390 swi r3, r1, PTO + PT_R3
391 swi r4, r1, PTO + PT_R4
392 brlid r15, do_syscall_trace_leave 395 brlid r15, do_syscall_trace_leave
393 addik r5, r1, PTO + PT_R0 396 addik r5, r1, PTO + PT_R0
394 lwi r3, r1, PTO + PT_R3
395 lwi r4, r1, PTO + PT_R4
3961: 3971:
397
398 /* We're returning to user mode, so check for various conditions that 398 /* We're returning to user mode, so check for various conditions that
399 * trigger rescheduling. */ 399 * trigger rescheduling. */
400 /* Get current task ptr into r11 */ 400 /* get thread info from current task */
401 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 401 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
402 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
403 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 402 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
404 andi r11, r11, _TIF_NEED_RESCHED; 403 andi r11, r11, _TIF_NEED_RESCHED;
405 beqi r11, 5f; 404 beqi r11, 5f;
406 405
407 swi r3, r1, PTO + PT_R3; /* store syscall result */
408 swi r4, r1, PTO + PT_R4;
409 bralid r15, schedule; /* Call scheduler */ 406 bralid r15, schedule; /* Call scheduler */
410 nop; /* delay slot */ 407 nop; /* delay slot */
411 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
412 lwi r4, r1, PTO + PT_R4;
413 408
414 /* Maybe handle a signal */ 409 /* Maybe handle a signal */
4155: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 4105: /* get thread info from current task*/
416 lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 411 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
417 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 412 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
418 andi r11, r11, _TIF_SIGPENDING; 413 andi r11, r11, _TIF_SIGPENDING;
419 beqi r11, 1f; /* Signals to handle, handle them */ 414 beqi r11, 1f; /* Signals to handle, handle them */
420 415
421 swi r3, r1, PTO + PT_R3; /* store syscall result */
422 swi r4, r1, PTO + PT_R4;
423 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 416 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
424 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
425 addi r7, r0, 1; /* Arg 3: int in_syscall */ 417 addi r7, r0, 1; /* Arg 3: int in_syscall */
426 bralid r15, do_signal; /* Handle any signals */ 418 bralid r15, do_signal; /* Handle any signals */
427 nop; 419 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
420
421/* Finally, return to user state. */
4221:
428 lwi r3, r1, PTO + PT_R3; /* restore syscall result */ 423 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
429 lwi r4, r1, PTO + PT_R4; 424 lwi r4, r1, PTO + PT_R4;
430 425
431/* Finally, return to user state. */ 426 swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
4321: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 427 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
433 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
434 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
435 VM_OFF; 428 VM_OFF;
436 tophys(r1,r1); 429 tophys(r1,r1);
437 RESTORE_REGS; 430 RESTORE_REGS;
@@ -496,17 +489,6 @@ C_ENTRY(sys_execve):
496 brid microblaze_execve; /* Do real work (tail-call).*/ 489 brid microblaze_execve; /* Do real work (tail-call).*/
497 nop; 490 nop;
498 491
499C_ENTRY(sys_rt_sigsuspend_wrapper):
500 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
501 swi r4, r1, PTO+PT_R4;
502 la r7, r1, PTO; /* add user context as 3rd arg */
503 brlid r15, sys_rt_sigsuspend; /* Do real work.*/
504 nop;
505 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
506 lwi r4, r1, PTO+PT_R4;
507 bri ret_from_trap /* fall through will not work here due to align */
508 nop;
509
510C_ENTRY(sys_rt_sigreturn_wrapper): 492C_ENTRY(sys_rt_sigreturn_wrapper):
511 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */ 493 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
512 swi r4, r1, PTO+PT_R4; 494 swi r4, r1, PTO+PT_R4;
@@ -572,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
572 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ 554 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
573 addi r11, r0, 1; \ 555 addi r11, r0, 1; \
574 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ 556 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
5752: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ 5572: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
576 /* Save away the syscall number. */ \ 558 /* Save away the syscall number. */ \
577 swi r0, r1, PTO+PT_R0; \ 559 swi r0, r1, PTO+PT_R0; \
578 tovirt(r1,r1) 560 tovirt(r1,r1)
@@ -680,9 +662,7 @@ C_ENTRY(ret_from_exc):
680 662
681 /* We're returning to user mode, so check for various conditions that 663 /* We're returning to user mode, so check for various conditions that
682 trigger rescheduling. */ 664 trigger rescheduling. */
683 /* Get current task ptr into r11 */ 665 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
684 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
685 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
686 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 666 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
687 andi r11, r11, _TIF_NEED_RESCHED; 667 andi r11, r11, _TIF_NEED_RESCHED;
688 beqi r11, 5f; 668 beqi r11, 5f;
@@ -692,8 +672,7 @@ C_ENTRY(ret_from_exc):
692 nop; /* delay slot */ 672 nop; /* delay slot */
693 673
694 /* Maybe handle a signal */ 674 /* Maybe handle a signal */
6955: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 6755: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
696 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
697 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 676 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
698 andi r11, r11, _TIF_SIGPENDING; 677 andi r11, r11, _TIF_SIGPENDING;
699 beqi r11, 1f; /* Signals to handle, handle them */ 678 beqi r11, 1f; /* Signals to handle, handle them */
@@ -711,20 +690,14 @@ C_ENTRY(ret_from_exc):
711 * (in a possibly modified form) after do_signal returns. 690 * (in a possibly modified form) after do_signal returns.
 712 * store return registers separately because this macro is used 691 * store return registers separately because this macro is used
 713 * for other exceptions */ 692 * for other exceptions */
714 swi r3, r1, PTO + PT_R3;
715 swi r4, r1, PTO + PT_R4;
716 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 693 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
717 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
718 addi r7, r0, 0; /* Arg 3: int in_syscall */ 694 addi r7, r0, 0; /* Arg 3: int in_syscall */
719 bralid r15, do_signal; /* Handle any signals */ 695 bralid r15, do_signal; /* Handle any signals */
720 nop; 696 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
721 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
722 lwi r4, r1, PTO+PT_R4;
723 697
724/* Finally, return to user state. */ 698/* Finally, return to user state. */
7251: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 6991: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
726 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 700 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
727 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
728 VM_OFF; 701 VM_OFF;
729 tophys(r1,r1); 702 tophys(r1,r1);
730 703
@@ -813,7 +786,7 @@ C_ENTRY(_interrupt):
813 swi r11, r0, TOPHYS(PER_CPU(KM)); 786 swi r11, r0, TOPHYS(PER_CPU(KM));
814 787
8152: 7882:
816 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 789 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
817 swi r0, r1, PTO + PT_R0; 790 swi r0, r1, PTO + PT_R0;
818 tovirt(r1,r1) 791 tovirt(r1,r1)
819 la r5, r1, PTO; 792 la r5, r1, PTO;
@@ -828,8 +801,7 @@ ret_from_irq:
828 lwi r11, r1, PTO + PT_MODE; 801 lwi r11, r1, PTO + PT_MODE;
829 bnei r11, 2f; 802 bnei r11, 2f;
830 803
831 add r11, r0, CURRENT_TASK; 804 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
832 lwi r11, r11, TS_THREAD_INFO;
833 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ 805 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
834 andi r11, r11, _TIF_NEED_RESCHED; 806 andi r11, r11, _TIF_NEED_RESCHED;
835 beqi r11, 5f 807 beqi r11, 5f
@@ -837,8 +809,7 @@ ret_from_irq:
837 nop; /* delay slot */ 809 nop; /* delay slot */
838 810
839 /* Maybe handle a signal */ 811 /* Maybe handle a signal */
8405: add r11, r0, CURRENT_TASK; 8125: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
841 lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
842 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 813 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
843 andi r11, r11, _TIF_SIGPENDING; 814 andi r11, r11, _TIF_SIGPENDING;
844 beqid r11, no_intr_resched 815 beqid r11, no_intr_resched
@@ -853,8 +824,7 @@ no_intr_resched:
853 /* Disable interrupts, we are now committed to the state restore */ 824 /* Disable interrupts, we are now committed to the state restore */
854 disable_irq 825 disable_irq
855 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ 826 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
856 add r11, r0, CURRENT_TASK; 827 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
857 swi r11, r0, PER_CPU(CURRENT_SAVE);
858 VM_OFF; 828 VM_OFF;
859 tophys(r1,r1); 829 tophys(r1,r1);
860 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 830 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
@@ -864,7 +834,28 @@ no_intr_resched:
864 lwi r1, r1, PT_R1 - PT_SIZE; 834 lwi r1, r1, PT_R1 - PT_SIZE;
865 bri 6f; 835 bri 6f;
866/* MS: Return to kernel state. */ 836/* MS: Return to kernel state. */
8672: VM_OFF /* MS: turn off MMU */ 8372:
838#ifdef CONFIG_PREEMPT
839 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
840 /* MS: get preempt_count from thread info */
841 lwi r5, r11, TI_PREEMPT_COUNT;
842 bgti r5, restore;
843
844 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
845 andi r5, r5, _TIF_NEED_RESCHED;
846 beqi r5, restore /* if zero jump over */
847
848preempt:
 849 /* interrupts are off, that's why preempt_schedule_irq is called */
850 bralid r15, preempt_schedule_irq
851 nop
852 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
853 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
854 andi r5, r5, _TIF_NEED_RESCHED;
855 bnei r5, preempt /* if non zero jump to resched */
856restore:
857#endif
858 VM_OFF /* MS: turn off MMU */
868 tophys(r1,r1) 859 tophys(r1,r1)
869 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 860 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
870 lwi r4, r1, PTO + PT_R4; 861 lwi r4, r1, PTO + PT_R4;
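The CONFIG_PREEMPT block added above implements the usual kernel-preemption rule on IRQ exit: do nothing if preempt_count is non-zero, otherwise keep calling preempt_schedule_irq() while TIF_NEED_RESCHED is still set. A compact C rendering of that control flow, using simplified stand-in names:

#include <stdio.h>

struct thread_info_sim { int preempt_count; int need_resched; };

static void preempt_schedule_irq_sim(struct thread_info_sim *ti)
{
	printf("schedule with irqs off\n");
	ti->need_resched = 0;		/* pretend the reschedule happened */
}

static void irq_return_to_kernel(struct thread_info_sim *ti)
{
	if (ti->preempt_count > 0)	/* "bgti r5, restore" */
		return;
	while (ti->need_resched)	/* "bnei r5, preempt" re-check loop */
		preempt_schedule_irq_sim(ti);
}

int main(void)
{
	struct thread_info_sim ti = { 0, 1 };

	irq_return_to_kernel(&ti);
	return 0;
}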
@@ -926,7 +917,7 @@ C_ENTRY(_debug_exception):
926 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 917 swi r11, r1, PTO+PT_R1; /* Store user SP. */
927 addi r11, r0, 1; 918 addi r11, r0, 1;
928 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 919 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
9292: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 9202: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
930 /* Save away the syscall number. */ 921 /* Save away the syscall number. */
931 swi r0, r1, PTO+PT_R0; 922 swi r0, r1, PTO+PT_R0;
932 tovirt(r1,r1) 923 tovirt(r1,r1)
@@ -946,8 +937,7 @@ dbtrap_call: rtbd r11, 0;
946 bnei r11, 2f; 937 bnei r11, 2f;
947 938
948 /* Get current task ptr into r11 */ 939 /* Get current task ptr into r11 */
949 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 940 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
950 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
951 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 941 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
952 andi r11, r11, _TIF_NEED_RESCHED; 942 andi r11, r11, _TIF_NEED_RESCHED;
953 beqi r11, 5f; 943 beqi r11, 5f;
@@ -960,8 +950,7 @@ dbtrap_call: rtbd r11, 0;
960 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ 950 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
961 951
962 /* Maybe handle a signal */ 952 /* Maybe handle a signal */
9635: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 9535: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
964 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
965 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 954 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
966 andi r11, r11, _TIF_SIGPENDING; 955 andi r11, r11, _TIF_SIGPENDING;
967 beqi r11, 1f; /* Signals to handle, handle them */ 956 beqi r11, 1f; /* Signals to handle, handle them */
@@ -977,16 +966,14 @@ dbtrap_call: rtbd r11, 0;
977 (in a possibly modified form) after do_signal returns. */ 966 (in a possibly modified form) after do_signal returns. */
978 967
979 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
980 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
981 addi r7, r0, 0; /* Arg 3: int in_syscall */ 969 addi r7, r0, 0; /* Arg 3: int in_syscall */
982 bralid r15, do_signal; /* Handle any signals */ 970 bralid r15, do_signal; /* Handle any signals */
983 nop; 971 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
984 972
985 973
986/* Finally, return to user state. */ 974/* Finally, return to user state. */
9871: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 9751: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
988 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 976 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
989 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
990 VM_OFF; 977 VM_OFF;
991 tophys(r1,r1); 978 tophys(r1,r1);
992 979
@@ -1018,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */
1018 1005
1019ENTRY(_switch_to) 1006ENTRY(_switch_to)
1020 /* prepare return value */ 1007 /* prepare return value */
1021 addk r3, r0, r31 1008 addk r3, r0, CURRENT_TASK
1022 1009
1023 /* save registers in cpu_context */ 1010 /* save registers in cpu_context */
1024 /* use r11 and r12, volatile registers, as temp register */ 1011 /* use r11 and r12, volatile registers, as temp register */
@@ -1062,10 +1049,10 @@ ENTRY(_switch_to)
1062 nop 1049 nop
1063 swi r12, r11, CC_FSR 1050 swi r12, r11, CC_FSR
1064 1051
1065 /* update r31, the current */ 1052 /* update r31 (current task) - pointer to the task which will run next */
1066 lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ 1053 lwi CURRENT_TASK, r6, TI_TASK
1067 /* store it to current_save too */ 1054 /* store it to current_save too */
1068 swi r31, r0, PER_CPU(CURRENT_SAVE) 1055 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
1069 1056
1070 /* get new process' cpu context and restore */ 1057 /* get new process' cpu context and restore */
1071 /* give me start where start context of next task */ 1058 /* give me start where start context of next task */
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index d9f70f83097f..02cbdfe5aa8d 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -121,7 +121,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
121 } 121 }
122 printk(KERN_WARNING "Divide by zero exception " \ 122 printk(KERN_WARNING "Divide by zero exception " \
123 "in kernel mode.\n"); 123 "in kernel mode.\n");
124 die("Divide by exception", regs, SIGBUS); 124 die("Divide by zero exception", regs, SIGBUS);
125 break; 125 break;
126 case MICROBLAZE_FPU_EXCEPTION: 126 case MICROBLAZE_FPU_EXCEPTION:
127 pr_debug(KERN_WARNING "FPU exception\n"); 127 pr_debug(KERN_WARNING "FPU exception\n");
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
new file mode 100644
index 000000000000..515feb404555
--- /dev/null
+++ b/arch/microblaze/kernel/ftrace.c
@@ -0,0 +1,231 @@
1/*
2 * Ftrace support for Microblaze.
3 *
4 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2009 PetaLogix
6 *
7 * Based on MIPS and PowerPC ftrace code
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14#include <asm/cacheflush.h>
15#include <linux/ftrace.h>
16
17#ifdef CONFIG_FUNCTION_GRAPH_TRACER
18/*
19 * Hook the return address and push it in the stack of return addrs
20 * in current thread info.
21 */
22void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
23{
24 unsigned long old;
25 int faulted, err;
26 struct ftrace_graph_ent trace;
27 unsigned long return_hooker = (unsigned long)
28 &return_to_handler;
29
30 if (unlikely(atomic_read(&current->tracing_graph_pause)))
31 return;
32
33 /*
34 * Protect against fault, even if it shouldn't
 35 * happen. This tool is too intrusive to
36 * ignore such a protection.
37 */
38 asm volatile(" 1: lwi %0, %2, 0; \
39 2: swi %3, %2, 0; \
40 addik %1, r0, 0; \
41 3: \
42 .section .fixup, \"ax\"; \
43 4: brid 3b; \
44 addik %1, r0, 1; \
45 .previous; \
46 .section __ex_table,\"a\"; \
47 .word 1b,4b; \
48 .word 2b,4b; \
49 .previous;" \
50 : "=&r" (old), "=r" (faulted)
51 : "r" (parent), "r" (return_hooker)
52 );
53
54 if (unlikely(faulted)) {
55 ftrace_graph_stop();
56 WARN_ON(1);
57 return;
58 }
59
60 err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
61 if (err == -EBUSY) {
62 *parent = old;
63 return;
64 }
65
66 trace.func = self_addr;
67 /* Only trace if the calling function expects to */
68 if (!ftrace_graph_entry(&trace)) {
69 current->curr_ret_stack--;
70 *parent = old;
71 }
72}
73#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
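prepare_ftrace_return() above diverts a function's return: the saved return address in the caller's frame is swapped for return_to_handler and the original is pushed so the tracer can jump back later. The toy C program below shows the idea with plain function pointers; it is a conceptual model only and skips the fault protection and depth bookkeeping of the real code.

#include <stdio.h>

typedef void (*retaddr_t)(void);

static retaddr_t saved_parent;			/* one-deep "return stack" */

static void original_caller(void) { puts("back in the original caller"); }

static void return_to_handler_sim(void)
{
	puts("function exit traced");
	saved_parent();		/* jump back to the real return address */
}

/* stands in for prepare_ftrace_return(parent, self_addr) */
static void hook_return(retaddr_t *parent)
{
	saved_parent = *parent;			/* remember the old address */
	*parent = return_to_handler_sim;	/* divert the return */
}

int main(void)
{
	retaddr_t frame_slot = original_caller;	/* "saved return address" */

	hook_return(&frame_slot);
	frame_slot();		/* the "return" now goes through the tracer */
	return 0;
}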
74
75#ifdef CONFIG_DYNAMIC_FTRACE
 76/* save value to addr - it is safe to do it in asm */
77static int ftrace_modify_code(unsigned long addr, unsigned int value)
78{
79 int faulted = 0;
80
81 __asm__ __volatile__(" 1: swi %2, %1, 0; \
82 addik %0, r0, 0; \
83 2: \
84 .section .fixup, \"ax\"; \
85 3: brid 2b; \
86 addik %0, r0, 1; \
87 .previous; \
88 .section __ex_table,\"a\"; \
89 .word 1b,3b; \
90 .previous;" \
91 : "=r" (faulted)
92 : "r" (addr), "r" (value)
93 );
94
95 if (unlikely(faulted))
96 return -EFAULT;
97
98 return 0;
99}
100
101#define MICROBLAZE_NOP 0x80000000
102#define MICROBLAZE_BRI 0xb800000C
103
 104static unsigned int recorded; /* whether the original instruction was saved */
105static unsigned int imm; /* saving whole imm instruction */
106
 107/* There are two approaches how to implement the ftrace_make_nop function - see below */
108#undef USE_FTRACE_NOP
109
110#ifdef USE_FTRACE_NOP
111static unsigned int bralid; /* saving whole bralid instruction */
112#endif
113
114int ftrace_make_nop(struct module *mod,
115 struct dyn_ftrace *rec, unsigned long addr)
116{
 117 /* this is the code sequence we are working with
118 * b000c000 imm -16384
119 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
120 * 80000000 or r0, r0, r0
121 *
122 * The first solution (!USE_FTRACE_NOP-could be called branch solution)
123 * b000c000 bri 12 (0xC - jump to any other instruction)
124 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
125 * 80000000 or r0, r0, r0
126 * any other instruction
127 *
128 * The second solution (USE_FTRACE_NOP) - no jump just nops
129 * 80000000 or r0, r0, r0
130 * 80000000 or r0, r0, r0
131 * 80000000 or r0, r0, r0
132 */
133 int ret = 0;
134
135 if (recorded == 0) {
136 recorded = 1;
137 imm = *(unsigned int *)rec->ip;
138 pr_debug("%s: imm:0x%x\n", __func__, imm);
139#ifdef USE_FTRACE_NOP
140 bralid = *(unsigned int *)(rec->ip + 4);
141 pr_debug("%s: bralid 0x%x\n", __func__, bralid);
142#endif /* USE_FTRACE_NOP */
143 }
144
145#ifdef USE_FTRACE_NOP
146 ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
147 ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
148#else /* USE_FTRACE_NOP */
149 ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
150#endif /* USE_FTRACE_NOP */
151 return ret;
152}
153
 154/* I believe ftrace_make_nop is called first, before this function */
155int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
156{
157 int ret;
158 pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
159 __func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
160 ret = ftrace_modify_code(rec->ip, imm);
161#ifdef USE_FTRACE_NOP
162 pr_debug("%s: bralid:0x%x\n", __func__, bralid);
163 ret += ftrace_modify_code(rec->ip + 4, bralid);
164#endif /* USE_FTRACE_NOP */
165 return ret;
166}
167
168int __init ftrace_dyn_arch_init(void *data)
169{
 170 /* The return code is returned via data */
171 *(unsigned long *)data = 0;
172
173 return 0;
174}
175
176int ftrace_update_ftrace_func(ftrace_func_t func)
177{
178 unsigned long ip = (unsigned long)(&ftrace_call);
179 unsigned int upper = (unsigned int)func;
180 unsigned int lower = (unsigned int)func;
181 int ret = 0;
182
 183 /* create the proper code to store func at the ftrace_call site */
184 upper = 0xb0000000 + (upper >> 16); /* imm func_upper */
185 lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */
186
187 pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n",
188 __func__, (unsigned int)func, (unsigned int)ip, upper, lower);
189
190 /* save upper and lower code */
191 ret = ftrace_modify_code(ip, upper);
192 ret += ftrace_modify_code(ip + 4, lower);
193
194 /* We just need to replace the rtsd r15, 8 with NOP */
195 ret += ftrace_modify_code((unsigned long)&ftrace_caller,
196 MICROBLAZE_NOP);
197
 198 /* All changes are done - make the caches consistent */
199 flush_icache();
200 return ret;
201}
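The encoding in ftrace_update_ftrace_func() is easy to check by hand: the target address is split into an imm prefix (opcode 0xb0000000 carrying the upper 16 bits) and an "addik r20, r0, <lower 16 bits>" (opcode 0x32800000). A worked example with an arbitrary address:

#include <stdio.h>

int main(void)
{
	unsigned int func  = 0xc0123456u;                    /* example address */
	unsigned int upper = 0xb0000000u + (func >> 16);     /* imm 0xc012 */
	unsigned int lower = 0x32800000u + (func & 0xffffu); /* addik r20, r0, 0x3456 */

	printf("imm   word: 0x%08x\n", upper);	/* prints 0xb000c012 */
	printf("addik word: 0x%08x\n", lower);	/* prints 0x32803456 */
	return 0;
}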
202
203#ifdef CONFIG_FUNCTION_GRAPH_TRACER
204unsigned int old_jump; /* saving place for jump instruction */
205
206int ftrace_enable_ftrace_graph_caller(void)
207{
208 unsigned int ret;
209 unsigned long ip = (unsigned long)(&ftrace_call_graph);
210
211 old_jump = *(unsigned int *)ip; /* save jump over instruction */
212 ret = ftrace_modify_code(ip, MICROBLAZE_NOP);
213 flush_icache();
214
215 pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump);
216 return ret;
217}
218
219int ftrace_disable_ftrace_graph_caller(void)
220{
221 unsigned int ret;
222 unsigned long ip = (unsigned long)(&ftrace_call_graph);
223
224 ret = ftrace_modify_code(ip, old_jump);
225 flush_icache();
226
227 pr_debug("%s\n", __func__);
228 return ret;
229}
230#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
231#endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 697ce3007f30..1bf739888260 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -28,10 +28,11 @@
28 * for more details. 28 * for more details.
29 */ 29 */
30 30
31#include <linux/init.h>
31#include <linux/linkage.h> 32#include <linux/linkage.h>
32#include <asm/thread_info.h> 33#include <asm/thread_info.h>
33#include <asm/page.h> 34#include <asm/page.h>
34#include <asm/prom.h> /* for OF_DT_HEADER */ 35#include <linux/of_fdt.h> /* for OF_DT_HEADER */
35 36
36#ifdef CONFIG_MMU 37#ifdef CONFIG_MMU
37#include <asm/setup.h> /* COMMAND_LINE_SIZE */ 38#include <asm/setup.h> /* COMMAND_LINE_SIZE */
@@ -49,8 +50,14 @@ swapper_pg_dir:
49 50
50#endif /* CONFIG_MMU */ 51#endif /* CONFIG_MMU */
51 52
52 .text 53 __HEAD
53ENTRY(_start) 54ENTRY(_start)
55#if CONFIG_KERNEL_BASE_ADDR == 0
56 brai TOPHYS(real_start)
57 .org 0x100
58real_start:
59#endif
60
54 mfs r1, rmsr 61 mfs r1, rmsr
55 andi r1, r1, ~2 62 andi r1, r1, ~2
56 mts rmsr, r1 63 mts rmsr, r1
@@ -99,8 +106,8 @@ no_fdt_arg:
99 tophys(r4,r4) /* convert to phys address */ 106 tophys(r4,r4) /* convert to phys address */
100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 107 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
101_copy_command_line: 108_copy_command_line:
 102 lbu r7, r5, r6 /* r7=r5+r6 - r5 contains pointer to command line */ 109 lbu r2, r5, r6 /* r2=r5+r6 - r5 contains pointer to command line */
103 sb r7, r4, r6 /* addr[r4+r6]= r7*/ 110 sb r2, r4, r6 /* addr[r4+r6]= r2*/
104 addik r6, r6, 1 /* increment counting */ 111 addik r6, r6, 1 /* increment counting */
105 bgtid r3, _copy_command_line /* loop for all entries */ 112 bgtid r3, _copy_command_line /* loop for all entries */
 106 addik r3, r3, -1 /* decrement loop */ 113 addik r3, r3, -1 /* decrement loop */
@@ -128,7 +135,7 @@ _copy_bram:
128 * virtual to physical. 135 * virtual to physical.
129 */ 136 */
130 nop 137 nop
131 addik r3, r0, 63 /* Invalidate all TLB entries */ 138 addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
132_invalidate: 139_invalidate:
133 mts rtlbx, r3 140 mts rtlbx, r3
134 mts rtlbhi, r0 /* flush: ensure V is clear */ 141 mts rtlbhi, r0 /* flush: ensure V is clear */
@@ -136,6 +143,11 @@ _invalidate:
136 addik r3, r3, -1 143 addik r3, r3, -1
137 /* sync */ 144 /* sync */
138 145
146 /* Setup the kernel PID */
147 mts rpid,r0 /* Load the kernel PID */
148 nop
149 bri 4
150
139 /* 151 /*
140 * We should still be executing code at physical address area 152 * We should still be executing code at physical address area
141 * RAM_BASEADDR at this point. However, kernel code is at 153 * RAM_BASEADDR at this point. However, kernel code is at
@@ -146,10 +158,6 @@ _invalidate:
146 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ 158 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
147 tophys(r4,r3) /* Load the kernel physical address */ 159 tophys(r4,r3) /* Load the kernel physical address */
148 160
149 mts rpid,r0 /* Load the kernel PID */
150 nop
151 bri 4
152
153 /* 161 /*
154 * Configure and load two entries into TLB slots 0 and 1. 162 * Configure and load two entries into TLB slots 0 and 1.
155 * In case we are pinning TLBs, these are reserved in by the 163 * In case we are pinning TLBs, these are reserved in by the
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
index 1bdf20222b92..522751737cfa 100644
--- a/arch/microblaze/kernel/heartbeat.c
+++ b/arch/microblaze/kernel/heartbeat.c
@@ -45,6 +45,7 @@ void heartbeat(void)
45void setup_heartbeat(void) 45void setup_heartbeat(void)
46{ 46{
47 struct device_node *gpio = NULL; 47 struct device_node *gpio = NULL;
48 int *prop;
48 int j; 49 int j;
49 char *gpio_list[] = { 50 char *gpio_list[] = {
50 "xlnx,xps-gpio-1.00.a", 51 "xlnx,xps-gpio-1.00.a",
@@ -58,10 +59,14 @@ void setup_heartbeat(void)
58 break; 59 break;
59 } 60 }
60 61
61 base_addr = *(int *) of_get_property(gpio, "reg", NULL); 62 if (gpio) {
62 base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE); 63 base_addr = *(int *) of_get_property(gpio, "reg", NULL);
63 printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr); 64 base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
65 printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr);
64 66
65 if (*(int *) of_get_property(gpio, "xlnx,is-bidir", NULL)) 67 /* GPIO is configured as output */
66 out_be32(base_addr + 4, 0); /* GPIO is configured as output */ 68 prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
69 if (prop)
70 out_be32(base_addr + 4, 0);
71 }
67} 72}
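The heartbeat fix above is a plain robustness change: dereference the GPIO node's properties only when the node and the property were actually found. A small hypothetical C sketch of the same guard pattern (find_node and struct node are invented, not the of_* API):

#include <stdio.h>
#include <stddef.h>

/* invented stand-ins for the device-tree node and of_get_property() */
struct node {
	const unsigned int *reg;
	const int *is_bidir;
};

static const struct node *find_node(int present)
{
	static const unsigned int reg = 0x81400000u;
	static const int bidir = 1;
	static const struct node n = { &reg, &bidir };

	return present ? &n : NULL;
}

int main(void)
{
	const struct node *gpio = find_node(1);

	if (gpio) {				/* guard added by the patch */
		printf("Heartbeat GPIO at 0x%x\n", *gpio->reg);
		if (gpio->is_bidir)		/* property may be missing */
			printf("configure GPIO as output\n");
	}
	return 0;
}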
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03aa841..995a2123635b 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@ _hw_exception_handler:
313 mfs r5, rmsr; 313 mfs r5, rmsr;
314 nop 314 nop
315 swi r5, r1, 0; 315 swi r5, r1, 0;
316 mfs r3, resr 316 mfs r4, resr
317 nop 317 nop
318 mfs r4, rear; 318 mfs r3, rear;
319 nop 319 nop
320 320
321#ifndef CONFIG_MMU 321#ifndef CONFIG_MMU
322 andi r5, r3, 0x1000; /* Check ESR[DS] */ 322 andi r5, r4, 0x1000; /* Check ESR[DS] */
323 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ 323 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
324 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 324 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
325 nop 325 nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
327 swi r17, r1, PT_R17 327 swi r17, r1, PT_R17
328#endif 328#endif
329 329
330 andi r5, r3, 0x1F; /* Extract ESR[EXC] */ 330 andi r5, r4, 0x1F; /* Extract ESR[EXC] */
331 331
332#ifdef CONFIG_MMU 332#ifdef CONFIG_MMU
333 /* Calculate exception vector offset = r5 << 2 */ 333 /* Calculate exception vector offset = r5 << 2 */
334 addk r6, r5, r5; /* << 1 */ 334 addk r6, r5, r5; /* << 1 */
335 addk r6, r6, r6; /* << 2 */ 335 addk r6, r6, r6; /* << 2 */
336 336
337#ifdef DEBUG
337/* counting which exception happen */ 338/* counting which exception happen */
338 lwi r5, r0, 0x200 + TOPHYS(r0_ram) 339 lwi r5, r0, 0x200 + TOPHYS(r0_ram)
339 addi r5, r5, 1 340 addi r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
341 lwi r5, r6, 0x200 + TOPHYS(r0_ram) 342 lwi r5, r6, 0x200 + TOPHYS(r0_ram)
342 addi r5, r5, 1 343 addi r5, r5, 1
343 swi r5, r6, 0x200 + TOPHYS(r0_ram) 344 swi r5, r6, 0x200 + TOPHYS(r0_ram)
345#endif
344/* end */ 346/* end */
345 /* Load the HW Exception vector */ 347 /* Load the HW Exception vector */
346 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) 348 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
376 swi r18, r1, PT_R18 378 swi r18, r1, PT_R18
377 379
378 or r5, r1, r0 380 or r5, r1, r0
379 andi r6, r3, 0x1F; /* Load ESR[EC] */ 381 andi r6, r4, 0x1F; /* Load ESR[EC] */
380 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ 382 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
381 swi r7, r1, PT_MODE 383 swi r7, r1, PT_MODE
382 mfs r7, rfsr 384 mfs r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
426 */ 428 */
427handle_unaligned_ex: 429handle_unaligned_ex:
428 /* Working registers already saved: R3, R4, R5, R6 430 /* Working registers already saved: R3, R4, R5, R6
429 * R3 = ESR 431 * R4 = ESR
430 * R4 = EAR 432 * R3 = EAR
431 */ 433 */
432#ifdef CONFIG_MMU 434#ifdef CONFIG_MMU
433 andi r6, r3, 0x1000 /* Check ESR[DS] */ 435 andi r6, r4, 0x1000 /* Check ESR[DS] */
434 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ 436 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
435 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 437 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
436 nop 438 nop
@@ -439,7 +441,7 @@ _no_delayslot:
439 RESTORE_STATE; 441 RESTORE_STATE;
440 bri unaligned_data_trap 442 bri unaligned_data_trap
441#endif 443#endif
442 andi r6, r3, 0x3E0; /* Mask and extract the register operand */ 444 andi r6, r4, 0x3E0; /* Mask and extract the register operand */
443 srl r6, r6; /* r6 >> 5 */ 445 srl r6, r6; /* r6 >> 5 */
444 srl r6, r6; 446 srl r6, r6;
445 srl r6, r6; 447 srl r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
448 /* Store the register operand in a temporary location */ 450 /* Store the register operand in a temporary location */
449 sbi r6, r0, TOPHYS(ex_reg_op); 451 sbi r6, r0, TOPHYS(ex_reg_op);
450 452
451 andi r6, r3, 0x400; /* Extract ESR[S] */ 453 andi r6, r4, 0x400; /* Extract ESR[S] */
452 bnei r6, ex_sw; 454 bnei r6, ex_sw;
453ex_lw: 455ex_lw:
454 andi r6, r3, 0x800; /* Extract ESR[W] */ 456 andi r6, r4, 0x800; /* Extract ESR[W] */
455 beqi r6, ex_lhw; 457 beqi r6, ex_lhw;
456 lbui r5, r4, 0; /* Exception address in r4 */ 458 lbui r5, r3, 0; /* Exception address in r3 */
457 /* Load a word, byte-by-byte from destination address 459 /* Load a word, byte-by-byte from destination address
458 and save it in tmp space */ 460 and save it in tmp space */
459 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 461 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
460 lbui r5, r4, 1; 462 lbui r5, r3, 1;
461 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 463 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
462 lbui r5, r4, 2; 464 lbui r5, r3, 2;
463 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); 465 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
464 lbui r5, r4, 3; 466 lbui r5, r3, 3;
465 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); 467 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
466 /* Get the destination register value into r3 */ 468 /* Get the destination register value into r4 */
467 lwi r3, r0, TOPHYS(ex_tmp_data_loc_0); 469 lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
468 bri ex_lw_tail; 470 bri ex_lw_tail;
469ex_lhw: 471ex_lhw:
470 lbui r5, r4, 0; /* Exception address in r4 */ 472 lbui r5, r3, 0; /* Exception address in r3 */
471 /* Load a half-word, byte-by-byte from destination 473 /* Load a half-word, byte-by-byte from destination
472 address and save it in tmp space */ 474 address and save it in tmp space */
473 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 475 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
474 lbui r5, r4, 1; 476 lbui r5, r3, 1;
475 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 477 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
476 /* Get the destination register value into r3 */ 478 /* Get the destination register value into r4 */
477 lhui r3, r0, TOPHYS(ex_tmp_data_loc_0); 479 lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
478ex_lw_tail: 480ex_lw_tail:
479 /* Get the destination register number into r5 */ 481 /* Get the destination register number into r5 */
480 lbui r5, r0, TOPHYS(ex_reg_op); 482 lbui r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
502 andi r6, r6, 0x800; /* Extract ESR[W] */ 504 andi r6, r6, 0x800; /* Extract ESR[W] */
503 beqi r6, ex_shw; 505 beqi r6, ex_shw;
504 /* Get the word - delay slot */ 506 /* Get the word - delay slot */
505 swi r3, r0, TOPHYS(ex_tmp_data_loc_0); 507 swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
506 /* Store the word, byte-by-byte into destination address */ 508 /* Store the word, byte-by-byte into destination address */
507 lbui r3, r0, TOPHYS(ex_tmp_data_loc_0); 509 lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
508 sbi r3, r4, 0; 510 sbi r4, r3, 0;
509 lbui r3, r0, TOPHYS(ex_tmp_data_loc_1); 511 lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
510 sbi r3, r4, 1; 512 sbi r4, r3, 1;
511 lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); 513 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
512 sbi r3, r4, 2; 514 sbi r4, r3, 2;
513 lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); 515 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
514 sbi r3, r4, 3; 516 sbi r4, r3, 3;
515 bri ex_handler_done; 517 bri ex_handler_done;
516 518
517ex_shw: 519ex_shw:
518 /* Store the lower half-word, byte-by-byte into destination address */ 520 /* Store the lower half-word, byte-by-byte into destination address */
519 swi r3, r0, TOPHYS(ex_tmp_data_loc_0); 521 swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
520 lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); 522 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
521 sbi r3, r4, 0; 523 sbi r4, r3, 0;
522 lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); 524 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
523 sbi r3, r4, 1; 525 sbi r4, r3, 1;
524ex_sw_end: /* Exception handling of store word, ends. */ 526ex_sw_end: /* Exception handling of store word, ends. */
525 527
526ex_handler_done: 528ex_handler_done:
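
The ex_lw/ex_sw paths above emulate an unaligned access by moving the word one byte at a time through the ex_tmp_data_loc_0..3 scratch slots; the register swap in this hunk (the faulting address now lives in r3, the data in r4) does not change that flow. Below is a minimal userspace C sketch of the same byte-by-byte reassembly, not the kernel handler itself; the buffer, value and function names are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Plays the role of ex_sw_tail: split the word into bytes via a scratch
 * buffer (ex_tmp_data_loc_0..3 in the handler) and store them one at a
 * time so the destination may be unaligned. */
static void store_word_bytewise(uint8_t *dst, uint32_t val)
{
	uint8_t tmp[4];

	memcpy(tmp, &val, sizeof(tmp));		/* "swi r4, r0, ex_tmp_data_loc_0" */
	dst[0] = tmp[0];			/* "sbi r4, r3, 0" ... "sbi r4, r3, 3" */
	dst[1] = tmp[1];
	dst[2] = tmp[2];
	dst[3] = tmp[3];
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	uint32_t readback;

	store_word_bytewise(buf + 1, 0xdeadbeef);	/* unaligned destination */
	memcpy(&readback, buf + 1, sizeof(readback));
	printf("stored 0x%08x\n", (unsigned)readback);
	return 0;
}
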
@@ -560,21 +562,16 @@ ex_handler_done:
560 */ 562 */
561 mfs r11, rpid 563 mfs r11, rpid
562 nop 564 nop
563 bri 4
564 mfs r3, rear /* Get faulting address */
565 nop
566 /* If we are faulting a kernel address, we have to use the 565 /* If we are faulting a kernel address, we have to use the
567 * kernel page tables. 566 * kernel page tables.
568 */ 567 */
569 ori r4, r0, CONFIG_KERNEL_START 568 ori r5, r0, CONFIG_KERNEL_START
570 cmpu r4, r3, r4 569 cmpu r5, r3, r5
571 bgti r4, ex3 570 bgti r5, ex3
572 /* First, check if it was a zone fault (which means a user 571 /* First, check if it was a zone fault (which means a user
573 * tried to access a kernel or read-protected page - always 572 * tried to access a kernel or read-protected page - always
574 * a SEGV). All other faults here must be stores, so no 573 * a SEGV). All other faults here must be stores, so no
575 * need to check ESR_S as well. */ 574 * need to check ESR_S as well. */
576 mfs r4, resr
577 nop
578 andi r4, r4, 0x800 /* ESR_Z - zone protection */ 575 andi r4, r4, 0x800 /* ESR_Z - zone protection */
579 bnei r4, ex2 576 bnei r4, ex2
580 577
@@ -589,8 +586,6 @@ ex_handler_done:
589 * tried to access a kernel or read-protected page - always 586 * tried to access a kernel or read-protected page - always
590 * a SEGV). All other faults here must be stores, so no 587 * a SEGV). All other faults here must be stores, so no
591 * need to check ESR_S as well. */ 588 * need to check ESR_S as well. */
592 mfs r4, resr
593 nop
594 andi r4, r4, 0x800 /* ESR_Z */ 589 andi r4, r4, 0x800 /* ESR_Z */
595 bnei r4, ex2 590 bnei r4, ex2
596 /* get current task address */ 591 /* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
665 * R3 = ESR 660 * R3 = ESR
666 */ 661 */
667 662
668 mfs r3, rear /* Get faulting address */
669 nop
670 RESTORE_STATE; 663 RESTORE_STATE;
671 bri page_fault_instr_trap 664 bri page_fault_instr_trap
672 665
@@ -677,18 +670,15 @@ ex_handler_done:
677 */ 670 */
678 handle_data_tlb_miss_exception: 671 handle_data_tlb_miss_exception:
679 /* Working registers already saved: R3, R4, R5, R6 672 /* Working registers already saved: R3, R4, R5, R6
680 * R3 = ESR 673 * R3 = EAR, R4 = ESR
681 */ 674 */
682 mfs r11, rpid 675 mfs r11, rpid
683 nop 676 nop
684 bri 4
685 mfs r3, rear /* Get faulting address */
686 nop
687 677
688 /* If we are faulting a kernel address, we have to use the 678 /* If we are faulting a kernel address, we have to use the
689 * kernel page tables. */ 679 * kernel page tables. */
690 ori r4, r0, CONFIG_KERNEL_START 680 ori r6, r0, CONFIG_KERNEL_START
691 cmpu r4, r3, r4 681 cmpu r4, r3, r6
692 bgti r4, ex5 682 bgti r4, ex5
693 ori r4, r0, swapper_pg_dir 683 ori r4, r0, swapper_pg_dir
694 mts rpid, r0 /* TLB will have 0 TID */ 684 mts rpid, r0 /* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
731 * Many of these bits are software only. Bits we don't set 721 * Many of these bits are software only. Bits we don't set
732 * here we (properly should) assume have the appropriate value. 722 * here we (properly should) assume have the appropriate value.
733 */ 723 */
724 brid finish_tlb_load
734 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 725 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
735
736 bri finish_tlb_load
737 ex7: 726 ex7:
738 /* The bailout. Restore registers to pre-exception conditions 727 /* The bailout. Restore registers to pre-exception conditions
739 * and call the heavyweights to help us out. 728 * and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
754 */ 743 */
755 mfs r11, rpid 744 mfs r11, rpid
756 nop 745 nop
757 bri 4
758 mfs r3, rear /* Get faulting address */
759 nop
760 746
761 /* If we are faulting a kernel address, we have to use the 747 /* If we are faulting a kernel address, we have to use the
762 * kernel page tables. 748 * kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
792 lwi r4, r5, 0 /* Get Linux PTE */ 778 lwi r4, r5, 0 /* Get Linux PTE */
793 779
794 andi r6, r4, _PAGE_PRESENT 780 andi r6, r4, _PAGE_PRESENT
795 beqi r6, ex7 781 beqi r6, ex10
796 782
797 ori r4, r4, _PAGE_ACCESSED 783 ori r4, r4, _PAGE_ACCESSED
798 swi r4, r5, 0 784 swi r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
805 * Many of these bits are software only. Bits we don't set 791 * Many of these bits are software only. Bits we don't set
806 * here we (properly should) assume have the appropriate value. 792 * here we (properly should) assume have the appropriate value.
807 */ 793 */
794 brid finish_tlb_load
808 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 795 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
809
810 bri finish_tlb_load
811 ex10: 796 ex10:
812 /* The bailout. Restore registers to pre-exception conditions 797 /* The bailout. Restore registers to pre-exception conditions
813 * and call the heavyweights to help us out. 798 * and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
837 andi r5, r5, (MICROBLAZE_TLB_SIZE-1) 822 andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
838 ori r6, r0, 1 823 ori r6, r0, 1
839 cmp r31, r5, r6 824 cmp r31, r5, r6
840 blti r31, sem 825 blti r31, ex12
841 addik r5, r6, 1 826 addik r5, r6, 1
842 sem: 827 ex12:
843 /* MS: save back current TLB index */ 828 /* MS: save back current TLB index */
844 swi r5, r0, TOPHYS(tlb_index) 829 swi r5, r0, TOPHYS(tlb_index)
845 830
@@ -859,7 +844,6 @@ ex_handler_done:
859 nop 844 nop
860 845
861 /* Done...restore registers and get out of here. */ 846 /* Done...restore registers and get out of here. */
862 ex12:
863 mts rpid, r11 847 mts rpid, r11
864 nop 848 nop
865 bri 4 849 bri 4
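
For the TLB-miss handlers, the removed mfs rear/resr pairs reflect that the faulting address and ESR now arrive already saved (r3 = EAR, r4 = ESR), and the andni that strips software-only PTE bits has moved into the delay slot of brid finish_tlb_load. The net PTE handling is unchanged: bail to the heavyweight path if the page is not present, mark it accessed, mask the software bits, load the TLB. A compilable C rendering of that sequence follows; the bit values are placeholders, not the real _PAGE_* definitions.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_PRESENT	0x001u	/* placeholder for _PAGE_PRESENT */
#define PAGE_ACCESSED	0x100u	/* placeholder for _PAGE_ACCESSED */
#define SW_ONLY_BITS	0x0ce2u	/* mask cleared by "andni r4, r4, 0x0ce2" */

/* Returns false when the heavyweight fault path must run (ex7/ex10),
 * true when the cleaned-up PTE can be handed to finish_tlb_load. */
static bool prepare_tlb_entry(uint32_t *pte, uint32_t *tlb_lo)
{
	if (!(*pte & PAGE_PRESENT))
		return false;			/* "beqi r6, ex7" / "beqi r6, ex10" */

	*pte |= PAGE_ACCESSED;			/* "ori r4, r4, _PAGE_ACCESSED" + write back */
	*tlb_lo = *pte & ~SW_ONLY_BITS;		/* delay-slot "andni" before the branch */
	return true;
}

int main(void)
{
	uint32_t pte = PAGE_PRESENT | 0x0c00u;	/* present, with software bits set */
	uint32_t tlb_lo;

	if (prepare_tlb_entry(&pte, &tlb_lo))
		printf("pte=0x%04x tlb_lo=0x%04x\n", (unsigned)pte, (unsigned)tlb_lo);
	return 0;
}
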
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 6eea6f92b84e..03172c1da770 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -42,8 +42,16 @@ unsigned int nr_irq;
42 42
43static void intc_enable_or_unmask(unsigned int irq) 43static void intc_enable_or_unmask(unsigned int irq)
44{ 44{
45 unsigned long mask = 1 << irq;
45 pr_debug("enable_or_unmask: %d\n", irq); 46 pr_debug("enable_or_unmask: %d\n", irq);
46 out_be32(INTC_BASE + SIE, 1 << irq); 47 out_be32(INTC_BASE + SIE, mask);
48
 49 /* ack level irqs here because they cannot be acked in the
 50 * ack callback: the handle_level_irq function
 51 * acks the irq before calling the interrupt handler
52 */
53 if (irq_desc[irq].status & IRQ_LEVEL)
54 out_be32(INTC_BASE + IAR, mask);
47} 55}
48 56
49static void intc_disable_or_mask(unsigned int irq) 57static void intc_disable_or_mask(unsigned int irq)
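
The new unmask path also acks level-triggered sources, because handle_level_irq has already acked the line before the handler ran, so the re-ack has to happen here once the source is quiescent. Below is a standalone sketch of that write sequence; the out_be32 stub and the SIE/IAR offsets are placeholders, not the driver's real register map.

#include <stdint.h>
#include <stdio.h>

#define SIE 0x10	/* placeholder: Set Interrupt Enable register offset */
#define IAR 0x0c	/* placeholder: Interrupt Acknowledge register offset */

static void out_be32(uintptr_t reg, uint32_t val)
{
	printf("write 0x%08x to reg 0x%02lx\n", (unsigned)val, (unsigned long)reg);
}

static void enable_or_unmask(unsigned int irq, int is_level)
{
	uint32_t mask = 1u << irq;

	out_be32(SIE, mask);		/* enable the source */
	if (is_level)			/* level irqs were acked by handle_level_irq */
		out_be32(IAR, mask);	/* before the handler, so re-ack on unmask */
}

int main(void)
{
	enable_or_unmask(3, 1);
	return 0;
}
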
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 7d5ddd62d4d2..8f120aca123d 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/ftrace.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/hardirq.h> 14#include <linux/hardirq.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
@@ -32,7 +33,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
32 33
33static u32 concurrent_irq; 34static u32 concurrent_irq;
34 35
35void do_IRQ(struct pt_regs *regs) 36void __irq_entry do_IRQ(struct pt_regs *regs)
36{ 37{
37 unsigned int irq; 38 unsigned int irq;
38 struct pt_regs *old_regs = set_irq_regs(regs); 39 struct pt_regs *old_regs = set_irq_regs(regs);
@@ -68,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
68 } 69 }
69 70
70 if (i < nr_irq) { 71 if (i < nr_irq) {
71 spin_lock_irqsave(&irq_desc[i].lock, flags); 72 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
72 action = irq_desc[i].action; 73 action = irq_desc[i].action;
73 if (!action) 74 if (!action)
74 goto skip; 75 goto skip;
@@ -89,7 +90,22 @@ int show_interrupts(struct seq_file *p, void *v)
89 90
90 seq_putc(p, '\n'); 91 seq_putc(p, '\n');
91skip: 92skip:
92 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 93 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
93 } 94 }
94 return 0; 95 return 0;
95} 96}
97
 98/* MS: There is no advanced mapping mechanism. We are using a simple 32-bit
 99 intc without any cascades or extra connections; that's why the mapping is 1:1 */
100unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
101{
102 return hwirq;
103}
104EXPORT_SYMBOL_GPL(irq_create_mapping);
105
106unsigned int irq_create_of_mapping(struct device_node *controller,
107 u32 *intspec, unsigned int intsize)
108{
109 return intspec[0];
110}
111EXPORT_SYMBOL_GPL(irq_create_of_mapping);
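
Because the platform has a single, non-cascaded interrupt controller, both helpers above reduce to identity functions. For contrast, the sketch below puts the identity mapping next to a hypothetical table-based mapping such as a cascaded setup would need; neither function is kernel code and the table contents are invented.

#include <stdio.h>

static unsigned int identity_map(unsigned int hwirq)
{
	return hwirq;			/* what irq_create_mapping() does here */
}

static unsigned int table_map(unsigned int hwirq)
{
	/* hypothetical cascaded setup: hwirq -> virq via a per-host table */
	static const unsigned int virq_of_hwirq[] = { 16, 17, 18, 19 };

	return hwirq < 4 ? virq_of_hwirq[hwirq] : 0;
}

int main(void)
{
	printf("identity: %u, table: %u\n", identity_map(2), table_map(2));
	return 0;
}
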
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
new file mode 100644
index 000000000000..e7eaa7a8cbd3
--- /dev/null
+++ b/arch/microblaze/kernel/mcount.S
@@ -0,0 +1,170 @@
1/*
2 * Low-level ftrace handling
3 *
4 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2009 PetaLogix
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 */
11
12#include <linux/linkage.h>
13
14#define NOALIGN_ENTRY(name) .globl name; name:
15
16/* FIXME MS: I think that I don't need to save all regs */
17#define SAVE_REGS \
18 addik r1, r1, -120; \
19 swi r2, r1, 4; \
20 swi r3, r1, 8; \
21 swi r4, r1, 12; \
22 swi r5, r1, 116; \
23 swi r6, r1, 16; \
24 swi r7, r1, 20; \
25 swi r8, r1, 24; \
26 swi r9, r1, 28; \
27 swi r10, r1, 32; \
28 swi r11, r1, 36; \
29 swi r12, r1, 40; \
30 swi r13, r1, 44; \
31 swi r14, r1, 48; \
32 swi r16, r1, 52; \
33 swi r17, r1, 56; \
34 swi r18, r1, 60; \
35 swi r19, r1, 64; \
36 swi r20, r1, 68; \
37 swi r21, r1, 72; \
38 swi r22, r1, 76; \
39 swi r23, r1, 80; \
40 swi r24, r1, 84; \
41 swi r25, r1, 88; \
42 swi r26, r1, 92; \
43 swi r27, r1, 96; \
44 swi r28, r1, 100; \
45 swi r29, r1, 104; \
46 swi r30, r1, 108; \
47 swi r31, r1, 112;
48
49#define RESTORE_REGS \
50 lwi r2, r1, 4; \
51 lwi r3, r1, 8; \
52 lwi r4, r1, 12; \
53 lwi r5, r1, 116; \
54 lwi r6, r1, 16; \
55 lwi r7, r1, 20; \
56 lwi r8, r1, 24; \
57 lwi r9, r1, 28; \
58 lwi r10, r1, 32; \
59 lwi r11, r1, 36; \
60 lwi r12, r1, 40; \
61 lwi r13, r1, 44; \
62 lwi r14, r1, 48; \
63 lwi r16, r1, 52; \
64 lwi r17, r1, 56; \
65 lwi r18, r1, 60; \
66 lwi r19, r1, 64; \
67 lwi r20, r1, 68; \
68 lwi r21, r1, 72; \
69 lwi r22, r1, 76; \
70 lwi r23, r1, 80; \
71 lwi r24, r1, 84; \
72 lwi r25, r1, 88; \
73 lwi r26, r1, 92; \
74 lwi r27, r1, 96; \
75 lwi r28, r1, 100; \
76 lwi r29, r1, 104; \
77 lwi r30, r1, 108; \
78 lwi r31, r1, 112; \
79 addik r1, r1, 120;
80
81ENTRY(ftrace_stub)
82 rtsd r15, 8;
83 nop;
84
85ENTRY(_mcount)
86#ifdef CONFIG_DYNAMIC_FTRACE
87ENTRY(ftrace_caller)
 88 /* MS: this is just a barrier; it is removed (patched) from C code */
89 rtsd r15, 8
90 nop
91#endif /* CONFIG_DYNAMIC_FTRACE */
92 SAVE_REGS
93 swi r15, r1, 0;
94 /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */
95 lwi r5, r0, function_trace_stop;
96 bneid r5, end;
97 nop;
98 /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */
99#ifdef CONFIG_FUNCTION_GRAPH_TRACER
100#ifndef CONFIG_DYNAMIC_FTRACE
101 lwi r5, r0, ftrace_graph_return;
102 addik r6, r0, ftrace_stub; /* asm implementation */
103 cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */
104 beqid r5, end_graph_tracer;
105 nop;
106
107 lwi r6, r0, ftrace_graph_entry;
108 addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */
109 cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */
110 beqid r5, end_graph_tracer;
111 nop;
112#else /* CONFIG_DYNAMIC_FTRACE */
113NOALIGN_ENTRY(ftrace_call_graph)
114 /* MS: jump over graph function - replaced from C code */
115 bri end_graph_tracer
116#endif /* CONFIG_DYNAMIC_FTRACE */
117 addik r5, r1, 120; /* MS: load parent addr */
118 addik r6, r15, 0; /* MS: load current function addr */
119 bralid r15, prepare_ftrace_return;
120 nop;
 121 /* MS: the graph path was taken, so we can skip the function trace */
122 brid end;
123 nop;
124end_graph_tracer:
125#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
126#ifndef CONFIG_DYNAMIC_FTRACE
127 /* MS: test function trace if is taken or not */
128 lwi r20, r0, ftrace_trace_function;
129 addik r6, r0, ftrace_stub;
130 cmpu r5, r20, r6; /* ftrace_trace_function != ftrace_stub */
131 beqid r5, end; /* MS: not taken -> jump over */
132 nop;
133#else /* CONFIG_DYNAMIC_FTRACE */
134NOALIGN_ENTRY(ftrace_call)
135/* instruction for setup imm FUNC_part1, addik r20, r0, FUNC_part2 */
136 nop
137 nop
138#endif /* CONFIG_DYNAMIC_FTRACE */
139/* static normal trace */
140 lwi r6, r1, 120; /* MS: load parent addr */
141 addik r5, r15, 0; /* MS: load current function addr */
142 /* MS: here is dependency on previous code */
143 brald r15, r20; /* MS: jump to ftrace handler */
144 nop;
145end:
146 lwi r15, r1, 0;
147 RESTORE_REGS
148
149 rtsd r15, 8; /* MS: jump back */
150 nop;
151
152#ifdef CONFIG_FUNCTION_GRAPH_TRACER
153ENTRY(return_to_handler)
154 nop; /* MS: just barrier for rtsd r15, 8 */
155 nop;
156 SAVE_REGS
157 swi r15, r1, 0;
158
159 /* MS: find out returning address */
160 bralid r15, ftrace_return_to_handler;
161 nop;
162
 163 /* MS: the return value from ftrace_return_to_handler is the address to return to;
 164 * copy it before restoring regs because r3 gets restored too */
165 addik r15, r3, 0;
166 RESTORE_REGS
167
168 rtsd r15, 8; /* MS: jump back */
169 nop;
 170#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
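
In C terms, _mcount above does nothing while the global ftrace_trace_function pointer still equals ftrace_stub, and otherwise calls it with the instrumented function's address and its caller's address (r5/r6 in the assembly). Here is a runnable userspace sketch of that dispatch; the names mirror the ftrace ones but this is not kernel code, and the checks for function_trace_stop and the graph tracer are omitted.

#include <stdio.h>

typedef void (*trace_fn)(unsigned long ip, unsigned long parent_ip);

static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }

static trace_fn ftrace_trace_function = ftrace_stub;

static void my_tracer(unsigned long ip, unsigned long parent_ip)
{
	printf("traced ip=0x%lx called from 0x%lx\n", ip, parent_ip);
}

static void mcount(unsigned long ip, unsigned long parent_ip)
{
	if (ftrace_trace_function == ftrace_stub)
		return;				/* "cmpu r5, r20, r6; beqid r5, end" */
	ftrace_trace_function(ip, parent_ip);	/* "brald r15, r20" */
}

int main(void)
{
	mcount(0x1000, 0x2000);			/* stub installed: nothing printed */
	ftrace_trace_function = my_tracer;
	mcount(0x1000, 0x2000);			/* tracer installed: one line printed */
	return 0;
}
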
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 59ff20e33e0c..ff85f7718035 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -18,6 +18,7 @@
18#include <linux/io.h> 18#include <linux/io.h>
19#include <asm/page.h> 19#include <asm/page.h>
20#include <asm/system.h> 20#include <asm/system.h>
21#include <linux/ftrace.h>
21#include <linux/uaccess.h> 22#include <linux/uaccess.h>
22 23
23/* 24/*
@@ -47,3 +48,18 @@ extern void __umodsi3(void);
47EXPORT_SYMBOL(__umodsi3); 48EXPORT_SYMBOL(__umodsi3);
48extern char *_ebss; 49extern char *_ebss;
49EXPORT_SYMBOL_GPL(_ebss); 50EXPORT_SYMBOL_GPL(_ebss);
51#ifdef CONFIG_FUNCTION_TRACER
52extern void _mcount(void);
53EXPORT_SYMBOL(_mcount);
54#endif
55
56/*
57 * Assembly functions that may be used (directly or indirectly) by modules
58 */
59EXPORT_SYMBOL(__copy_tofrom_user);
60EXPORT_SYMBOL(__strncpy_user);
61
62#ifdef CONFIG_OPT_LIB_ASM
63EXPORT_SYMBOL(memcpy);
64EXPORT_SYMBOL(memmove);
65#endif
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c6287a8e..0fb5fc6c1fc2 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
26 * We avoid flushing the pinned 0, 1 and possibly 2 entries. 26 * We avoid flushing the pinned 0, 1 and possibly 2 entries.
27 */ 27 */
28.globl _tlbia; 28.globl _tlbia;
29.type _tlbia, @function
29.align 4; 30.align 4;
30_tlbia: 31_tlbia:
31 addik r12, r0, 63 /* flush all entries (63 - 3) */ 32 addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
32 /* isync */ 33 /* isync */
33_tlbia_1: 34_tlbia_1:
34 mts rtlbx, r12 35 mts rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
41 /* sync */ 42 /* sync */
42 rtsd r15, 8 43 rtsd r15, 8
43 nop 44 nop
45 .size _tlbia, . - _tlbia
44 46
45/* 47/*
46 * Flush MMU TLB for a particular address (in r5) 48 * Flush MMU TLB for a particular address (in r5)
47 */ 49 */
48.globl _tlbie; 50.globl _tlbie;
51.type _tlbie, @function
49.align 4; 52.align 4;
50_tlbie: 53_tlbie:
51 mts rtlbsx, r5 /* look up the address in TLB */ 54 mts rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
59 rtsd r15, 8 62 rtsd r15, 8
60 nop 63 nop
61 64
65 .size _tlbie, . - _tlbie
66
62/* 67/*
63 * Allocate TLB entry for early console 68 * Allocate TLB entry for early console
64 */ 69 */
65.globl early_console_reg_tlb_alloc; 70.globl early_console_reg_tlb_alloc;
71.type early_console_reg_tlb_alloc, @function
66.align 4; 72.align 4;
67early_console_reg_tlb_alloc: 73early_console_reg_tlb_alloc:
68 /* 74 /*
69 * Load a TLB entry for the UART, so that microblaze_progress() can use 75 * Load a TLB entry for the UART, so that microblaze_progress() can use
70 * the UARTs nice and early. We use a 4k real==virtual mapping. 76 * the UARTs nice and early. We use a 4k real==virtual mapping.
71 */ 77 */
72 ori r4, r0, 63 78 ori r4, r0, MICROBLAZE_TLB_SIZE - 1
73 mts rtlbx, r4 /* TLB slot 2 */ 79 mts rtlbx, r4 /* TLB slot 2 */
74 80
75 or r4,r5,r0 81 or r4,r5,r0
@@ -86,35 +92,4 @@ early_console_reg_tlb_alloc:
86 rtsd r15, 8 92 rtsd r15, 8
87 nop 93 nop
88 94
89/* 95 .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
90 * Copy a whole page (4096 bytes).
91 */
92#define COPY_16_BYTES \
93 lwi r7, r6, 0; \
94 lwi r8, r6, 4; \
95 lwi r9, r6, 8; \
96 lwi r10, r6, 12; \
97 swi r7, r5, 0; \
98 swi r8, r5, 4; \
99 swi r9, r5, 8; \
100 swi r10, r5, 12
101
102
103/* FIXME DCACHE_LINE_BYTES (CONFIG_XILINX_MICROBLAZE0_DCACHE_LINE_LEN * 4)*/
104#define DCACHE_LINE_BYTES (4 * 4)
105
106.globl copy_page;
107.align 4;
108copy_page:
109 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
110_copy_page_loop:
111 COPY_16_BYTES
112#if DCACHE_LINE_BYTES >= 32
113 COPY_16_BYTES
114#endif
115 addik r6, r6, DCACHE_LINE_BYTES
116 addik r5, r5, DCACHE_LINE_BYTES
117 bneid r11, _copy_page_loop
118 addik r11, r11, -1
119 rtsd r15, 8
120 nop
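
_tlbia above walks the TLB from the top slot, now written as MICROBLAZE_TLB_SIZE - 1 instead of the bare 63, down towards the pinned low entries, invalidating each slot. A standalone C rendering of that loop follows; the invalidate step is stubbed out and the exact lower bound follows the "(63 - 3)" and "pinned 0, 1 and possibly 2" comments in the source rather than the precise assembly arithmetic.

#include <stdio.h>

#define MICROBLAZE_TLB_SIZE 64

static void invalidate_tlb_slot(int slot)
{
	/* stands in for "mts rtlbx, r12" plus clearing the entry */
	printf("invalidate slot %d\n", slot);
}

int main(void)
{
	int slot;

	for (slot = MICROBLAZE_TLB_SIZE - 1; slot >= 3; slot--)
		invalidate_tlb_slot(slot);	/* slots 0..2 stay pinned */
	return 0;
}
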
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 5a45b1adfef1..0e73f6606547 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -12,11 +12,11 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/elf.h> 13#include <linux/elf.h>
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/slab.h>
16#include <linux/fs.h> 15#include <linux/fs.h>
17#include <linux/string.h> 16#include <linux/string.h>
18 17
19#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/cacheflush.h>
20 20
21void *module_alloc(unsigned long size) 21void *module_alloc(unsigned long size)
22{ 22{
@@ -152,6 +152,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
152int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, 152int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
153 struct module *module) 153 struct module *module)
154{ 154{
155 flush_dcache();
155 return 0; 156 return 0;
156} 157}
157 158
diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c
index acf4574d0f18..0dc755286d38 100644
--- a/arch/microblaze/kernel/of_platform.c
+++ b/arch/microblaze/kernel/of_platform.c
@@ -17,7 +17,6 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mod_devicetable.h> 19#include <linux/mod_devicetable.h>
20#include <linux/slab.h>
21#include <linux/pci.h> 20#include <linux/pci.h>
22#include <linux/of.h> 21#include <linux/of.h>
23#include <linux/of_device.h> 22#include <linux/of_device.h>
@@ -185,7 +184,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
185static int of_dev_phandle_match(struct device *dev, void *data) 184static int of_dev_phandle_match(struct device *dev, void *data)
186{ 185{
187 phandle *ph = data; 186 phandle *ph = data;
188 return to_of_device(dev)->node->linux_phandle == *ph; 187 return to_of_device(dev)->node->phandle == *ph;
189} 188}
190 189
191struct of_device *of_find_device_by_phandle(phandle ph) 190struct of_device *of_find_device_by_phandle(phandle ph)
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index c592d475b3d8..09bed44dfcd3 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,8 @@
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <asm/system.h> 16#include <asm/system.h>
17#include <asm/pgalloc.h> 17#include <asm/pgalloc.h>
18#include <asm/uaccess.h> /* for USER_DS macros */
19#include <asm/cacheflush.h>
18 20
19void show_regs(struct pt_regs *regs) 21void show_regs(struct pt_regs *regs)
20{ 22{
@@ -73,7 +75,10 @@ __setup("hlt", hlt_setup);
73 75
74void default_idle(void) 76void default_idle(void)
75{ 77{
76 if (!hlt_counter) { 78 if (likely(hlt_counter)) {
79 while (!need_resched())
80 cpu_relax();
81 } else {
77 clear_thread_flag(TIF_POLLING_NRFLAG); 82 clear_thread_flag(TIF_POLLING_NRFLAG);
78 smp_mb__after_clear_bit(); 83 smp_mb__after_clear_bit();
79 local_irq_disable(); 84 local_irq_disable();
@@ -81,9 +86,7 @@ void default_idle(void)
81 cpu_sleep(); 86 cpu_sleep();
82 local_irq_enable(); 87 local_irq_enable();
83 set_thread_flag(TIF_POLLING_NRFLAG); 88 set_thread_flag(TIF_POLLING_NRFLAG);
84 } else 89 }
85 while (!need_resched())
86 cpu_relax();
87} 90}
88 91
89void cpu_idle(void) 92void cpu_idle(void)
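
The rewritten default_idle() chooses between busy-polling (when hlt_counter is non-zero, i.e. the "nohlt" option) and putting the core to sleep with interrupts disabled until work arrives. Below is a compilable sketch of that decision with every kernel primitive stubbed out; it is an illustration of the control flow, not the kernel function.

#include <stdio.h>

static int hlt_counter;
static int need_resched_flag;

static int need_resched(void) { return need_resched_flag; }
static void cpu_relax(void)   { /* pause/nop hint in the real code */ }
static void cpu_sleep(void)   { printf("sleeping until interrupt\n"); }

static void default_idle(void)
{
	if (hlt_counter) {
		while (!need_resched())
			cpu_relax();		/* "nohlt": poll instead of sleeping */
	} else {
		/* interrupts are disabled around this check in the real code so
		 * a wakeup cannot slip in between need_resched() and cpu_sleep() */
		if (!need_resched())
			cpu_sleep();
	}
}

int main(void)
{
	need_resched_flag = 1;		/* pretend work is already pending */

	hlt_counter = 1;		/* "nohlt" mode: polling loop exits at once */
	default_idle();

	hlt_counter = 0;		/* normal mode: would sleep if truly idle */
	default_idle();			/* work pending, so no sleep here */
	return 0;
}
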
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index c005cc6f1aaf..a15ef6d67ca9 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -42,697 +42,20 @@
42#include <asm/sections.h> 42#include <asm/sections.h>
43#include <asm/pci-bridge.h> 43#include <asm/pci-bridge.h>
44 44
45static int __initdata dt_root_addr_cells; 45void __init early_init_dt_scan_chosen_arch(unsigned long node)
46static int __initdata dt_root_size_cells;
47
48typedef u32 cell_t;
49
50static struct boot_param_header *initial_boot_params;
51
52/* export that to outside world */
53struct device_node *of_chosen;
54
55static inline char *find_flat_dt_string(u32 offset)
56{
57 return ((char *)initial_boot_params) +
58 initial_boot_params->off_dt_strings + offset;
59}
60
61/**
62 * This function is used to scan the flattened device-tree, it is
63 * used to extract the memory informations at boot before we can
64 * unflatten the tree
65 */
66int __init of_scan_flat_dt(int (*it)(unsigned long node,
67 const char *uname, int depth,
68 void *data),
69 void *data)
70{
71 unsigned long p = ((unsigned long)initial_boot_params) +
72 initial_boot_params->off_dt_struct;
73 int rc = 0;
74 int depth = -1;
75
76 do {
77 u32 tag = *((u32 *)p);
78 char *pathp;
79
80 p += 4;
81 if (tag == OF_DT_END_NODE) {
82 depth--;
83 continue;
84 }
85 if (tag == OF_DT_NOP)
86 continue;
87 if (tag == OF_DT_END)
88 break;
89 if (tag == OF_DT_PROP) {
90 u32 sz = *((u32 *)p);
91 p += 8;
92 if (initial_boot_params->version < 0x10)
93 p = _ALIGN(p, sz >= 8 ? 8 : 4);
94 p += sz;
95 p = _ALIGN(p, 4);
96 continue;
97 }
98 if (tag != OF_DT_BEGIN_NODE) {
99 printk(KERN_WARNING "Invalid tag %x scanning flattened"
100 " device tree !\n", tag);
101 return -EINVAL;
102 }
103 depth++;
104 pathp = (char *)p;
105 p = _ALIGN(p + strlen(pathp) + 1, 4);
106 if ((*pathp) == '/') {
107 char *lp, *np;
108 for (lp = NULL, np = pathp; *np; np++)
109 if ((*np) == '/')
110 lp = np+1;
111 if (lp != NULL)
112 pathp = lp;
113 }
114 rc = it(p, pathp, depth, data);
115 if (rc != 0)
116 break;
117 } while (1);
118
119 return rc;
120}
121
122unsigned long __init of_get_flat_dt_root(void)
123{
124 unsigned long p = ((unsigned long)initial_boot_params) +
125 initial_boot_params->off_dt_struct;
126
127 while (*((u32 *)p) == OF_DT_NOP)
128 p += 4;
129 BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE);
130 p += 4;
131 return _ALIGN(p + strlen((char *)p) + 1, 4);
132}
133
134/**
135 * This function can be used within scan_flattened_dt callback to get
136 * access to properties
137 */
138void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
139 unsigned long *size)
140{
141 unsigned long p = node;
142
143 do {
144 u32 tag = *((u32 *)p);
145 u32 sz, noff;
146 const char *nstr;
147
148 p += 4;
149 if (tag == OF_DT_NOP)
150 continue;
151 if (tag != OF_DT_PROP)
152 return NULL;
153
154 sz = *((u32 *)p);
155 noff = *((u32 *)(p + 4));
156 p += 8;
157 if (initial_boot_params->version < 0x10)
158 p = _ALIGN(p, sz >= 8 ? 8 : 4);
159
160 nstr = find_flat_dt_string(noff);
161 if (nstr == NULL) {
162 printk(KERN_WARNING "Can't find property index"
163 " name !\n");
164 return NULL;
165 }
166 if (strcmp(name, nstr) == 0) {
167 if (size)
168 *size = sz;
169 return (void *)p;
170 }
171 p += sz;
172 p = _ALIGN(p, 4);
173 } while (1);
174}
175
176int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
177{
178 const char *cp;
179 unsigned long cplen, l;
180
181 cp = of_get_flat_dt_prop(node, "compatible", &cplen);
182 if (cp == NULL)
183 return 0;
184 while (cplen > 0) {
185 if (strncasecmp(cp, compat, strlen(compat)) == 0)
186 return 1;
187 l = strlen(cp) + 1;
188 cp += l;
189 cplen -= l;
190 }
191
192 return 0;
193}
194
195static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
196 unsigned long align)
197{
198 void *res;
199
200 *mem = _ALIGN(*mem, align);
201 res = (void *)*mem;
202 *mem += size;
203
204 return res;
205}
206
207static unsigned long __init unflatten_dt_node(unsigned long mem,
208 unsigned long *p,
209 struct device_node *dad,
210 struct device_node ***allnextpp,
211 unsigned long fpsize)
212{
213 struct device_node *np;
214 struct property *pp, **prev_pp = NULL;
215 char *pathp;
216 u32 tag;
217 unsigned int l, allocl;
218 int has_name = 0;
219 int new_format = 0;
220
221 tag = *((u32 *)(*p));
222 if (tag != OF_DT_BEGIN_NODE) {
223 printk("Weird tag at start of node: %x\n", tag);
224 return mem;
225 }
226 *p += 4;
227 pathp = (char *)*p;
228 l = allocl = strlen(pathp) + 1;
229 *p = _ALIGN(*p + l, 4);
230
231 /* version 0x10 has a more compact unit name here instead of the full
232 * path. we accumulate the full path size using "fpsize", we'll rebuild
233 * it later. We detect this because the first character of the name is
234 * not '/'.
235 */
236 if ((*pathp) != '/') {
237 new_format = 1;
238 if (fpsize == 0) {
239 /* root node: special case. fpsize accounts for path
240 * plus terminating zero. root node only has '/', so
241 * fpsize should be 2, but we want to avoid the first
242 * level nodes to have two '/' so we use fpsize 1 here
243 */
244 fpsize = 1;
245 allocl = 2;
246 } else {
247 /* account for '/' and path size minus terminal 0
248 * already in 'l'
249 */
250 fpsize += l;
251 allocl = fpsize;
252 }
253 }
254
255 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
256 __alignof__(struct device_node));
257 if (allnextpp) {
258 memset(np, 0, sizeof(*np));
259 np->full_name = ((char *)np) + sizeof(struct device_node);
260 if (new_format) {
261 char *p2 = np->full_name;
262 /* rebuild full path for new format */
263 if (dad && dad->parent) {
264 strcpy(p2, dad->full_name);
265#ifdef DEBUG
266 if ((strlen(p2) + l + 1) != allocl) {
267 pr_debug("%s: p: %d, l: %d, a: %d\n",
268 pathp, (int)strlen(p2),
269 l, allocl);
270 }
271#endif
272 p2 += strlen(p2);
273 }
274 *(p2++) = '/';
275 memcpy(p2, pathp, l);
276 } else
277 memcpy(np->full_name, pathp, l);
278 prev_pp = &np->properties;
279 **allnextpp = np;
280 *allnextpp = &np->allnext;
281 if (dad != NULL) {
282 np->parent = dad;
283 /* we temporarily use the next field as `last_child'*/
284 if (dad->next == NULL)
285 dad->child = np;
286 else
287 dad->next->sibling = np;
288 dad->next = np;
289 }
290 kref_init(&np->kref);
291 }
292 while (1) {
293 u32 sz, noff;
294 char *pname;
295
296 tag = *((u32 *)(*p));
297 if (tag == OF_DT_NOP) {
298 *p += 4;
299 continue;
300 }
301 if (tag != OF_DT_PROP)
302 break;
303 *p += 4;
304 sz = *((u32 *)(*p));
305 noff = *((u32 *)((*p) + 4));
306 *p += 8;
307 if (initial_boot_params->version < 0x10)
308 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
309
310 pname = find_flat_dt_string(noff);
311 if (pname == NULL) {
312 printk(KERN_INFO
313 "Can't find property name in list !\n");
314 break;
315 }
316 if (strcmp(pname, "name") == 0)
317 has_name = 1;
318 l = strlen(pname) + 1;
319 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
320 __alignof__(struct property));
321 if (allnextpp) {
322 if (strcmp(pname, "linux,phandle") == 0) {
323 np->node = *((u32 *)*p);
324 if (np->linux_phandle == 0)
325 np->linux_phandle = np->node;
326 }
327 if (strcmp(pname, "ibm,phandle") == 0)
328 np->linux_phandle = *((u32 *)*p);
329 pp->name = pname;
330 pp->length = sz;
331 pp->value = (void *)*p;
332 *prev_pp = pp;
333 prev_pp = &pp->next;
334 }
335 *p = _ALIGN((*p) + sz, 4);
336 }
337 /* with version 0x10 we may not have the name property, recreate
338 * it here from the unit name if absent
339 */
340 if (!has_name) {
341 char *p1 = pathp, *ps = pathp, *pa = NULL;
342 int sz;
343
344 while (*p1) {
345 if ((*p1) == '@')
346 pa = p1;
347 if ((*p1) == '/')
348 ps = p1 + 1;
349 p1++;
350 }
351 if (pa < ps)
352 pa = p1;
353 sz = (pa - ps) + 1;
354 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
355 __alignof__(struct property));
356 if (allnextpp) {
357 pp->name = "name";
358 pp->length = sz;
359 pp->value = pp + 1;
360 *prev_pp = pp;
361 prev_pp = &pp->next;
362 memcpy(pp->value, ps, sz - 1);
363 ((char *)pp->value)[sz - 1] = 0;
364 pr_debug("fixed up name for %s -> %s\n", pathp,
365 (char *)pp->value);
366 }
367 }
368 if (allnextpp) {
369 *prev_pp = NULL;
370 np->name = of_get_property(np, "name", NULL);
371 np->type = of_get_property(np, "device_type", NULL);
372
373 if (!np->name)
374 np->name = "<NULL>";
375 if (!np->type)
376 np->type = "<NULL>";
377 }
378 while (tag == OF_DT_BEGIN_NODE) {
379 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
380 tag = *((u32 *)(*p));
381 }
382 if (tag != OF_DT_END_NODE) {
383 printk(KERN_INFO "Weird tag at end of node: %x\n", tag);
384 return mem;
385 }
386 *p += 4;
387 return mem;
388}
389
390/**
391 * unflattens the device-tree passed by the firmware, creating the
392 * tree of struct device_node. It also fills the "name" and "type"
393 * pointers of the nodes so the normal device-tree walking functions
394 * can be used (this used to be done by finish_device_tree)
395 */
396void __init unflatten_device_tree(void)
397{
398 unsigned long start, mem, size;
399 struct device_node **allnextp = &allnodes;
400
401 pr_debug(" -> unflatten_device_tree()\n");
402
403 /* First pass, scan for size */
404 start = ((unsigned long)initial_boot_params) +
405 initial_boot_params->off_dt_struct;
406 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
407 size = (size | 3) + 1;
408
409 pr_debug(" size is %lx, allocating...\n", size);
410
411 /* Allocate memory for the expanded device tree */
412 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
413 mem = (unsigned long) __va(mem);
414
415 ((u32 *)mem)[size / 4] = 0xdeadbeef;
416
417 pr_debug(" unflattening %lx...\n", mem);
418
419 /* Second pass, do actual unflattening */
420 start = ((unsigned long)initial_boot_params) +
421 initial_boot_params->off_dt_struct;
422 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
423 if (*((u32 *)start) != OF_DT_END)
424 printk(KERN_WARNING "Weird tag at end of tree: %08x\n",
425 *((u32 *)start));
426 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
427 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
428 ((u32 *)mem)[size / 4]);
429 *allnextp = NULL;
430
431 /* Get pointer to OF "/chosen" node for use everywhere */
432 of_chosen = of_find_node_by_path("/chosen");
433 if (of_chosen == NULL)
434 of_chosen = of_find_node_by_path("/chosen@0");
435
436 pr_debug(" <- unflatten_device_tree()\n");
437}
438
439#define early_init_dt_scan_drconf_memory(node) 0
440
441static int __init early_init_dt_scan_cpus(unsigned long node,
442 const char *uname, int depth,
443 void *data)
444{
445 static int logical_cpuid;
446 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
447 const u32 *intserv;
448 int i, nthreads;
449 int found = 0;
450
451 /* We are scanning "cpu" nodes only */
452 if (type == NULL || strcmp(type, "cpu") != 0)
453 return 0;
454
455 /* Get physical cpuid */
456 intserv = of_get_flat_dt_prop(node, "reg", NULL);
457 nthreads = 1;
458
459 /*
460 * Now see if any of these threads match our boot cpu.
461 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
462 */
463 for (i = 0; i < nthreads; i++) {
464 /*
465 * version 2 of the kexec param format adds the phys cpuid of
466 * booted proc.
467 */
468 if (initial_boot_params && initial_boot_params->version >= 2) {
469 if (intserv[i] ==
470 initial_boot_params->boot_cpuid_phys) {
471 found = 1;
472 break;
473 }
474 } else {
475 /*
476 * Check if it's the boot-cpu, set it's hw index now,
477 * unfortunately this format did not support booting
478 * off secondary threads.
479 */
480 if (of_get_flat_dt_prop(node,
481 "linux,boot-cpu", NULL) != NULL) {
482 found = 1;
483 break;
484 }
485 }
486
487#ifdef CONFIG_SMP
488 /* logical cpu id is always 0 on UP kernels */
489 logical_cpuid++;
490#endif
491 }
492
493 if (found) {
494 pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
495 intserv[i]);
496 boot_cpuid = logical_cpuid;
497 }
498
499 return 0;
500}
501
502#ifdef CONFIG_BLK_DEV_INITRD
503static void __init early_init_dt_check_for_initrd(unsigned long node)
504{
505 unsigned long l;
506 u32 *prop;
507
508 pr_debug("Looking for initrd properties... ");
509
510 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
511 if (prop) {
512 initrd_start = (unsigned long)
513 __va((u32)of_read_ulong(prop, l/4));
514
515 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
516 if (prop) {
517 initrd_end = (unsigned long)
518 __va((u32)of_read_ulong(prop, 1/4));
519 initrd_below_start_ok = 1;
520 } else {
521 initrd_start = 0;
522 }
523 }
524
525 pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n",
526 initrd_start, initrd_end);
527}
528#else
529static inline void early_init_dt_check_for_initrd(unsigned long node)
530{
531}
532#endif /* CONFIG_BLK_DEV_INITRD */
533
534static int __init early_init_dt_scan_chosen(unsigned long node,
535 const char *uname, int depth, void *data)
536{
537 unsigned long l;
538 char *p;
539
540 pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
541
542 if (depth != 1 ||
543 (strcmp(uname, "chosen") != 0 &&
544 strcmp(uname, "chosen@0") != 0))
545 return 0;
546
547#ifdef CONFIG_KEXEC
548 lprop = (u64 *)of_get_flat_dt_prop(node,
549 "linux,crashkernel-base", NULL);
550 if (lprop)
551 crashk_res.start = *lprop;
552
553 lprop = (u64 *)of_get_flat_dt_prop(node,
554 "linux,crashkernel-size", NULL);
555 if (lprop)
556 crashk_res.end = crashk_res.start + *lprop - 1;
557#endif
558
559 early_init_dt_check_for_initrd(node);
560
561 /* Retreive command line */
562 p = of_get_flat_dt_prop(node, "bootargs", &l);
563 if (p != NULL && l > 0)
564 strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
565
566#ifdef CONFIG_CMDLINE
567#ifndef CONFIG_CMDLINE_FORCE
568 if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
569#endif
570 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
571#endif /* CONFIG_CMDLINE */
572
573 pr_debug("Command line is: %s\n", cmd_line);
574
575 /* break now */
576 return 1;
577}
578
579static int __init early_init_dt_scan_root(unsigned long node,
580 const char *uname, int depth, void *data)
581{
582 u32 *prop;
583
584 if (depth != 0)
585 return 0;
586
587 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
588 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
589 pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
590
591 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
592 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
593 pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
594
595 /* break now */
596 return 1;
597}
598
599static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
600{
601 cell_t *p = *cellp;
602
603 *cellp = p + s;
604 return of_read_number(p, s);
605}
606
607static int __init early_init_dt_scan_memory(unsigned long node,
608 const char *uname, int depth, void *data)
609{ 46{
610 char *type = of_get_flat_dt_prop(node, "device_type", NULL); 47 /* No Microblaze specific code here */
611 cell_t *reg, *endp;
612 unsigned long l;
613
614 /* Look for the ibm,dynamic-reconfiguration-memory node */
615/* if (depth == 1 &&
616 strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
617 return early_init_dt_scan_drconf_memory(node);
618*/
619 /* We are scanning "memory" nodes only */
620 if (type == NULL) {
621 /*
622 * The longtrail doesn't have a device_type on the
623 * /memory node, so look for the node called /memory@0.
624 */
625 if (depth != 1 || strcmp(uname, "memory@0") != 0)
626 return 0;
627 } else if (strcmp(type, "memory") != 0)
628 return 0;
629
630 reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
631 if (reg == NULL)
632 reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
633 if (reg == NULL)
634 return 0;
635
636 endp = reg + (l / sizeof(cell_t));
637
638 pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
639 uname, l, reg[0], reg[1], reg[2], reg[3]);
640
641 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
642 u64 base, size;
643
644 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
645 size = dt_mem_next_cell(dt_root_size_cells, &reg);
646
647 if (size == 0)
648 continue;
649 pr_debug(" - %llx , %llx\n", (unsigned long long)base,
650 (unsigned long long)size);
651
652 lmb_add(base, size);
653 }
654 return 0;
655} 48}
656 49
657#ifdef CONFIG_PHYP_DUMP 50void __init early_init_dt_add_memory_arch(u64 base, u64 size)
658/**
659 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
660 *
661 * Function to find the largest size we need to reserve
662 * during early boot process.
663 *
664 * It either looks for boot param and returns that OR
665 * returns larger of 256 or 5% rounded down to multiples of 256MB.
666 *
667 */
668static inline unsigned long phyp_dump_calculate_reserve_size(void)
669{ 51{
670 unsigned long tmp; 52 lmb_add(base, size);
671
672 if (phyp_dump_info->reserve_bootvar)
673 return phyp_dump_info->reserve_bootvar;
674
675 /* divide by 20 to get 5% of value */
676 tmp = lmb_end_of_DRAM();
677 do_div(tmp, 20);
678
679 /* round it down in multiples of 256 */
680 tmp = tmp & ~0x0FFFFFFFUL;
681
682 return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
683} 53}
684 54
685/** 55u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
686 * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory
687 *
688 * This routine may reserve memory regions in the kernel only
689 * if the system is supported and a dump was taken in last
690 * boot instance or if the hardware is supported and the
691 * scratch area needs to be setup. In other instances it returns
692 * without reserving anything. The memory in case of dump being
693 * active is freed when the dump is collected (by userland tools).
694 */
695static void __init phyp_dump_reserve_mem(void)
696{ 56{
697 unsigned long base, size; 57 return lmb_alloc(size, align);
698 unsigned long variable_reserve_size;
699
700 if (!phyp_dump_info->phyp_dump_configured) {
701 printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
702 return;
703 }
704
705 if (!phyp_dump_info->phyp_dump_at_boot) {
706 printk(KERN_INFO "Phyp-dump disabled at boot time\n");
707 return;
708 }
709
710 variable_reserve_size = phyp_dump_calculate_reserve_size();
711
712 if (phyp_dump_info->phyp_dump_is_active) {
713 /* Reserve *everything* above RMR.Area freed by userland tools*/
714 base = variable_reserve_size;
715 size = lmb_end_of_DRAM() - base;
716
717 /* XXX crashed_ram_end is wrong, since it may be beyond
718 * the memory_limit, it will need to be adjusted. */
719 lmb_reserve(base, size);
720
721 phyp_dump_info->init_reserve_start = base;
722 phyp_dump_info->init_reserve_size = size;
723 } else {
724 size = phyp_dump_info->cpu_state_size +
725 phyp_dump_info->hpte_region_size +
726 variable_reserve_size;
727 base = lmb_end_of_DRAM() - size;
728 lmb_reserve(base, size);
729 phyp_dump_info->init_reserve_start = base;
730 phyp_dump_info->init_reserve_size = size;
731 }
732} 58}
733#else
734static inline void __init phyp_dump_reserve_mem(void) {}
735#endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
736 59
737#ifdef CONFIG_EARLY_PRINTK 60#ifdef CONFIG_EARLY_PRINTK
 738/* MS: this is a Microblaze specific function */ 61/* MS: this is a Microblaze specific function */
@@ -775,11 +98,6 @@ void __init early_init_devtree(void *params)
775 /* Setup flat device-tree pointer */ 98 /* Setup flat device-tree pointer */
776 initial_boot_params = params; 99 initial_boot_params = params;
777 100
778#ifdef CONFIG_PHYP_DUMP
779 /* scan tree to see if dump occured during last boot */
780 of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
781#endif
782
783 /* Retrieve various informations from the /chosen node of the 101 /* Retrieve various informations from the /chosen node of the
784 * device-tree, including the platform type, initrd location and 102 * device-tree, including the platform type, initrd location and
785 * size, TCE reserve, and more ... 103 * size, TCE reserve, and more ...
@@ -799,33 +117,18 @@ void __init early_init_devtree(void *params)
799 117
800 pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size()); 118 pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());
801 119
802 pr_debug("Scanning CPUs ...\n");
803
804 /* Retreive CPU related informations from the flat tree
805 * (altivec support, boot CPU ID, ...)
806 */
807 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
808
809 pr_debug(" <- early_init_devtree()\n"); 120 pr_debug(" <- early_init_devtree()\n");
810} 121}
811 122
812/** 123#ifdef CONFIG_BLK_DEV_INITRD
813 * Indicates whether the root node has a given value in its 124void __init early_init_dt_setup_initrd_arch(unsigned long start,
814 * compatible property. 125 unsigned long end)
815 */
816int machine_is_compatible(const char *compat)
817{ 126{
818 struct device_node *root; 127 initrd_start = (unsigned long)__va(start);
819 int rc = 0; 128 initrd_end = (unsigned long)__va(end);
820 129 initrd_below_start_ok = 1;
821 root = of_find_node_by_path("/");
822 if (root) {
823 rc = of_device_is_compatible(root, compat);
824 of_node_put(root);
825 }
826 return rc;
827} 130}
828EXPORT_SYMBOL(machine_is_compatible); 131#endif
829 132
830/******* 133/*******
831 * 134 *
@@ -838,296 +141,6 @@ EXPORT_SYMBOL(machine_is_compatible);
838 * 141 *
839 *******/ 142 *******/
840 143
841/**
842 * of_find_node_by_phandle - Find a node given a phandle
843 * @handle: phandle of the node to find
844 *
845 * Returns a node pointer with refcount incremented, use
846 * of_node_put() on it when done.
847 */
848struct device_node *of_find_node_by_phandle(phandle handle)
849{
850 struct device_node *np;
851
852 read_lock(&devtree_lock);
853 for (np = allnodes; np != NULL; np = np->allnext)
854 if (np->linux_phandle == handle)
855 break;
856 of_node_get(np);
857 read_unlock(&devtree_lock);
858 return np;
859}
860EXPORT_SYMBOL(of_find_node_by_phandle);
861
862/**
863 * of_find_all_nodes - Get next node in global list
864 * @prev: Previous node or NULL to start iteration
865 * of_node_put() will be called on it
866 *
867 * Returns a node pointer with refcount incremented, use
868 * of_node_put() on it when done.
869 */
870struct device_node *of_find_all_nodes(struct device_node *prev)
871{
872 struct device_node *np;
873
874 read_lock(&devtree_lock);
875 np = prev ? prev->allnext : allnodes;
876 for (; np != NULL; np = np->allnext)
877 if (of_node_get(np))
878 break;
879 of_node_put(prev);
880 read_unlock(&devtree_lock);
881 return np;
882}
883EXPORT_SYMBOL(of_find_all_nodes);
884
885/**
886 * of_node_get - Increment refcount of a node
887 * @node: Node to inc refcount, NULL is supported to
888 * simplify writing of callers
889 *
890 * Returns node.
891 */
892struct device_node *of_node_get(struct device_node *node)
893{
894 if (node)
895 kref_get(&node->kref);
896 return node;
897}
898EXPORT_SYMBOL(of_node_get);
899
900static inline struct device_node *kref_to_device_node(struct kref *kref)
901{
902 return container_of(kref, struct device_node, kref);
903}
904
905/**
906 * of_node_release - release a dynamically allocated node
907 * @kref: kref element of the node to be released
908 *
909 * In of_node_put() this function is passed to kref_put()
910 * as the destructor.
911 */
912static void of_node_release(struct kref *kref)
913{
914 struct device_node *node = kref_to_device_node(kref);
915 struct property *prop = node->properties;
916
917 /* We should never be releasing nodes that haven't been detached. */
918 if (!of_node_check_flag(node, OF_DETACHED)) {
919 printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
920 node->full_name);
921 dump_stack();
922 kref_init(&node->kref);
923 return;
924 }
925
926 if (!of_node_check_flag(node, OF_DYNAMIC))
927 return;
928
929 while (prop) {
930 struct property *next = prop->next;
931 kfree(prop->name);
932 kfree(prop->value);
933 kfree(prop);
934 prop = next;
935
936 if (!prop) {
937 prop = node->deadprops;
938 node->deadprops = NULL;
939 }
940 }
941 kfree(node->full_name);
942 kfree(node->data);
943 kfree(node);
944}
945
946/**
947 * of_node_put - Decrement refcount of a node
948 * @node: Node to dec refcount, NULL is supported to
949 * simplify writing of callers
950 *
951 */
952void of_node_put(struct device_node *node)
953{
954 if (node)
955 kref_put(&node->kref, of_node_release);
956}
957EXPORT_SYMBOL(of_node_put);
958
959/*
960 * Plug a device node into the tree and global list.
961 */
962void of_attach_node(struct device_node *np)
963{
964 unsigned long flags;
965
966 write_lock_irqsave(&devtree_lock, flags);
967 np->sibling = np->parent->child;
968 np->allnext = allnodes;
969 np->parent->child = np;
970 allnodes = np;
971 write_unlock_irqrestore(&devtree_lock, flags);
972}
973
974/*
975 * "Unplug" a node from the device tree. The caller must hold
976 * a reference to the node. The memory associated with the node
977 * is not freed until its refcount goes to zero.
978 */
979void of_detach_node(struct device_node *np)
980{
981 struct device_node *parent;
982 unsigned long flags;
983
984 write_lock_irqsave(&devtree_lock, flags);
985
986 parent = np->parent;
987 if (!parent)
988 goto out_unlock;
989
990 if (allnodes == np)
991 allnodes = np->allnext;
992 else {
993 struct device_node *prev;
994 for (prev = allnodes;
995 prev->allnext != np;
996 prev = prev->allnext)
997 ;
998 prev->allnext = np->allnext;
999 }
1000
1001 if (parent->child == np)
1002 parent->child = np->sibling;
1003 else {
1004 struct device_node *prevsib;
1005 for (prevsib = np->parent->child;
1006 prevsib->sibling != np;
1007 prevsib = prevsib->sibling)
1008 ;
1009 prevsib->sibling = np->sibling;
1010 }
1011
1012 of_node_set_flag(np, OF_DETACHED);
1013
1014out_unlock:
1015 write_unlock_irqrestore(&devtree_lock, flags);
1016}
1017
1018/*
1019 * Add a property to a node
1020 */
1021int prom_add_property(struct device_node *np, struct property *prop)
1022{
1023 struct property **next;
1024 unsigned long flags;
1025
1026 prop->next = NULL;
1027 write_lock_irqsave(&devtree_lock, flags);
1028 next = &np->properties;
1029 while (*next) {
1030 if (strcmp(prop->name, (*next)->name) == 0) {
1031 /* duplicate ! don't insert it */
1032 write_unlock_irqrestore(&devtree_lock, flags);
1033 return -1;
1034 }
1035 next = &(*next)->next;
1036 }
1037 *next = prop;
1038 write_unlock_irqrestore(&devtree_lock, flags);
1039
1040#ifdef CONFIG_PROC_DEVICETREE
1041 /* try to add to proc as well if it was initialized */
1042 if (np->pde)
1043 proc_device_tree_add_prop(np->pde, prop);
1044#endif /* CONFIG_PROC_DEVICETREE */
1045
1046 return 0;
1047}
1048
1049/*
1050 * Remove a property from a node. Note that we don't actually
1051 * remove it, since we have given out who-knows-how-many pointers
1052 * to the data using get-property. Instead we just move the property
1053 * to the "dead properties" list, so it won't be found any more.
1054 */
1055int prom_remove_property(struct device_node *np, struct property *prop)
1056{
1057 struct property **next;
1058 unsigned long flags;
1059 int found = 0;
1060
1061 write_lock_irqsave(&devtree_lock, flags);
1062 next = &np->properties;
1063 while (*next) {
1064 if (*next == prop) {
1065 /* found the node */
1066 *next = prop->next;
1067 prop->next = np->deadprops;
1068 np->deadprops = prop;
1069 found = 1;
1070 break;
1071 }
1072 next = &(*next)->next;
1073 }
1074 write_unlock_irqrestore(&devtree_lock, flags);
1075
1076 if (!found)
1077 return -ENODEV;
1078
1079#ifdef CONFIG_PROC_DEVICETREE
1080 /* try to remove the proc node as well */
1081 if (np->pde)
1082 proc_device_tree_remove_prop(np->pde, prop);
1083#endif /* CONFIG_PROC_DEVICETREE */
1084
1085 return 0;
1086}
1087
1088/*
1089 * Update a property in a node. Note that we don't actually
1090 * remove it, since we have given out who-knows-how-many pointers
1091 * to the data using get-property. Instead we just move the property
1092 * to the "dead properties" list, and add the new property to the
1093 * property list
1094 */
1095int prom_update_property(struct device_node *np,
1096 struct property *newprop,
1097 struct property *oldprop)
1098{
1099 struct property **next;
1100 unsigned long flags;
1101 int found = 0;
1102
1103 write_lock_irqsave(&devtree_lock, flags);
1104 next = &np->properties;
1105 while (*next) {
1106 if (*next == oldprop) {
1107 /* found the node */
1108 newprop->next = oldprop->next;
1109 *next = newprop;
1110 oldprop->next = np->deadprops;
1111 np->deadprops = oldprop;
1112 found = 1;
1113 break;
1114 }
1115 next = &(*next)->next;
1116 }
1117 write_unlock_irqrestore(&devtree_lock, flags);
1118
1119 if (!found)
1120 return -ENODEV;
1121
1122#ifdef CONFIG_PROC_DEVICETREE
1123 /* try to add to proc as well if it was initialized */
1124 if (np->pde)
1125 proc_device_tree_update_prop(np->pde, newprop, oldprop);
1126#endif /* CONFIG_PROC_DEVICETREE */
1127
1128 return 0;
1129}
1130
1131#if defined(CONFIG_DEBUG_FS) && defined(DEBUG) 144#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
1132static struct debugfs_blob_wrapper flat_dt_blob; 145static struct debugfs_blob_wrapper flat_dt_blob;
1133 146
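
Most of prom.c disappears because the flattened-device-tree walking now lives in common code; the arch keeps only thin hooks that feed lmb_add()/lmb_alloc() and record the initrd bounds. The sketch below shows the cell-pair parsing the removed memory scan used to do and that the common code now performs before calling early_init_dt_add_memory_arch(); the sample "reg" data and cell widths are invented, and real code byte-swaps each big-endian cell (of_read_number).

#include <stdint.h>
#include <stdio.h>

static uint64_t next_cell(int cells, const uint32_t **p)
{
	uint64_t v = 0;

	while (cells--)
		v = (v << 32) | *(*p)++;	/* real code byte-swaps each cell */
	return v;
}

static void add_memory(uint64_t base, uint64_t size)
{
	/* stands in for early_init_dt_add_memory_arch() -> lmb_add() */
	printf("memory: base=0x%llx size=0x%llx\n",
	       (unsigned long long)base, (unsigned long long)size);
}

int main(void)
{
	/* pretend "reg" property with #address-cells = 1, #size-cells = 1 */
	static const uint32_t reg[] = { 0x00000000, 0x08000000 };	/* 128 MB @ 0 */
	const uint32_t *p = reg;
	const uint32_t *end = reg + sizeof(reg) / sizeof(reg[0]);
	int addr_cells = 1, size_cells = 1;

	while (end - p >= addr_cells + size_cells) {
		uint64_t base = next_cell(addr_cells, &p);
		uint64_t size = next_cell(size_cells, &p);

		if (size)
			add_memory(base, size);
	}
	return 0;
}
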
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
index ae0352ecd5a9..bf7e6c27e318 100644
--- a/arch/microblaze/kernel/prom_parse.c
+++ b/arch/microblaze/kernel/prom_parse.c
@@ -256,7 +256,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
256 if (ppdev == NULL) { 256 if (ppdev == NULL) {
257 struct pci_controller *host; 257 struct pci_controller *host;
258 host = pci_bus_to_host(pdev->bus); 258 host = pci_bus_to_host(pdev->bus);
259 ppnode = host ? host->arch_data : NULL; 259 ppnode = host ? host->dn : NULL;
260 /* No node for host bridge ? give up */ 260 /* No node for host bridge ? give up */
261 if (ppnode == NULL) 261 if (ppnode == NULL)
262 return -EINVAL; 262 return -EINVAL;
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index 4b3ac32754de..a4a7770c6140 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -75,29 +75,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
75{ 75{
76 int rval; 76 int rval;
77 unsigned long val = 0; 77 unsigned long val = 0;
78 unsigned long copied;
79 78
80 switch (request) { 79 switch (request) {
81 case PTRACE_PEEKTEXT: /* read word at location addr. */
82 case PTRACE_PEEKDATA:
83 pr_debug("PEEKTEXT/PEEKDATA at %08lX\n", addr);
84 copied = access_process_vm(child, addr, &val, sizeof(val), 0);
85 rval = -EIO;
86 if (copied != sizeof(val))
87 break;
88 rval = put_user(val, (unsigned long *)data);
89 break;
90
91 case PTRACE_POKETEXT: /* write the word at location addr. */
92 case PTRACE_POKEDATA:
93 pr_debug("POKETEXT/POKEDATA to %08lX\n", addr);
94 rval = 0;
95 if (access_process_vm(child, addr, &data, sizeof(data), 1)
96 == sizeof(data))
97 break;
98 rval = -EIO;
99 break;
100
101 /* Read/write the word at location ADDR in the registers. */ 80 /* Read/write the word at location ADDR in the registers. */
102 case PTRACE_PEEKUSR: 81 case PTRACE_PEEKUSR:
103 case PTRACE_POKEUSR: 82 case PTRACE_POKEUSR:
@@ -130,50 +109,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
130 if (rval == 0 && request == PTRACE_PEEKUSR) 109 if (rval == 0 && request == PTRACE_PEEKUSR)
131 rval = put_user(val, (unsigned long *)data); 110 rval = put_user(val, (unsigned long *)data);
132 break; 111 break;
133 /* Continue and stop at next (return from) syscall */
134 case PTRACE_SYSCALL:
135 pr_debug("PTRACE_SYSCALL\n");
136 case PTRACE_SINGLESTEP:
137 pr_debug("PTRACE_SINGLESTEP\n");
138 /* Restart after a signal. */
139 case PTRACE_CONT:
140 pr_debug("PTRACE_CONT\n");
141 rval = -EIO;
142 if (!valid_signal(data))
143 break;
144
145 if (request == PTRACE_SYSCALL)
146 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
147 else
148 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
149
150 child->exit_code = data;
151 pr_debug("wakeup_process\n");
152 wake_up_process(child);
153 rval = 0;
154 break;
155
156 /*
157 * make the child exit. Best I can do is send it a sigkill.
158 * perhaps it should be put in the status that it wants to
159 * exit.
160 */
161 case PTRACE_KILL:
162 pr_debug("PTRACE_KILL\n");
163 rval = 0;
164 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
165 break;
166 child->exit_code = SIGKILL;
167 wake_up_process(child);
168 break;
169
170 case PTRACE_DETACH: /* detach a process that was attached. */
171 pr_debug("PTRACE_DETACH\n");
172 rval = ptrace_detach(child, data);
173 break;
174 default: 112 default:
175 /* rval = ptrace_request(child, request, addr, data); noMMU */ 113 rval = ptrace_request(child, request, addr, data);
176 rval = -EIO;
177 } 114 }
178 return rval; 115 return rval;
179} 116}
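
With the arch-specific cases removed, PEEKDATA, CONT, KILL and the rest are now serviced by the generic ptrace_request() fallback. The small Linux userspace program below exercises exactly those requests against a forked child; it illustrates the syscall interface those removed cases implemented, not the kernel-side code, and error handling is kept minimal.

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static long answer = 42;		/* word the parent will peek at */

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* wait for the parent to attach */
		_exit(0);
	}

	waitpid(pid, NULL, 0);		/* child is now stopped */
	long val = ptrace(PTRACE_PEEKDATA, pid, &answer, NULL);
	printf("peeked %ld from child\n", val);

	ptrace(PTRACE_CONT, pid, NULL, NULL);	/* resume, suppress the stop */
	waitpid(pid, NULL, 0);
	return 0;
}
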
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
new file mode 100644
index 000000000000..a1721a33042e
--- /dev/null
+++ b/arch/microblaze/kernel/reset.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2009 PetaLogix
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 */
9
10#include <linux/init.h>
11#include <linux/of_platform.h>
12#include <asm/prom.h>
13
14/* Trigger specific functions */
15#ifdef CONFIG_GPIOLIB
16
17#include <linux/of_gpio.h>
18
19static int handle; /* reset pin handle */
20static unsigned int reset_val;
21
22static int of_reset_gpio_handle(void)
23{
24 int ret; /* variable which stored handle reset gpio pin */
25 struct device_node *root; /* root node */
26 struct device_node *gpio; /* gpio node */
27 struct of_gpio_chip *of_gc = NULL;
 28 enum of_gpio_flags flags;
29 const void *gpio_spec;
30
31 /* find out root node */
32 root = of_find_node_by_path("/");
33
 34 /* get a handle to the gpio node so the pin can be allocated */
35 ret = of_parse_phandles_with_args(root, "hard-reset-gpios",
36 "#gpio-cells", 0, &gpio, &gpio_spec);
37 if (ret) {
 38 pr_debug("%s: can't parse gpios property\n", __func__);
39 goto err0;
40 }
41
42 of_gc = gpio->data;
43 if (!of_gc) {
 44 pr_debug("%s: gpio controller %s isn't registered\n",
45 root->full_name, gpio->full_name);
46 ret = -ENODEV;
47 goto err1;
48 }
49
50 ret = of_gc->xlate(of_gc, root, gpio_spec, &flags);
51 if (ret < 0)
52 goto err1;
53
54 ret += of_gc->gc.base;
55err1:
56 of_node_put(gpio);
57err0:
58 pr_debug("%s exited with status %d\n", __func__, ret);
59 return ret;
60}
61
62void of_platform_reset_gpio_probe(void)
63{
64 int ret;
65 handle = of_reset_gpio_handle();
66
67 if (!gpio_is_valid(handle)) {
68 printk(KERN_INFO "Skipping unavailable RESET gpio %d (%s)\n",
69 handle, "reset");
70 }
71
72 ret = gpio_request(handle, "reset");
73 if (ret < 0) {
74 printk(KERN_INFO "GPIO pin is already allocated\n");
75 return;
76 }
77
78 /* get current setup value */
79 reset_val = gpio_get_value(handle);
 80 /* FIXME: it may be worth taking some action here */
81 pr_debug("Reset: Gpio output state: 0x%x\n", reset_val);
82
83 /* Setup GPIO as output */
84 ret = gpio_direction_output(handle, 0);
85 if (ret < 0)
86 goto err;
87
88 /* Setup output direction */
89 gpio_set_value(handle, 0);
90
91 printk(KERN_INFO "RESET: Registered gpio device: %d, current val: %d\n",
92 handle, reset_val);
93 return;
94err:
95 gpio_free(handle);
96 return;
97}
98
99
100static void gpio_system_reset(void)
101{
102 gpio_set_value(handle, 1 - reset_val);
103}
104#else
105#define gpio_system_reset() do {} while (0)
106void of_platform_reset_gpio_probe(void)
107{
108 return;
109}
110#endif
111
112void machine_restart(char *cmd)
113{
114 printk(KERN_NOTICE "Machine restart...\n");
115 gpio_system_reset();
116 dump_stack();
117 while (1)
118 ;
119}
120
121void machine_shutdown(void)
122{
123 printk(KERN_NOTICE "Machine shutdown...\n");
124 while (1)
125 ;
126}
127
128void machine_halt(void)
129{
130 printk(KERN_NOTICE "Machine halt...\n");
131 while (1)
132 ;
133}
134
135void machine_power_off(void)
136{
137 printk(KERN_NOTICE "Machine power off...\n");
138 while (1)
139 ;
140}
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 8c1e0f4dcf18..17c98dbcec88 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -22,7 +22,10 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/bug.h> 23#include <linux/bug.h>
24#include <linux/param.h> 24#include <linux/param.h>
25#include <linux/pci.h>
25#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/of_platform.h>
28#include <linux/dma-mapping.h>
26#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
27#include <asm/entry.h> 30#include <asm/entry.h>
28#include <asm/cpuinfo.h> 31#include <asm/cpuinfo.h>
@@ -52,16 +55,12 @@ void __init setup_arch(char **cmdline_p)
52 /* irq_early_init(); */ 55 /* irq_early_init(); */
53 setup_cpuinfo(); 56 setup_cpuinfo();
54 57
55 __invalidate_icache_all(); 58 microblaze_cache_init();
56 __enable_icache();
57
58 __invalidate_dcache_all();
59 __enable_dcache();
60
61 panic_timeout = 120;
62 59
63 setup_memory(); 60 setup_memory();
64 61
62 xilinx_pci_init();
63
65#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) 64#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
66 printk(KERN_NOTICE "Self modified code enable\n"); 65 printk(KERN_NOTICE "Self modified code enable\n");
67#endif 66#endif
@@ -93,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
93} 92}
94#endif /* CONFIG_MTD_UCLINUX_EBSS */ 93#endif /* CONFIG_MTD_UCLINUX_EBSS */
95 94
95#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
96#define eprintk early_printk
97#else
98#define eprintk printk
99#endif
100
96void __init machine_early_init(const char *cmdline, unsigned int ram, 101void __init machine_early_init(const char *cmdline, unsigned int ram,
97 unsigned int fdt, unsigned int msr) 102 unsigned int fdt, unsigned int msr)
98{ 103{
@@ -131,6 +136,8 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
131 strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE); 136 strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
132#endif 137#endif
133 138
139 lockdep_init();
140
134/* initialize device tree for usage in early_printk */ 141/* initialize device tree for usage in early_printk */
135 early_init_devtree((void *)_fdt_start); 142 early_init_devtree((void *)_fdt_start);
136 143
@@ -138,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
138 setup_early_printk(NULL); 145 setup_early_printk(NULL);
139#endif 146#endif
140 147
141 early_printk("Ramdisk addr 0x%08x, ", ram); 148 eprintk("Ramdisk addr 0x%08x, ", ram);
142 if (fdt) 149 if (fdt)
143 early_printk("FDT at 0x%08x\n", fdt); 150 eprintk("FDT at 0x%08x\n", fdt);
144 else 151 else
145 early_printk("Compiled-in FDT at 0x%08x\n", 152 eprintk("Compiled-in FDT at 0x%08x\n",
146 (unsigned int)_fdt_start); 153 (unsigned int)_fdt_start);
147 154
148#ifdef CONFIG_MTD_UCLINUX 155#ifdef CONFIG_MTD_UCLINUX
149 early_printk("Found romfs @ 0x%08x (0x%08x)\n", 156 eprintk("Found romfs @ 0x%08x (0x%08x)\n",
150 romfs_base, romfs_size); 157 romfs_base, romfs_size);
151 early_printk("#### klimit %p ####\n", old_klimit); 158 eprintk("#### klimit %p ####\n", old_klimit);
152 BUG_ON(romfs_size < 0); /* What else can we do? */ 159 BUG_ON(romfs_size < 0); /* What else can we do? */
153 160
154 early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", 161 eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
155 romfs_size, romfs_base, (unsigned)&_ebss); 162 romfs_size, romfs_base, (unsigned)&_ebss);
156 163
157 early_printk("New klimit: 0x%08x\n", (unsigned)klimit); 164 eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
158#endif 165#endif
159 166
160#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 167#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
161 if (msr) 168 if (msr)
162 early_printk("!!!Your kernel has setup MSR instruction but " 169 eprintk("!!!Your kernel has setup MSR instruction but "
163 "CPU don't have it %d\n", msr); 170 "CPU don't have it %d\n", msr);
164#else 171#else
165 if (!msr) 172 if (!msr)
166 early_printk("!!!Your kernel not setup MSR instruction but " 173 eprintk("!!!Your kernel not setup MSR instruction but "
167 "CPU have it %d\n", msr); 174 "CPU have it %d\n", msr);
168#endif 175#endif
169 176
@@ -187,31 +194,36 @@ static int microblaze_debugfs_init(void)
187arch_initcall(microblaze_debugfs_init); 194arch_initcall(microblaze_debugfs_init);
188#endif 195#endif
189 196
190void machine_restart(char *cmd) 197static int dflt_bus_notify(struct notifier_block *nb,
198 unsigned long action, void *data)
191{ 199{
192 printk(KERN_NOTICE "Machine restart...\n"); 200 struct device *dev = data;
193 dump_stack();
194 while (1)
195 ;
196}
197 201
198 void machine_shutdown(void) 202 /* We are only interested in device addition */
199{ 203 if (action != BUS_NOTIFY_ADD_DEVICE)
200 printk(KERN_NOTICE "Machine shutdown...\n"); 204 return 0;
201 while (1)
202 ;
203}
204 205
205void machine_halt(void) 206 set_dma_ops(dev, &dma_direct_ops);
206{ 207
207 printk(KERN_NOTICE "Machine halt...\n"); 208 return NOTIFY_DONE;
208 while (1)
209 ;
210} 209}
211 210
212void machine_power_off(void) 211static struct notifier_block dflt_plat_bus_notifier = {
212 .notifier_call = dflt_bus_notify,
213 .priority = INT_MAX,
214};
215
216static struct notifier_block dflt_of_bus_notifier = {
217 .notifier_call = dflt_bus_notify,
218 .priority = INT_MAX,
219};
220
221static int __init setup_bus_notifier(void)
213{ 222{
214 printk(KERN_NOTICE "Machine power off...\n"); 223 bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
215 while (1) 224 bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier);
216 ; 225
226 return 0;
217} 227}
228
229arch_initcall(setup_bus_notifier);
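
With the notifiers above installed, every platform and OF device picks up dma_direct_ops at BUS_NOTIFY_ADD_DEVICE time, so ordinary drivers can just use the generic DMA API. A minimal sketch (hypothetical driver code, not from this patch) of what that enables:

#include <linux/dma-mapping.h>

/* Hypothetical helper: dev, buf and len come from the caller's probe path. */
static int example_map_for_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t mapping;

	mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return -EIO;

	/* ... kick off the transfer ... */

	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
	return 0;
}
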
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 1c80e4fc40ce..d8d3bb396cd6 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -44,7 +44,6 @@
44 44
45asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall); 45asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall);
46 46
47
48asmlinkage long 47asmlinkage long
49sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 48sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
50 struct pt_regs *regs) 49 struct pt_regs *regs)
@@ -176,6 +175,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
176 struct rt_sigframe __user *frame; 175 struct rt_sigframe __user *frame;
177 int err = 0; 176 int err = 0;
178 int signal; 177 int signal;
178 unsigned long address = 0;
179#ifdef CONFIG_MMU
180 pmd_t *pmdp;
181 pte_t *ptep;
182#endif
179 183
180 frame = get_sigframe(ka, regs, sizeof(*frame)); 184 frame = get_sigframe(ka, regs, sizeof(*frame));
181 185
@@ -216,8 +220,29 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
216 Negative 8 offset because return is rtsd r15, 8 */ 220 Negative 8 offset because return is rtsd r15, 8 */
217 regs->r15 = ((unsigned long)frame->tramp)-8; 221 regs->r15 = ((unsigned long)frame->tramp)-8;
218 222
219 __invalidate_cache_sigtramp((unsigned long)frame->tramp); 223 address = ((unsigned long)frame->tramp);
220 224#ifdef CONFIG_MMU
225 pmdp = pmd_offset(pud_offset(
226 pgd_offset(current->mm, address),
227 address), address);
228
229 preempt_disable();
230 ptep = pte_offset_map(pmdp, address);
231 if (pte_present(*ptep)) {
232 address = (unsigned long) page_address(pte_page(*ptep));
233 /* MS: add the offset within the page */
234 address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
235 /* MS: the address is virtual */
236 address = virt_to_phys(address);
237 invalidate_icache_range(address, address + 8);
238 flush_dcache_range(address, address + 8);
239 }
240 pte_unmap(ptep);
241 preempt_enable();
242#else
243 flush_icache_range(address, address + 8);
244 flush_dcache_range(address, address + 8);
245#endif
221 if (err) 246 if (err)
222 goto give_sigsegv; 247 goto give_sigsegv;
223 248
@@ -233,6 +258,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
233 258
234 set_fs(USER_DS); 259 set_fs(USER_DS);
235 260
261 /* the tracer may want to single-step inside the handler */
262 if (test_thread_flag(TIF_SINGLESTEP))
263 ptrace_notify(SIGTRAP);
264
236#ifdef DEBUG_SIG 265#ifdef DEBUG_SIG
237 printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n", 266 printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
238 current->comm, current->pid, frame, regs->pc); 267 current->comm, current->pid, frame, regs->pc);
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c
new file mode 100644
index 000000000000..123692f22647
--- /dev/null
+++ b/arch/microblaze/kernel/stacktrace.c
@@ -0,0 +1,65 @@
1/*
2 * Stack trace support for Microblaze.
3 *
4 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2009 PetaLogix
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11
12#include <linux/sched.h>
13#include <linux/stacktrace.h>
14#include <linux/thread_info.h>
15#include <linux/ptrace.h>
16#include <linux/module.h>
17
18/* FIXME initial support */
19void save_stack_trace(struct stack_trace *trace)
20{
21 unsigned long *sp;
22 unsigned long addr;
23 asm("addik %0, r1, 0" : "=r" (sp));
24
25 while (!kstack_end(sp)) {
26 addr = *sp++;
27 if (__kernel_text_address(addr)) {
28 if (trace->skip > 0)
29 trace->skip--;
30 else
31 trace->entries[trace->nr_entries++] = addr;
32
33 if (trace->nr_entries >= trace->max_entries)
34 break;
35 }
36 }
37}
38EXPORT_SYMBOL_GPL(save_stack_trace);
39
40void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
41{
42 unsigned int *sp;
43 unsigned long addr;
44
45 struct thread_info *ti = task_thread_info(tsk);
46
47 if (tsk == current)
48 asm("addik %0, r1, 0" : "=r" (sp));
49 else
50 sp = (unsigned int *)ti->cpu_context.r1;
51
52 while (!kstack_end(sp)) {
53 addr = *sp++;
54 if (__kernel_text_address(addr)) {
55 if (trace->skip > 0)
56 trace->skip--;
57 else
58 trace->entries[trace->nr_entries++] = addr;
59
60 if (trace->nr_entries >= trace->max_entries)
61 break;
62 }
63 }
64}
65EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
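
The two helpers above only fill a caller-supplied buffer; a rough usage sketch (hypothetical, not part of this commit), assuming the generic print_stack_trace() helper from <linux/stacktrace.h>:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* skip dump_current_stack() itself */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}
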
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 07cabed4b947..f4e00b7f1259 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -30,6 +30,7 @@
30#include <linux/semaphore.h> 30#include <linux/semaphore.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <linux/unistd.h> 32#include <linux/unistd.h>
33#include <linux/slab.h>
33 34
34#include <asm/syscalls.h> 35#include <asm/syscalls.h>
35 36
@@ -62,46 +63,14 @@ out:
62 return error; 63 return error;
63} 64}
64 65
65asmlinkage long
66sys_mmap2(unsigned long addr, unsigned long len,
67 unsigned long prot, unsigned long flags,
68 unsigned long fd, unsigned long pgoff)
69{
70 struct file *file = NULL;
71 int ret = -EBADF;
72
73 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
74 if (!(flags & MAP_ANONYMOUS)) {
75 file = fget(fd);
76 if (!file) {
77 printk(KERN_INFO "no fd in mmap\r\n");
78 goto out;
79 }
80 }
81
82 down_write(&current->mm->mmap_sem);
83 ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
84 up_write(&current->mm->mmap_sem);
85 if (file)
86 fput(file);
87out:
88 return ret;
89}
90
91asmlinkage long sys_mmap(unsigned long addr, unsigned long len, 66asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
92 unsigned long prot, unsigned long flags, 67 unsigned long prot, unsigned long flags,
93 unsigned long fd, off_t pgoff) 68 unsigned long fd, off_t pgoff)
94{ 69{
95 int err = -EINVAL; 70 if (pgoff & ~PAGE_MASK)
96 71 return -EINVAL;
97 if (pgoff & ~PAGE_MASK) {
98 printk(KERN_INFO "no pagemask in mmap\r\n");
99 goto out;
100 }
101 72
102 err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); 73 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
103out:
104 return err;
105} 74}
106 75
107/* 76/*
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index ecec19155135..03376dc814c9 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -183,7 +183,7 @@ ENTRY(sys_call_table)
183 .long sys_rt_sigpending 183 .long sys_rt_sigpending
184 .long sys_rt_sigtimedwait 184 .long sys_rt_sigtimedwait
185 .long sys_rt_sigqueueinfo 185 .long sys_rt_sigqueueinfo
186 .long sys_rt_sigsuspend_wrapper 186 .long sys_rt_sigsuspend
187 .long sys_pread64 /* 180 */ 187 .long sys_pread64 /* 180 */
188 .long sys_pwrite64 188 .long sys_pwrite64
189 .long sys_chown 189 .long sys_chown
@@ -196,7 +196,7 @@ ENTRY(sys_call_table)
196 .long sys_ni_syscall /* reserved for streams2 */ 196 .long sys_ni_syscall /* reserved for streams2 */
197 .long sys_vfork /* 190 */ 197 .long sys_vfork /* 190 */
198 .long sys_getrlimit 198 .long sys_getrlimit
199 .long sys_mmap2 /* mmap2 */ 199 .long sys_mmap_pgoff /* mmap2 */
200 .long sys_truncate64 200 .long sys_truncate64
201 .long sys_ftruncate64 201 .long sys_ftruncate64
202 .long sys_stat64 /* 195 */ 202 .long sys_stat64 /* 195 */
@@ -303,7 +303,7 @@ ENTRY(sys_call_table)
303 .long sys_mkdirat 303 .long sys_mkdirat
304 .long sys_mknodat 304 .long sys_mknodat
305 .long sys_fchownat 305 .long sys_fchownat
306 .long sys_ni_syscall 306 .long sys_futimesat
307 .long sys_fstatat64 /* 300 */ 307 .long sys_fstatat64 /* 300 */
308 .long sys_unlinkat 308 .long sys_unlinkat
309 .long sys_renameat 309 .long sys_renameat
@@ -366,8 +366,9 @@ ENTRY(sys_call_table)
366 .long sys_shutdown 366 .long sys_shutdown
367 .long sys_sendmsg /* 360 */ 367 .long sys_sendmsg /* 360 */
368 .long sys_recvmsg 368 .long sys_recvmsg
369 .long sys_ni_syscall 369 .long sys_accept4
370 .long sys_ni_syscall 370 .long sys_ni_syscall
371 .long sys_ni_syscall 371 .long sys_ni_syscall
372 .long sys_rt_tgsigqueueinfo /* 365 */ 372 .long sys_rt_tgsigqueueinfo /* 365 */
373 .long sys_perf_event_open 373 .long sys_perf_event_open
374 .long sys_recvmmsg
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 5499deae7fa6..ed61b2f17719 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -183,6 +183,31 @@ static cycle_t microblaze_read(struct clocksource *cs)
183 return (cycle_t) (in_be32(TIMER_BASE + TCR1)); 183 return (cycle_t) (in_be32(TIMER_BASE + TCR1));
184} 184}
185 185
186static struct timecounter microblaze_tc = {
187 .cc = NULL,
188};
189
190static cycle_t microblaze_cc_read(const struct cyclecounter *cc)
191{
192 return microblaze_read(NULL);
193}
194
195static struct cyclecounter microblaze_cc = {
196 .read = microblaze_cc_read,
197 .mask = CLOCKSOURCE_MASK(32),
198 .shift = 24,
199};
200
201int __init init_microblaze_timecounter(void)
202{
203 microblaze_cc.mult = div_sc(cpuinfo.cpu_clock_freq, NSEC_PER_SEC,
204 microblaze_cc.shift);
205
206 timecounter_init(&microblaze_tc, &microblaze_cc, sched_clock());
207
208 return 0;
209}
210
186static struct clocksource clocksource_microblaze = { 211static struct clocksource clocksource_microblaze = {
187 .name = "microblaze_clocksource", 212 .name = "microblaze_clocksource",
188 .rating = 300, 213 .rating = 300,
@@ -204,6 +229,9 @@ static int __init microblaze_clocksource_init(void)
204 out_be32(TIMER_BASE + TCSR1, in_be32(TIMER_BASE + TCSR1) & ~TCSR_ENT); 229 out_be32(TIMER_BASE + TCSR1, in_be32(TIMER_BASE + TCSR1) & ~TCSR_ENT);
205 /* start timer1 - up counting without interrupt */ 230 /* start timer1 - up counting without interrupt */
206 out_be32(TIMER_BASE + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT); 231 out_be32(TIMER_BASE + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT);
232
233 /* register timecounter - for ftrace support */
234 init_microblaze_timecounter();
207 return 0; 235 return 0;
208} 236}
209 237
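
As a small sketch (not in this patch), code living in timer.c could read elapsed nanoseconds from the newly registered timecounter with the generic helper:

	/* Sketch only: microblaze_tc is the file-static timecounter set up above. */
	u64 ns = timecounter_read(&microblaze_tc);
	pr_debug("timecounter: %llu ns since init\n", (unsigned long long)ns);
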
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf805f31b..75e49202a5ed 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@ void trap_init(void)
22 __enable_hw_exceptions(); 22 __enable_hw_exceptions();
23} 23}
24 24
25static int kstack_depth_to_print = 24; 25static unsigned long kstack_depth_to_print = 24;
26 26
27static int __init kstack_setup(char *s) 27static int __init kstack_setup(char *s)
28{ 28{
29 kstack_depth_to_print = strict_strtoul(s, 0, NULL); 29 return !strict_strtoul(s, 0, &kstack_depth_to_print);
30
31 return 1;
32} 30}
33__setup("kstack=", kstack_setup); 31__setup("kstack=", kstack_setup);
34 32
@@ -97,37 +95,3 @@ void dump_stack(void)
97 show_stack(NULL, NULL); 95 show_stack(NULL, NULL);
98} 96}
99EXPORT_SYMBOL(dump_stack); 97EXPORT_SYMBOL(dump_stack);
100
101#ifdef CONFIG_MMU
102void __bug(const char *file, int line, void *data)
103{
104 if (data)
105 printk(KERN_CRIT "kernel BUG at %s:%d (data = %p)!\n",
106 file, line, data);
107 else
108 printk(KERN_CRIT "kernel BUG at %s:%d!\n", file, line);
109
110 machine_halt();
111}
112
113int bad_trap(int trap_num, struct pt_regs *regs)
114{
115 printk(KERN_CRIT
116 "unimplemented trap %d called at 0x%08lx, pid %d!\n",
117 trap_num, regs->pc, current->pid);
118 return -ENOSYS;
119}
120
121int debug_trap(struct pt_regs *regs)
122{
123 int i;
124 printk(KERN_CRIT "debug trap\n");
125 for (i = 0; i < 32; i++) {
126 /* printk("r%i:%08X\t",i,regs->gpr[i]); */
127 if ((i % 4) == 3)
128 printk(KERN_CRIT "\n");
129 }
130 printk(KERN_CRIT "pc:%08lX\tmsr:%08lX\n", regs->pc, regs->msr);
131 return -ENOSYS;
132}
133#endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index e704188d7855..db72d7124602 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -24,13 +24,15 @@ SECTIONS {
24 .text : AT(ADDR(.text) - LOAD_OFFSET) { 24 .text : AT(ADDR(.text) - LOAD_OFFSET) {
25 _text = . ; 25 _text = . ;
26 _stext = . ; 26 _stext = . ;
27 *(.text .text.*) 27 HEAD_TEXT
28 TEXT_TEXT
28 *(.fixup) 29 *(.fixup)
29 EXIT_TEXT 30 EXIT_TEXT
30 EXIT_CALL 31 EXIT_CALL
31 SCHED_TEXT 32 SCHED_TEXT
32 LOCK_TEXT 33 LOCK_TEXT
33 KPROBES_TEXT 34 KPROBES_TEXT
35 IRQENTRY_TEXT
34 . = ALIGN (4) ; 36 . = ALIGN (4) ;
35 _etext = . ; 37 _etext = . ;
36 } 38 }
@@ -86,6 +88,7 @@ SECTIONS {
86 _KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ; 88 _KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ;
87 } 89 }
88 90
91 . = ALIGN(PAGE_SIZE);
89 __init_begin = .; 92 __init_begin = .;
90 93
91 INIT_TEXT_SECTION(PAGE_SIZE) 94 INIT_TEXT_SECTION(PAGE_SIZE)
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index b579db068c06..4dfe47d3cd91 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,5 +10,4 @@ else
10lib-y += memcpy.o memmove.o 10lib-y += memcpy.o memmove.o
11endif 11endif
12 12
13lib-$(CONFIG_NO_MMU) += uaccess.o 13lib-y += uaccess_old.o
14lib-$(CONFIG_MMU) += uaccess_old.o
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 02e3ab4eddf3..fdc48bb065d8 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -30,8 +30,9 @@
30 */ 30 */
31 31
32#include <linux/linkage.h> 32#include <linux/linkage.h>
33 33 .text
34 .globl memcpy 34 .globl memcpy
35 .type memcpy, @function
35 .ent memcpy 36 .ent memcpy
36 37
37memcpy: 38memcpy:
@@ -345,9 +346,11 @@ a_done:
345 rtsd r15, 8 346 rtsd r15, 8
346 nop 347 nop
347 348
349.size memcpy, . - memcpy
348.end memcpy 350.end memcpy
349/*----------------------------------------------------------------------------*/ 351/*----------------------------------------------------------------------------*/
350 .globl memmove 352 .globl memmove
353 .type memmove, @function
351 .ent memmove 354 .ent memmove
352 355
353memmove: 356memmove:
@@ -659,4 +662,5 @@ d_done:
659 rtsd r15, 8 662 rtsd r15, 8
660 nop 663 nop
661 664
665.size memmove, . - memmove
662.end memmove 666.end memmove
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index 6a907c58a4bc..014bac92bdff 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -9,7 +9,7 @@
9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from 9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567 10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
11 * 11 *
12 * Attempts were made, unsuccesfully, to contact the original 12 * Attempts were made, unsuccessfully, to contact the original
13 * author of this code (Michael Morrow, Intel). Below is the original 13 * author of this code (Michael Morrow, Intel). Below is the original
14 * copyright notice. 14 * copyright notice.
15 * 15 *
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
53 const uint32_t *i_src; 53 const uint32_t *i_src;
54 uint32_t *i_dst; 54 uint32_t *i_dst;
55 55
56 if (c >= 4) { 56 if (likely(c >= 4)) {
57 unsigned value, buf_hold; 57 unsigned value, buf_hold;
58 58
59 /* Align the dstination to a word boundry. */ 59 /* Align the dstination to a word boundry. */
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index d4e9f49a71f7..0929198c5e68 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -9,7 +9,7 @@
9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from 9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567 10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
11 * 11 *
12 * Attempts were made, unsuccesfully, to contact the original 12 * Attempts were made, unsuccessfully, to contact the original
13 * author of this code (Michael Morrow, Intel). Below is the original 13 * author of this code (Michael Morrow, Intel). Below is the original
14 * copyright notice. 14 * copyright notice.
15 * 15 *
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index 941dc8f94b03..ecfb663e1fc1 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -9,7 +9,7 @@
9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from 9 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567 10 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
11 * 11 *
12 * Attempts were made, unsuccesfully, to contact the original 12 * Attempts were made, unsuccessfully, to contact the original
13 * author of this code (Michael Morrow, Intel). Below is the original 13 * author of this code (Michael Morrow, Intel). Below is the original
14 * copyright notice. 14 * copyright notice.
15 * 15 *
@@ -33,22 +33,23 @@
33#ifdef __HAVE_ARCH_MEMSET 33#ifdef __HAVE_ARCH_MEMSET
34void *memset(void *v_src, int c, __kernel_size_t n) 34void *memset(void *v_src, int c, __kernel_size_t n)
35{ 35{
36
37 char *src = v_src; 36 char *src = v_src;
38#ifdef CONFIG_OPT_LIB_FUNCTION 37#ifdef CONFIG_OPT_LIB_FUNCTION
39 uint32_t *i_src; 38 uint32_t *i_src;
40 uint32_t w32; 39 uint32_t w32 = 0;
41#endif 40#endif
42 /* Truncate c to 8 bits */ 41 /* Truncate c to 8 bits */
43 c = (c & 0xFF); 42 c = (c & 0xFF);
44 43
45#ifdef CONFIG_OPT_LIB_FUNCTION 44#ifdef CONFIG_OPT_LIB_FUNCTION
46 /* Make a repeating word out of it */ 45 if (unlikely(c)) {
47 w32 = c; 46 /* Make a repeating word out of it */
48 w32 |= w32 << 8; 47 w32 = c;
49 w32 |= w32 << 16; 48 w32 |= w32 << 8;
49 w32 |= w32 << 16;
50 }
50 51
51 if (n >= 4) { 52 if (likely(n >= 4)) {
52 /* Align the destination to a word boundary */ 53 /* Align the destination to a word boundary */
53 /* This is done in an endian independant manner */ 54 /* This is done in an endian independant manner */
54 switch ((unsigned) src & 3) { 55 switch ((unsigned) src & 3) {
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
deleted file mode 100644
index 8eb9df5a26c9..000000000000
--- a/arch/microblaze/lib/uaccess.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#include <linux/string.h>
10#include <asm/uaccess.h>
11
12#include <asm/bug.h>
13
14long strnlen_user(const char __user *src, long count)
15{
16 return strlen(src) + 1;
17}
18
19#define __do_strncpy_from_user(dst, src, count, res) \
20 do { \
21 char *tmp; \
22 strncpy(dst, src, count); \
23 for (tmp = dst; *tmp && count > 0; tmp++, count--) \
24 ; \
25 res = (tmp - dst); \
26 } while (0)
27
28long __strncpy_from_user(char *dst, const char __user *src, long count)
29{
30 long res;
31 __do_strncpy_from_user(dst, src, count, res);
32 return res;
33}
34
35long strncpy_from_user(char *dst, const char __user *src, long count)
36{
37 long res = -EFAULT;
38 if (access_ok(VERIFY_READ, src, 1))
39 __do_strncpy_from_user(dst, src, count, res);
40 return res;
41}
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index 67f991c14b8a..5810cec54a7a 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -22,6 +22,7 @@
22 22
23 .text 23 .text
24.globl __strncpy_user; 24.globl __strncpy_user;
25.type __strncpy_user, @function
25.align 4; 26.align 4;
26__strncpy_user: 27__strncpy_user:
27 28
@@ -50,7 +51,7 @@ __strncpy_user:
503: 513:
51 rtsd r15,8 52 rtsd r15,8
52 nop 53 nop
53 54 .size __strncpy_user, . - __strncpy_user
54 55
55 .section .fixup, "ax" 56 .section .fixup, "ax"
56 .align 2 57 .align 2
@@ -72,6 +73,7 @@ __strncpy_user:
72 73
73 .text 74 .text
74.globl __strnlen_user; 75.globl __strnlen_user;
76.type __strnlen_user, @function
75.align 4; 77.align 4;
76__strnlen_user: 78__strnlen_user:
77 addik r3,r6,0 79 addik r3,r6,0
@@ -90,7 +92,7 @@ __strnlen_user:
903: 923:
91 rtsd r15,8 93 rtsd r15,8
92 nop 94 nop
93 95 .size __strnlen_user, . - __strnlen_user
94 96
95 .section .fixup,"ax" 97 .section .fixup,"ax"
964: 984:
@@ -108,6 +110,7 @@ __strnlen_user:
108 */ 110 */
109 .text 111 .text
110.globl __copy_tofrom_user; 112.globl __copy_tofrom_user;
113.type __copy_tofrom_user, @function
111.align 4; 114.align 4;
112__copy_tofrom_user: 115__copy_tofrom_user:
113 /* 116 /*
@@ -116,20 +119,34 @@ __copy_tofrom_user:
116 * r7, r3 - count 119 * r7, r3 - count
117 * r4 - tempval 120 * r4 - tempval
118 */ 121 */
119 addik r3,r7,0 122 beqid r7, 3f /* zero size is not likely */
120 beqi r3,3f 123 andi r3, r7, 0x3 /* filter add count */
1211: 124 bneid r3, 4f /* if is odd value then byte copying */
122 lbu r4,r6,r0 125 or r3, r5, r6 /* find if is any to/from unaligned */
123 addik r6,r6,1 126 andi r3, r3, 0x3 /* mask unaligned */
1242: 127 bneid r3, 1f /* it is unaligned -> then jump */
125 sb r4,r5,r0 128 or r3, r0, r0
126 addik r3,r3,-1 129
127 bneid r3,1b 130/* at least one 4 byte copy */
128 addik r5,r5,1 /* delay slot */ 1315: lw r4, r6, r3
1326: sw r4, r5, r3
133 addik r7, r7, -4
134 bneid r7, 5b
135 addik r3, r3, 4
136 addik r3, r7, 0
137 rtsd r15, 8
138 nop
1394: or r3, r0, r0
1401: lbu r4,r6,r3
1412: sb r4,r5,r3
142 addik r7,r7,-1
143 bneid r7,1b
144 addik r3,r3,1 /* delay slot */
1293: 1453:
146 addik r3,r7,0
130 rtsd r15,8 147 rtsd r15,8
131 nop 148 nop
132 149 .size __copy_tofrom_user, . - __copy_tofrom_user
133 150
134 .section __ex_table,"a" 151 .section __ex_table,"a"
135 .word 1b,3b,2b,3b 152 .word 1b,3b,2b,3b,5b,3b,6b,3b
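
For readers who do not follow MicroBlaze assembly, this is roughly what the rewritten __copy_tofrom_user above now does, sketched in C; the real routine additionally returns the number of bytes left uncopied when an exception fixup fires:

/*
 * C-level sketch (not in the patch): copy by words when the size is a
 * multiple of 4 and both pointers are word-aligned, otherwise fall back
 * to byte copies.
 */
static unsigned long copy_tofrom_sketch(void *to, const void *from,
					unsigned long n)
{
	if (n && !(n & 3) &&
	    !(((unsigned long)to | (unsigned long)from) & 3)) {
		u32 *d = to;
		const u32 *s = from;

		for (; n; n -= 4)
			*d++ = *s++;
	} else {
		u8 *d = to;
		const u8 *s = from;

		for (; n; n--)
			*d++ = *s++;
	}
	return n;	/* 0 here; faults are handled via __ex_table in the real code */
}
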
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 6c8a924d9e26..09c49ed87235 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -2,6 +2,6 @@
2# Makefile 2# Makefile
3# 3#
4 4
5obj-y := init.o 5obj-y := consistent.o init.o
6 6
7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o 7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
new file mode 100644
index 000000000000..5a59dad62bd2
--- /dev/null
+++ b/arch/microblaze/mm/consistent.c
@@ -0,0 +1,255 @@
1/*
2 * Microblaze support for cache consistent memory.
3 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
4 * Copyright (C) 2010 PetaLogix
5 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
6 *
7 * Based on PowerPC version derived from arch/arm/mm/consistent.c
8 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
9 * Copyright (C) 2000 Russell King
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/module.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/ptrace.h>
24#include <linux/mman.h>
25#include <linux/mm.h>
26#include <linux/swap.h>
27#include <linux/stddef.h>
28#include <linux/vmalloc.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/gfp.h>
36
37#include <asm/pgalloc.h>
38#include <linux/io.h>
39#include <linux/hardirq.h>
40#include <asm/mmu_context.h>
41#include <asm/mmu.h>
42#include <linux/uaccess.h>
43#include <asm/pgtable.h>
44#include <asm/cpuinfo.h>
45#include <asm/tlbflush.h>
46
47#ifndef CONFIG_MMU
48/* I have to use dcache values because I can't rely on the ram size */
49# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
50#endif
51
52/*
53 * Consistent memory allocators. Used for DMA devices that want to
54 * share uncached memory with the processor core.
55 * My crufty no-MMU approach is simple. In the HW platform we can optionally
56 * mirror the DDR up above the processor cacheable region. So, memory accessed
57 * in this mirror region will not be cached. It's allocated from the same
58 * pool as normal memory, but the handle we return is shifted up into the
59 * uncached region. This will no doubt cause big problems if memory allocated
60 * here is not also freed properly. -- JW
61 */
62void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
63{
64 unsigned long order, vaddr;
65 void *ret;
66 unsigned int i, err = 0;
67 struct page *page, *end;
68
69#ifdef CONFIG_MMU
70 phys_addr_t pa;
71 struct vm_struct *area;
72 unsigned long va;
73#endif
74
75 if (in_interrupt())
76 BUG();
77
78 /* Only allocate page size areas. */
79 size = PAGE_ALIGN(size);
80 order = get_order(size);
81
82 vaddr = __get_free_pages(gfp, order);
83 if (!vaddr)
84 return NULL;
85
86 /*
87 * we need to ensure that there are no cachelines in use,
88 * or worse dirty in this area.
89 */
90 flush_dcache_range(virt_to_phys((void *)vaddr),
91 virt_to_phys((void *)vaddr) + size);
92
93#ifndef CONFIG_MMU
94 ret = (void *)vaddr;
95 /*
96 * Here's the magic! Note if the uncached shadow is not implemented,
97 * it's up to the calling code to also test that condition and make
98 * other arrangements, such as manually flushing the cache and so on.
99 */
100# ifdef CONFIG_XILINX_UNCACHED_SHADOW
101 ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
102# endif
103 if ((unsigned int)ret > cpuinfo.dcache_base &&
104 (unsigned int)ret < cpuinfo.dcache_high)
105 printk(KERN_WARNING
106 "ERROR: Your cache coherent area is CACHED!!!\n");
107
108 /* dma_handle is same as physical (shadowed) address */
109 *dma_handle = (dma_addr_t)ret;
110#else
111 /* Allocate some common virtual space to map the new pages. */
112 area = get_vm_area(size, VM_ALLOC);
113 if (!area) {
114 free_pages(vaddr, order);
115 return NULL;
116 }
117 va = (unsigned long) area->addr;
118 ret = (void *)va;
119
120 /* This gives us the real physical address of the first page. */
121 *dma_handle = pa = virt_to_bus((void *)vaddr);
122#endif
123
124 /*
125 * free wasted pages. We skip the first page since we know
126 * that it will have count = 1 and won't require freeing.
127 * We also mark the pages in use as reserved so that
128 * remap_page_range works.
129 */
130 page = virt_to_page(vaddr);
131 end = page + (1 << order);
132
133 split_page(page, order);
134
135 for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
136#ifdef CONFIG_MMU
137 /* MS: This is the whole magic - use cache inhibit pages */
138 err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
139#endif
140
141 SetPageReserved(page);
142 page++;
143 }
144
145 /* Free the otherwise unused pages. */
146 while (page < end) {
147 __free_page(page);
148 page++;
149 }
150
151 if (err) {
152 free_pages(vaddr, order);
153 return NULL;
154 }
155
156 return ret;
157}
158EXPORT_SYMBOL(consistent_alloc);
159
160/*
161 * free page(s) as defined by the above mapping.
162 */
163void consistent_free(size_t size, void *vaddr)
164{
165 struct page *page;
166
167 if (in_interrupt())
168 BUG();
169
170 size = PAGE_ALIGN(size);
171
172#ifndef CONFIG_MMU
173 /* Clear SHADOW_MASK bit in address, and free as per usual */
174# ifdef CONFIG_XILINX_UNCACHED_SHADOW
175 vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
176# endif
177 page = virt_to_page(vaddr);
178
179 do {
180 ClearPageReserved(page);
181 __free_page(page);
182 page++;
183 } while (size -= PAGE_SIZE);
184#else
185 do {
186 pte_t *ptep;
187 unsigned long pfn;
188
189 ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
190 (unsigned int)vaddr),
191 (unsigned int)vaddr),
192 (unsigned int)vaddr);
193 if (!pte_none(*ptep) && pte_present(*ptep)) {
194 pfn = pte_pfn(*ptep);
195 pte_clear(&init_mm, (unsigned int)vaddr, ptep);
196 if (pfn_valid(pfn)) {
197 page = pfn_to_page(pfn);
198
199 ClearPageReserved(page);
200 __free_page(page);
201 }
202 }
203 vaddr += PAGE_SIZE;
204 } while (size -= PAGE_SIZE);
205
206 /* flush tlb */
207 flush_tlb_all();
208#endif
209}
210EXPORT_SYMBOL(consistent_free);
211
212/*
213 * make an area consistent.
214 */
215void consistent_sync(void *vaddr, size_t size, int direction)
216{
217 unsigned long start;
218 unsigned long end;
219
220 start = (unsigned long)vaddr;
221
222 /* Convert start address back down to unshadowed memory region */
223#ifdef CONFIG_XILINX_UNCACHED_SHADOW
224 start &= ~UNCACHED_SHADOW_MASK;
225#endif
226 end = start + size;
227
228 switch (direction) {
229 case PCI_DMA_NONE:
230 BUG();
231 case PCI_DMA_FROMDEVICE: /* invalidate only */
232 invalidate_dcache_range(start, end);
233 break;
234 case PCI_DMA_TODEVICE: /* writeback only */
235 flush_dcache_range(start, end);
236 break;
237 case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
238 flush_dcache_range(start, end);
239 break;
240 }
241}
242EXPORT_SYMBOL(consistent_sync);
243
244/*
245 * consistent_sync_page makes memory consistent. Identical
246 * to consistent_sync, but takes a struct page instead of a
247 * virtual address
248 */
249void consistent_sync_page(struct page *page, unsigned long offset,
250 size_t size, int direction)
251{
252 unsigned long start = (unsigned long)page_address(page) + offset;
253 consistent_sync((void *)start, size, direction);
254}
255EXPORT_SYMBOL(consistent_sync_page);
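
A minimal usage sketch (assumed driver code, not part of this commit) pairing the allocator and free routine added above; the returned buffer is used by the CPU while the bus handle is handed to the device:

#include <linux/gfp.h>
#include <linux/mm.h>
/* consistent_alloc()/consistent_free() prototypes come from the arch headers */

static int example_setup_dma_buffer(void)
{
	dma_addr_t dma_handle;
	void *buf;

	/* one page of uncached, DMA-visible memory */
	buf = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &dma_handle);
	if (!buf)
		return -ENOMEM;

	/* ... hand dma_handle to the device, touch buf from the CPU ... */

	consistent_free(PAGE_SIZE, buf);
	return 0;
}
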
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a66ff2..bab922993185 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
106 regs->esr = error_code; 106 regs->esr = error_code;
107 107
108 /* On a kernel SLB miss we can only check for a valid exception entry */ 108 /* On a kernel SLB miss we can only check for a valid exception entry */
109 if (kernel_mode(regs) && (address >= TASK_SIZE)) { 109 if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
110 printk(KERN_WARNING "kernel task_size exceed"); 110 printk(KERN_WARNING "kernel task_size exceed");
111 _exception(SIGSEGV, regs, code, address); 111 _exception(SIGSEGV, regs, code, address);
112 } 112 }
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
122 } 122 }
123#endif /* CONFIG_KGDB */ 123#endif /* CONFIG_KGDB */
124 124
125 if (in_atomic() || !mm) { 125 if (unlikely(in_atomic() || !mm)) {
126 if (kernel_mode(regs)) 126 if (kernel_mode(regs))
127 goto bad_area_nosemaphore; 127 goto bad_area_nosemaphore;
128 128
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
150 * source. If this is invalid we can skip the address space check, 150 * source. If this is invalid we can skip the address space check,
151 * thus avoiding the deadlock. 151 * thus avoiding the deadlock.
152 */ 152 */
153 if (!down_read_trylock(&mm->mmap_sem)) { 153 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
154 if (kernel_mode(regs) && !search_exception_tables(regs->pc)) 154 if (kernel_mode(regs) && !search_exception_tables(regs->pc))
155 goto bad_area_nosemaphore; 155 goto bad_area_nosemaphore;
156 156
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
158 } 158 }
159 159
160 vma = find_vma(mm, address); 160 vma = find_vma(mm, address);
161 if (!vma) 161 if (unlikely(!vma))
162 goto bad_area; 162 goto bad_area;
163 163
164 if (vma->vm_start <= address) 164 if (vma->vm_start <= address)
165 goto good_area; 165 goto good_area;
166 166
167 if (!(vma->vm_flags & VM_GROWSDOWN)) 167 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
168 goto bad_area; 168 goto bad_area;
169 169
170 if (!is_write) 170 if (unlikely(!is_write))
171 goto bad_area; 171 goto bad_area;
172 172
173 /* 173 /*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
179 * before setting the user r1. Thus we allow the stack to 179 * before setting the user r1. Thus we allow the stack to
180 * expand to 1MB without further checks. 180 * expand to 1MB without further checks.
181 */ 181 */
182 if (address + 0x100000 < vma->vm_end) { 182 if (unlikely(address + 0x100000 < vma->vm_end)) {
183 183
184 /* get user regs even if this fault is in kernel mode */ 184 /* get user regs even if this fault is in kernel mode */
185 struct pt_regs *uregs = current->thread.regs; 185 struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
209 code = SEGV_ACCERR; 209 code = SEGV_ACCERR;
210 210
211 /* a write */ 211 /* a write */
212 if (is_write) { 212 if (unlikely(is_write)) {
213 if (!(vma->vm_flags & VM_WRITE)) 213 if (unlikely(!(vma->vm_flags & VM_WRITE)))
214 goto bad_area; 214 goto bad_area;
215 /* a read */ 215 /* a read */
216 } else { 216 } else {
217 /* protection fault */ 217 /* protection fault */
218 if (error_code & 0x08000000) 218 if (unlikely(error_code & 0x08000000))
219 goto bad_area; 219 goto bad_area;
220 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) 220 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
221 goto bad_area; 221 goto bad_area;
222 } 222 }
223 223
@@ -235,7 +235,7 @@ survive:
235 goto do_sigbus; 235 goto do_sigbus;
236 BUG(); 236 BUG();
237 } 237 }
238 if (fault & VM_FAULT_MAJOR) 238 if (unlikely(fault & VM_FAULT_MAJOR))
239 current->maj_flt++; 239 current->maj_flt++;
240 else 240 else
241 current->min_flt++; 241 current->min_flt++;
@@ -273,16 +273,11 @@ bad_area_nosemaphore:
273 * us unable to handle the page fault gracefully. 273 * us unable to handle the page fault gracefully.
274 */ 274 */
275out_of_memory: 275out_of_memory:
276 if (current->pid == 1) {
277 yield();
278 down_read(&mm->mmap_sem);
279 goto survive;
280 }
281 up_read(&mm->mmap_sem); 276 up_read(&mm->mmap_sem);
282 printk(KERN_WARNING "VM: killing process %s\n", current->comm); 277 if (!user_mode(regs))
283 if (user_mode(regs)) 278 bad_page_fault(regs, address, SIGKILL);
284 do_exit(SIGKILL); 279 else
285 bad_page_fault(regs, address, SIGKILL); 280 pagefault_out_of_memory();
286 return; 281 return;
287 282
288do_sigbus: 283do_sigbus:
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a44892e7cd5b..cca3579d4268 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -15,6 +15,7 @@
15#include <linux/initrd.h> 15#include <linux/initrd.h>
16#include <linux/pagemap.h> 16#include <linux/pagemap.h>
17#include <linux/pfn.h> 17#include <linux/pfn.h>
18#include <linux/slab.h>
18#include <linux/swap.h> 19#include <linux/swap.h>
19 20
20#include <asm/page.h> 21#include <asm/page.h>
@@ -23,6 +24,9 @@
23#include <asm/sections.h> 24#include <asm/sections.h>
24#include <asm/tlb.h> 25#include <asm/tlb.h>
25 26
27/* Used for both MMU and noMMU because of the generic PCI code */
28int mem_init_done;
29
26#ifndef CONFIG_MMU 30#ifndef CONFIG_MMU
27unsigned int __page_offset; 31unsigned int __page_offset;
28EXPORT_SYMBOL(__page_offset); 32EXPORT_SYMBOL(__page_offset);
@@ -30,7 +34,6 @@ EXPORT_SYMBOL(__page_offset);
30#else 34#else
31DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 35DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32 36
33int mem_init_done;
34static int init_bootmem_done; 37static int init_bootmem_done;
35#endif /* CONFIG_MMU */ 38#endif /* CONFIG_MMU */
36 39
@@ -41,8 +44,10 @@ char *klimit = _end;
41 * have available. 44 * have available.
42 */ 45 */
43unsigned long memory_start; 46unsigned long memory_start;
47EXPORT_SYMBOL(memory_start);
44unsigned long memory_end; /* due to mm/nommu.c */ 48unsigned long memory_end; /* due to mm/nommu.c */
45unsigned long memory_size; 49unsigned long memory_size;
50EXPORT_SYMBOL(memory_size);
46 51
47/* 52/*
48 * paging_init() sets up the page tables - in fact we've already done this. 53 * paging_init() sets up the page tables - in fact we've already done this.
@@ -162,7 +167,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
162 for (addr = begin; addr < end; addr += PAGE_SIZE) { 167 for (addr = begin; addr < end; addr += PAGE_SIZE) {
163 ClearPageReserved(virt_to_page(addr)); 168 ClearPageReserved(virt_to_page(addr));
164 init_page_count(virt_to_page(addr)); 169 init_page_count(virt_to_page(addr));
165 memset((void *)addr, 0xcc, PAGE_SIZE);
166 free_page(addr); 170 free_page(addr);
167 totalram_pages++; 171 totalram_pages++;
168 } 172 }
@@ -192,12 +196,6 @@ void free_initmem(void)
192 (unsigned long)(&__init_end)); 196 (unsigned long)(&__init_end));
193} 197}
194 198
195/* FIXME from arch/powerpc/mm/mem.c*/
196void show_mem(void)
197{
198 printk(KERN_NOTICE "%s\n", __func__);
199}
200
201void __init mem_init(void) 199void __init mem_init(void)
202{ 200{
203 high_memory = (void *)__va(memory_end); 201 high_memory = (void *)__va(memory_end);
@@ -207,20 +205,14 @@ void __init mem_init(void)
207 printk(KERN_INFO "Memory: %luk/%luk available\n", 205 printk(KERN_INFO "Memory: %luk/%luk available\n",
208 nr_free_pages() << (PAGE_SHIFT-10), 206 nr_free_pages() << (PAGE_SHIFT-10),
209 num_physpages << (PAGE_SHIFT-10)); 207 num_physpages << (PAGE_SHIFT-10));
210#ifdef CONFIG_MMU
211 mem_init_done = 1; 208 mem_init_done = 1;
212#endif
213} 209}
214 210
215#ifndef CONFIG_MMU 211#ifndef CONFIG_MMU
216/* Check against bounds of physical memory */ 212int page_is_ram(unsigned long pfn)
217int ___range_ok(unsigned long addr, unsigned long size)
218{ 213{
219 return ((addr < memory_start) || 214 return __range_ok(pfn, 0);
220 ((addr + size) > memory_end));
221} 215}
222EXPORT_SYMBOL(___range_ok);
223
224#else 216#else
225int page_is_ram(unsigned long pfn) 217int page_is_ram(unsigned long pfn)
226{ 218{
@@ -348,4 +340,27 @@ void __init *early_get_page(void)
348 } 340 }
349 return p; 341 return p;
350} 342}
343
351#endif /* CONFIG_MMU */ 344#endif /* CONFIG_MMU */
345
346void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
347{
348 if (mem_init_done)
349 return kmalloc(size, mask);
350 else
351 return alloc_bootmem(size);
352}
353
354void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
355{
356 void *p;
357
358 if (mem_init_done)
359 p = kzalloc(size, mask);
360 else {
361 p = alloc_bootmem(size);
362 if (p)
363 memset(p, 0, size);
364 }
365 return p;
366}
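
The two helpers added at the end pick kmalloc/kzalloc once mem_init_done is set and fall back to bootmem before that; pcibios_alloc_controller() in pci-common.c further down uses zalloc_maybe_bootmem() exactly this way. A tiny sketch with a made-up structure:

struct my_board_priv {		/* made-up example structure */
	int irq;
	unsigned long base;
};

static struct my_board_priv *my_board_alloc(void)
{
	/* Works both before and after mem_init_done is set. */
	return zalloc_maybe_bootmem(sizeof(struct my_board_priv), GFP_KERNEL);
}
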
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 46c4ca5d15c5..59bf2335a4ce 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -42,6 +42,7 @@
42 42
43unsigned long ioremap_base; 43unsigned long ioremap_base;
44unsigned long ioremap_bot; 44unsigned long ioremap_bot;
45EXPORT_SYMBOL(ioremap_bot);
45 46
46/* The maximum lowmem defaults to 768Mb, but this can be configured to 47/* The maximum lowmem defaults to 768Mb, but this can be configured to
47 * another value. 48 * another value.
@@ -103,7 +104,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
103 area = get_vm_area(size, VM_IOREMAP); 104 area = get_vm_area(size, VM_IOREMAP);
104 if (area == NULL) 105 if (area == NULL)
105 return NULL; 106 return NULL;
106 v = VMALLOC_VMADDR(area->addr); 107 v = (unsigned long) area->addr;
107 } else { 108 } else {
108 v = (ioremap_bot -= size); 109 v = (ioremap_bot -= size);
109 } 110 }
@@ -144,7 +145,6 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
144 pmd_t *pd; 145 pmd_t *pd;
145 pte_t *pg; 146 pte_t *pg;
146 int err = -ENOMEM; 147 int err = -ENOMEM;
147 /* spin_lock(&init_mm.page_table_lock); */
148 /* Use upper 10 bits of VA to index the first level map */ 148 /* Use upper 10 bits of VA to index the first level map */
149 pd = pmd_offset(pgd_offset_k(va), va); 149 pd = pmd_offset(pgd_offset_k(va), va);
150 /* Use middle 10 bits of VA to index the second-level map */ 150 /* Use middle 10 bits of VA to index the second-level map */
@@ -155,39 +155,13 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
155 err = 0; 155 err = 0;
156 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 156 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
157 __pgprot(flags))); 157 __pgprot(flags)));
158 if (mem_init_done) 158 if (unlikely(mem_init_done))
159 flush_HPTE(0, va, pmd_val(*pd)); 159 flush_HPTE(0, va, pmd_val(*pd));
160 /* flush_HPTE(0, va, pg); */ 160 /* flush_HPTE(0, va, pg); */
161
162 } 161 }
163 /* spin_unlock(&init_mm.page_table_lock); */
164 return err; 162 return err;
165} 163}
166 164
167void __init adjust_total_lowmem(void)
168{
169/* TBD */
170#if 0
171 unsigned long max_low_mem = MAX_LOW_MEM;
172
173 if (total_lowmem > max_low_mem) {
174 total_lowmem = max_low_mem;
175#ifndef CONFIG_HIGHMEM
176 printk(KERN_INFO "Warning, memory limited to %ld Mb, use "
177 "CONFIG_HIGHMEM to reach %ld Mb\n",
178 max_low_mem >> 20, total_memory >> 20);
179 total_memory = total_lowmem;
180#endif /* CONFIG_HIGHMEM */
181 }
182#endif
183}
184
185static void show_tmem(unsigned long tmem)
186{
187 volatile unsigned long a;
188 a = a + tmem;
189}
190
191/* 165/*
192 * Map in all of physical memory starting at CONFIG_KERNEL_START. 166 * Map in all of physical memory starting at CONFIG_KERNEL_START.
193 */ 167 */
@@ -197,7 +171,6 @@ void __init mapin_ram(void)
197 171
198 v = CONFIG_KERNEL_START; 172 v = CONFIG_KERNEL_START;
199 p = memory_start; 173 p = memory_start;
200 show_tmem(memory_size);
201 for (s = 0; s < memory_size; s += PAGE_SIZE) { 174 for (s = 0; s < memory_size; s += PAGE_SIZE) {
202 f = _PAGE_PRESENT | _PAGE_ACCESSED | 175 f = _PAGE_PRESENT | _PAGE_ACCESSED |
203 _PAGE_SHARED | _PAGE_HWEXEC; 176 _PAGE_SHARED | _PAGE_HWEXEC;
@@ -216,24 +189,6 @@ void __init mapin_ram(void)
216/* is x a power of 2? */ 189/* is x a power of 2? */
217#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) 190#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
218 191
219/*
220 * Set up a mapping for a block of I/O.
221 * virt, phys, size must all be page-aligned.
222 * This should only be called before ioremap is called.
223 */
224void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
225 unsigned int size, int flags)
226{
227 int i;
228
229 if (virt > CONFIG_KERNEL_START && virt < ioremap_bot)
230 ioremap_bot = ioremap_base = virt;
231
232 /* Put it in the page tables. */
233 for (i = 0; i < size; i += PAGE_SIZE)
234 map_page(virt + i, phys + i, flags);
235}
236
237/* Scan the real Linux page tables and return a PTE pointer for 192/* Scan the real Linux page tables and return a PTE pointer for
238 * a virtual address in a context. 193 * a virtual address in a context.
239 * Returns true (1) if PTE was found, zero otherwise. The pointer to 194 * Returns true (1) if PTE was found, zero otherwise. The pointer to
@@ -284,3 +239,18 @@ unsigned long iopa(unsigned long addr)
284 239
285 return pa; 240 return pa;
286} 241}
242
243__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
244 unsigned long address)
245{
246 pte_t *pte;
247 if (mem_init_done) {
248 pte = (pte_t *)__get_free_page(GFP_KERNEL |
249 __GFP_REPEAT | __GFP_ZERO);
250 } else {
251 pte = (pte_t *)early_get_page();
252 if (pte)
253 clear_page(pte);
254 }
255 return pte;
256}
diff --git a/arch/microblaze/oprofile/Makefile b/arch/microblaze/oprofile/Makefile
new file mode 100644
index 000000000000..0d0348c8af97
--- /dev/null
+++ b/arch/microblaze/oprofile/Makefile
@@ -0,0 +1,13 @@
1#
2# arch/microblaze/oprofile/Makefile
3#
4
5obj-$(CONFIG_OPROFILE) += oprofile.o
6
7DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
8 oprof.o cpu_buffer.o buffer_sync.o \
9 event_buffer.o oprofile_files.o \
10 oprofilefs.o oprofile_stats.o \
11 timer_int.o )
12
13oprofile-y := $(DRIVER_OBJS) microblaze_oprofile.o
diff --git a/arch/microblaze/oprofile/microblaze_oprofile.c b/arch/microblaze/oprofile/microblaze_oprofile.c
new file mode 100644
index 000000000000..def17e59888e
--- /dev/null
+++ b/arch/microblaze/oprofile/microblaze_oprofile.c
@@ -0,0 +1,22 @@
1/*
2 * Microblaze oprofile code
3 *
4 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2009 PetaLogix
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11
12#include <linux/oprofile.h>
13#include <linux/init.h>
14
15int __init oprofile_arch_init(struct oprofile_operations *ops)
16{
17 return -1;
18}
19
20void oprofile_arch_exit(void)
21{
22}
diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile
new file mode 100644
index 000000000000..9889cc2e1294
--- /dev/null
+++ b/arch/microblaze/pci/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile
3#
4
5obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o
6obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o
diff --git a/arch/microblaze/pci/indirect_pci.c b/arch/microblaze/pci/indirect_pci.c
new file mode 100644
index 000000000000..25f18f017f21
--- /dev/null
+++ b/arch/microblaze/pci/indirect_pci.c
@@ -0,0 +1,163 @@
1/*
2 * Support for indirect PCI bridges.
3 *
4 * Copyright (C) 1998 Gabriel Paubert.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/init.h>
17
18#include <asm/io.h>
19#include <asm/prom.h>
20#include <asm/pci-bridge.h>
21
22static int
23indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
24 int len, u32 *val)
25{
26 struct pci_controller *hose = pci_bus_to_host(bus);
27 volatile void __iomem *cfg_data;
28 u8 cfg_type = 0;
29 u32 bus_no, reg;
30
31 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
32 if (bus->number != hose->first_busno)
33 return PCIBIOS_DEVICE_NOT_FOUND;
34 if (devfn != 0)
35 return PCIBIOS_DEVICE_NOT_FOUND;
36 }
37
38 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
39 if (bus->number != hose->first_busno)
40 cfg_type = 1;
41
42 bus_no = (bus->number == hose->first_busno) ?
43 hose->self_busno : bus->number;
44
45 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
46 reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
47 else
48 reg = offset & 0xfc; /* Only 3 bits for function */
49
50 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
51 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
52 (devfn << 8) | reg | cfg_type));
53 else
54 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
55 (devfn << 8) | reg | cfg_type));
56
57 /*
58 * Note: the caller has already checked that offset is
59 * suitably aligned and that len is 1, 2 or 4.
60 */
61 cfg_data = hose->cfg_data + (offset & 3); /* Only 3 bits for function */
62 switch (len) {
63 case 1:
64 *val = in_8(cfg_data);
65 break;
66 case 2:
67 *val = in_le16(cfg_data);
68 break;
69 default:
70 *val = in_le32(cfg_data);
71 break;
72 }
73 return PCIBIOS_SUCCESSFUL;
74}
75
76static int
77indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
78 int len, u32 val)
79{
80 struct pci_controller *hose = pci_bus_to_host(bus);
81 volatile void __iomem *cfg_data;
82 u8 cfg_type = 0;
83 u32 bus_no, reg;
84
85 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
86 if (bus->number != hose->first_busno)
87 return PCIBIOS_DEVICE_NOT_FOUND;
88 if (devfn != 0)
89 return PCIBIOS_DEVICE_NOT_FOUND;
90 }
91
92 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
93 if (bus->number != hose->first_busno)
94 cfg_type = 1;
95
96 bus_no = (bus->number == hose->first_busno) ?
97 hose->self_busno : bus->number;
98
99 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
100 reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
101 else
102 reg = offset & 0xfc;
103
104 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
105 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
106 (devfn << 8) | reg | cfg_type));
107 else
108 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
109 (devfn << 8) | reg | cfg_type));
110
111 /* suppress setting of PCI_PRIMARY_BUS */
112 if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
113 if ((offset == PCI_PRIMARY_BUS) &&
114 (bus->number == hose->first_busno))
115 val &= 0xffffff00;
116
117 /* Workaround for PCI_28 Errata in 440EPx/GRx */
118 if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) &&
119 offset == PCI_CACHE_LINE_SIZE) {
120 val = 0;
121 }
122
123 /*
124 * Note: the caller has already checked that offset is
125 * suitably aligned and that len is 1, 2 or 4.
126 */
127 cfg_data = hose->cfg_data + (offset & 3);
128 switch (len) {
129 case 1:
130 out_8(cfg_data, val);
131 break;
132 case 2:
133 out_le16(cfg_data, val);
134 break;
135 default:
136 out_le32(cfg_data, val);
137 break;
138 }
139
140 return PCIBIOS_SUCCESSFUL;
141}
142
143static struct pci_ops indirect_pci_ops = {
144 .read = indirect_read_config,
145 .write = indirect_write_config,
146};
147
148void __init
149setup_indirect_pci(struct pci_controller *hose,
150 resource_size_t cfg_addr,
151 resource_size_t cfg_data, u32 flags)
152{
153 resource_size_t base = cfg_addr & PAGE_MASK;
154 void __iomem *mbase;
155
156 mbase = ioremap(base, PAGE_SIZE);
157 hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
158 if ((cfg_data & PAGE_MASK) != base)
159 mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
160 hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
161 hose->ops = &indirect_pci_ops;
162 hose->indirect_type = flags;
163}
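
A rough sketch (hypothetical bridge probe with placeholder register addresses) of how a host controller driver would wire itself to indirect_pci_ops via setup_indirect_pci() and the pcibios_alloc_controller() helper added in pci-common.c:

#include <asm/pci-bridge.h>
#include <asm/prom.h>

static int example_pci_probe(struct device_node *dn)
{
	struct pci_controller *hose;

	hose = pcibios_alloc_controller(dn);
	if (!hose)
		return -ENOMEM;

	/* Placeholder addresses; a real driver reads them from the "reg" property. */
	setup_indirect_pci(hose, 0x41000000 /* cfg_addr */,
			   0x41000004 /* cfg_data */, 0);
	return 0;
}
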
diff --git a/arch/microblaze/pci/iomap.c b/arch/microblaze/pci/iomap.c
new file mode 100644
index 000000000000..3fbf16f4e16c
--- /dev/null
+++ b/arch/microblaze/pci/iomap.c
@@ -0,0 +1,39 @@
1/*
2 * ppc64 "iomap" interface implementation.
3 *
4 * (C) Copyright 2004 Linus Torvalds
5 */
6#include <linux/init.h>
7#include <linux/pci.h>
8#include <linux/mm.h>
9#include <asm/io.h>
10#include <asm/pci-bridge.h>
11
12void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
13{
14 resource_size_t start = pci_resource_start(dev, bar);
15 resource_size_t len = pci_resource_len(dev, bar);
16 unsigned long flags = pci_resource_flags(dev, bar);
17
18 if (!len)
19 return NULL;
20 if (max && len > max)
21 len = max;
22 if (flags & IORESOURCE_IO)
23 return ioport_map(start, len);
24 if (flags & IORESOURCE_MEM)
25 return ioremap(start, len);
26 /* What? */
27 return NULL;
28}
29EXPORT_SYMBOL(pci_iomap);
30
31void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
32{
33 if (isa_vaddr_is_ioport(addr))
34 return;
35 if (pcibios_vaddr_is_ioport(addr))
36 return;
37 iounmap(addr);
38}
39EXPORT_SYMBOL(pci_iounmap);
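
A minimal sketch (hypothetical driver, made-up register offset) of the usual pci_iomap()/pci_iounmap() pairing these wrappers support:

#include <linux/pci.h>
#include <linux/io.h>

static int example_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	regs = pci_iomap(pdev, 0, 0);	/* BAR 0, no length cap */
	if (!regs)
		return -ENOMEM;

	iowrite32(0x1, regs + 0x04);	/* hypothetical control register */

	pci_iounmap(pdev, regs);
	return 0;
}
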
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
new file mode 100644
index 000000000000..9cb782b8e036
--- /dev/null
+++ b/arch/microblaze/pci/pci-common.c
@@ -0,0 +1,1642 @@
1/*
2 * Contains common pci routines for ALL ppc platform
3 * (based on pci_32.c and pci_64.c)
4 *
5 * Port for PPC64 David Engebretsen, IBM Corp.
6 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
7 *
8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9 * Rework, based on alpha PCI code.
10 *
11 * Common pmac/prep/chrp pci routines. -- Cort
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/mm.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
29#include <linux/slab.h>
30
31#include <asm/processor.h>
32#include <asm/io.h>
33#include <asm/prom.h>
34#include <asm/pci-bridge.h>
35#include <asm/byteorder.h>
36
37static DEFINE_SPINLOCK(hose_spinlock);
38LIST_HEAD(hose_list);
39
40/* XXX kill that some day ... */
41static int global_phb_number; /* Global phb counter */
42
43/* ISA Memory physical address */
44resource_size_t isa_mem_base;
45
46/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
47unsigned int pci_flags;
48
49static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
50
51void set_pci_dma_ops(struct dma_map_ops *dma_ops)
52{
53 pci_dma_ops = dma_ops;
54}
55
56struct dma_map_ops *get_pci_dma_ops(void)
57{
58 return pci_dma_ops;
59}
60EXPORT_SYMBOL(get_pci_dma_ops);
61
62int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
63{
64 return dma_set_mask(&dev->dev, mask);
65}
66
67int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
68{
69 int rc;
70
71 rc = dma_set_mask(&dev->dev, mask);
72 dev->dev.coherent_dma_mask = dev->dma_mask;
73
74 return rc;
75}
76
77struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
78{
79 struct pci_controller *phb;
80
81 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
82 if (!phb)
83 return NULL;
84 spin_lock(&hose_spinlock);
85 phb->global_number = global_phb_number++;
86 list_add_tail(&phb->list_node, &hose_list);
87 spin_unlock(&hose_spinlock);
88 phb->dn = dev;
89 phb->is_dynamic = mem_init_done;
90 return phb;
91}
92
93void pcibios_free_controller(struct pci_controller *phb)
94{
95 spin_lock(&hose_spinlock);
96 list_del(&phb->list_node);
97 spin_unlock(&hose_spinlock);
98
99 if (phb->is_dynamic)
100 kfree(phb);
101}
102
103static resource_size_t pcibios_io_size(const struct pci_controller *hose)
104{
105 return hose->io_resource.end - hose->io_resource.start + 1;
106}
107
108int pcibios_vaddr_is_ioport(void __iomem *address)
109{
110 int ret = 0;
111 struct pci_controller *hose;
112 resource_size_t size;
113
114 spin_lock(&hose_spinlock);
115 list_for_each_entry(hose, &hose_list, list_node) {
116 size = pcibios_io_size(hose);
117 if (address >= hose->io_base_virt &&
118 address < (hose->io_base_virt + size)) {
119 ret = 1;
120 break;
121 }
122 }
123 spin_unlock(&hose_spinlock);
124 return ret;
125}
126
127unsigned long pci_address_to_pio(phys_addr_t address)
128{
129 struct pci_controller *hose;
130 resource_size_t size;
131 unsigned long ret = ~0;
132
133 spin_lock(&hose_spinlock);
134 list_for_each_entry(hose, &hose_list, list_node) {
135 size = pcibios_io_size(hose);
136 if (address >= hose->io_base_phys &&
137 address < (hose->io_base_phys + size)) {
138 unsigned long base =
139 (unsigned long)hose->io_base_virt - _IO_BASE;
140 ret = base + (address - hose->io_base_phys);
141 break;
142 }
143 }
144 spin_unlock(&hose_spinlock);
145
146 return ret;
147}
148EXPORT_SYMBOL_GPL(pci_address_to_pio);
149
150/*
151 * Return the domain number for this bus.
152 */
153int pci_domain_nr(struct pci_bus *bus)
154{
155 struct pci_controller *hose = pci_bus_to_host(bus);
156
157 return hose->global_number;
158}
159EXPORT_SYMBOL(pci_domain_nr);
160
161/* This routine is meant to be used early during boot, when the
162 * PCI bus numbers have not yet been assigned, and you need to
163 * issue PCI config cycles to an OF device.
164 * It could also be used to "fix" RTAS config cycles if you want
165 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
166 * config cycles.
167 */
168struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
169{
170 while (node) {
171 struct pci_controller *hose, *tmp;
172 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
173 if (hose->dn == node)
174 return hose;
175 node = node->parent;
176 }
177 return NULL;
178}
179
180static ssize_t pci_show_devspec(struct device *dev,
181 struct device_attribute *attr, char *buf)
182{
183 struct pci_dev *pdev;
184 struct device_node *np;
185
186 pdev = to_pci_dev(dev);
187 np = pci_device_to_OF_node(pdev);
188 if (np == NULL || np->full_name == NULL)
189 return 0;
190 return sprintf(buf, "%s", np->full_name);
191}
192static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
193
194/* Add sysfs properties */
195int pcibios_add_platform_entries(struct pci_dev *pdev)
196{
197 return device_create_file(&pdev->dev, &dev_attr_devspec);
198}
199
200char __devinit *pcibios_setup(char *str)
201{
202 return str;
203}
204
205/*
 206 * Reads the interrupt pin to determine if the interrupt is used by the card.
207 * If the interrupt is used, then gets the interrupt line from the
208 * openfirmware and sets it in the pci_dev and pci_config line.
209 */
210int pci_read_irq_line(struct pci_dev *pci_dev)
211{
212 struct of_irq oirq;
213 unsigned int virq;
214
215 /* The current device-tree that iSeries generates from the HV
 216 * PCI information doesn't contain proper interrupt routing,
217 * and all the fallback would do is print out crap, so we
 218 * don't attempt to resolve the interrupts here at all; some
 219 * iSeries-specific fixup does it.
220 *
221 * In the long run, we will hopefully fix the generated device-tree
222 * instead.
223 */
224 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
225
226#ifdef DEBUG
227 memset(&oirq, 0xff, sizeof(oirq));
228#endif
229 /* Try to get a mapping from the device-tree */
230 if (of_irq_map_pci(pci_dev, &oirq)) {
231 u8 line, pin;
232
 233 /* If that fails, let's fall back to what is in the config
234 * space and map that through the default controller. We
235 * also set the type to level low since that's what PCI
236 * interrupts are. If your platform does differently, then
237 * either provide a proper interrupt tree or don't use this
238 * function.
239 */
240 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
241 return -1;
242 if (pin == 0)
243 return -1;
244 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
245 line == 0xff || line == 0) {
246 return -1;
247 }
248 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
249 line, pin);
250
251 virq = irq_create_mapping(NULL, line);
252 if (virq != NO_IRQ)
253 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
254 } else {
255 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
256 oirq.size, oirq.specifier[0], oirq.specifier[1],
257 oirq.controller ? oirq.controller->full_name :
258 "<default>");
259
260 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
261 oirq.size);
262 }
263 if (virq == NO_IRQ) {
264 pr_debug(" Failed to map !\n");
265 return -1;
266 }
267
268 pr_debug(" Mapped to linux irq %d\n", virq);
269
270 pci_dev->irq = virq;
271
272 return 0;
273}
274EXPORT_SYMBOL(pci_read_irq_line);
275
276/*
277 * Platform support for /proc/bus/pci/X/Y mmap()s,
278 * modelled on the sparc64 implementation by Dave Miller.
279 * -- paulus.
280 */
281
282/*
283 * Adjust vm_pgoff of VMA such that it is the physical page offset
284 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
285 *
 286 * Basically, the user finds the base address for the device they wish
287 * to mmap. They read the 32-bit value from the config space base register,
288 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
289 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
290 *
291 * Returns negative error code on failure, zero on success.
292 */
293static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
294 resource_size_t *offset,
295 enum pci_mmap_state mmap_state)
296{
297 struct pci_controller *hose = pci_bus_to_host(dev->bus);
298 unsigned long io_offset = 0;
299 int i, res_bit;
300
301 if (hose == 0)
302 return NULL; /* should never happen */
303
304 /* If memory, add on the PCI bridge address offset */
305 if (mmap_state == pci_mmap_mem) {
306#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
307 *offset += hose->pci_mem_offset;
308#endif
309 res_bit = IORESOURCE_MEM;
310 } else {
311 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
312 *offset += io_offset;
313 res_bit = IORESOURCE_IO;
314 }
315
316 /*
317 * Check that the offset requested corresponds to one of the
318 * resources of the device.
319 */
320 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
321 struct resource *rp = &dev->resource[i];
322 int flags = rp->flags;
323
324 /* treat ROM as memory (should be already) */
325 if (i == PCI_ROM_RESOURCE)
326 flags |= IORESOURCE_MEM;
327
328 /* Active and same type? */
329 if ((flags & res_bit) == 0)
330 continue;
331
332 /* In the range of this resource? */
333 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
334 continue;
335
336 /* found it! construct the final physical address */
337 if (mmap_state == pci_mmap_io)
338 *offset += hose->io_base_phys - io_offset;
339 return rp;
340 }
341
342 return NULL;
343}
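A hedged user-space counterpart to the convention described in the comment preceding this function: the application reads the 32-bit BAR value, page-aligns it, and passes it as the mmap offset on the /proc/bus/pci node. The path, BAR value and length below are placeholders, and a 4 KB page size is assumed.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/pci.h>			/* PCIIOC_MMAP_IS_MEM */

void *map_pci_bar(const char *proc_path, uint32_t bar_base, size_t len)
{
	int fd = open(proc_path, O_RDWR);	/* e.g. "/proc/bus/pci/00/01.0" */
	void *p;

	if (fd < 0)
		return NULL;
	ioctl(fd, PCIIOC_MMAP_IS_MEM);		/* request a memory-space mapping */
	/* offset is the 32-bit BAR value rounded down to a page boundary */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, (off_t)(bar_base & ~(uint32_t)0xfff));
	close(fd);
	return p == MAP_FAILED ? NULL : p;
}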
344
345/*
346 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
347 * device mapping.
348 */
349static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
350 pgprot_t protection,
351 enum pci_mmap_state mmap_state,
352 int write_combine)
353{
354 pgprot_t prot = protection;
355
356 /* Write combine is always 0 on non-memory space mappings. On
357 * memory space, if the user didn't pass 1, we check for a
358 * "prefetchable" resource. This is a bit hackish, but we use
 359 * this to work around the inability of /sysfs to provide a write
360 * combine bit
361 */
362 if (mmap_state != pci_mmap_mem)
363 write_combine = 0;
364 else if (write_combine == 0) {
365 if (rp->flags & IORESOURCE_PREFETCH)
366 write_combine = 1;
367 }
368
369 return pgprot_noncached(prot);
370}
371
372/*
373 * This one is used by /dev/mem and fbdev who have no clue about the
374 * PCI device, it tries to find the PCI device first and calls the
375 * above routine
376 */
377pgprot_t pci_phys_mem_access_prot(struct file *file,
378 unsigned long pfn,
379 unsigned long size,
380 pgprot_t prot)
381{
382 struct pci_dev *pdev = NULL;
383 struct resource *found = NULL;
384 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
385 int i;
386
387 if (page_is_ram(pfn))
388 return prot;
389
390 prot = pgprot_noncached(prot);
391 for_each_pci_dev(pdev) {
392 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
393 struct resource *rp = &pdev->resource[i];
394 int flags = rp->flags;
395
396 /* Active and same type? */
397 if ((flags & IORESOURCE_MEM) == 0)
398 continue;
399 /* In the range of this resource? */
400 if (offset < (rp->start & PAGE_MASK) ||
401 offset > rp->end)
402 continue;
403 found = rp;
404 break;
405 }
406 if (found)
407 break;
408 }
409 if (found) {
410 if (found->flags & IORESOURCE_PREFETCH)
411 prot = pgprot_noncached_wc(prot);
412 pci_dev_put(pdev);
413 }
414
415 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
416 (unsigned long long)offset, pgprot_val(prot));
417
418 return prot;
419}
420
421/*
422 * Perform the actual remap of the pages for a PCI device mapping, as
423 * appropriate for this architecture. The region in the process to map
424 * is described by vm_start and vm_end members of VMA, the base physical
425 * address is found in vm_pgoff.
426 * The pci device structure is provided so that architectures may make mapping
427 * decisions on a per-device or per-bus basis.
428 *
429 * Returns a negative error code on failure, zero on success.
430 */
431int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
432 enum pci_mmap_state mmap_state, int write_combine)
433{
434 resource_size_t offset =
435 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
436 struct resource *rp;
437 int ret;
438
439 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
440 if (rp == NULL)
441 return -EINVAL;
442
443 vma->vm_pgoff = offset >> PAGE_SHIFT;
444 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
445 vma->vm_page_prot,
446 mmap_state, write_combine);
447
448 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
449 vma->vm_end - vma->vm_start, vma->vm_page_prot);
450
451 return ret;
452}
453
454/* This provides legacy IO read access on a bus */
455int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
456{
457 unsigned long offset;
458 struct pci_controller *hose = pci_bus_to_host(bus);
459 struct resource *rp = &hose->io_resource;
460 void __iomem *addr;
461
462 /* Check if port can be supported by that bus. We only check
463 * the ranges of the PHB though, not the bus itself as the rules
464 * for forwarding legacy cycles down bridges are not our problem
465 * here. So if the host bridge supports it, we do it.
466 */
467 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
468 offset += port;
469
470 if (!(rp->flags & IORESOURCE_IO))
471 return -ENXIO;
472 if (offset < rp->start || (offset + size) > rp->end)
473 return -ENXIO;
474 addr = hose->io_base_virt + port;
475
476 switch (size) {
477 case 1:
478 *((u8 *)val) = in_8(addr);
479 return 1;
480 case 2:
481 if (port & 1)
482 return -EINVAL;
483 *((u16 *)val) = in_le16(addr);
484 return 2;
485 case 4:
486 if (port & 3)
487 return -EINVAL;
488 *((u32 *)val) = in_le32(addr);
489 return 4;
490 }
491 return -EINVAL;
492}
493
494/* This provides legacy IO write access on a bus */
495int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
496{
497 unsigned long offset;
498 struct pci_controller *hose = pci_bus_to_host(bus);
499 struct resource *rp = &hose->io_resource;
500 void __iomem *addr;
501
502 /* Check if port can be supported by that bus. We only check
503 * the ranges of the PHB though, not the bus itself as the rules
504 * for forwarding legacy cycles down bridges are not our problem
505 * here. So if the host bridge supports it, we do it.
506 */
507 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
508 offset += port;
509
510 if (!(rp->flags & IORESOURCE_IO))
511 return -ENXIO;
512 if (offset < rp->start || (offset + size) > rp->end)
513 return -ENXIO;
514 addr = hose->io_base_virt + port;
515
516 /* WARNING: The generic code is idiotic. It gets passed a pointer
517 * to what can be a 1, 2 or 4 byte quantity and always reads that
518 * as a u32, which means that we have to correct the location of
519 * the data read within those 32 bits for size 1 and 2
520 */
521 switch (size) {
522 case 1:
523 out_8(addr, val >> 24);
524 return 1;
525 case 2:
526 if (port & 1)
527 return -EINVAL;
528 out_le16(addr, val >> 16);
529 return 2;
530 case 4:
531 if (port & 3)
532 return -EINVAL;
533 out_le32(addr, val);
534 return 4;
535 }
536 return -EINVAL;
537}
538
539/* This provides legacy IO or memory mmap access on a bus */
540int pci_mmap_legacy_page_range(struct pci_bus *bus,
541 struct vm_area_struct *vma,
542 enum pci_mmap_state mmap_state)
543{
544 struct pci_controller *hose = pci_bus_to_host(bus);
545 resource_size_t offset =
546 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
547 resource_size_t size = vma->vm_end - vma->vm_start;
548 struct resource *rp;
549
550 pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
551 pci_domain_nr(bus), bus->number,
552 mmap_state == pci_mmap_mem ? "MEM" : "IO",
553 (unsigned long long)offset,
554 (unsigned long long)(offset + size - 1));
555
556 if (mmap_state == pci_mmap_mem) {
557 /* Hack alert !
558 *
559 * Because X is lame and can fail starting if it gets an error
560 * trying to mmap legacy_mem (instead of just moving on without
561 * legacy memory access) we fake it here by giving it anonymous
562 * memory, effectively behaving just like /dev/zero
563 */
564 if ((offset + size) > hose->isa_mem_size) {
565#ifdef CONFIG_MMU
566 printk(KERN_DEBUG
 567 "Process %s (pid:%d) mapped non-existing PCI "
568 "legacy memory for 0%04x:%02x\n",
569 current->comm, current->pid, pci_domain_nr(bus),
570 bus->number);
571#endif
572 if (vma->vm_flags & VM_SHARED)
573 return shmem_zero_setup(vma);
574 return 0;
575 }
576 offset += hose->isa_mem_phys;
577 } else {
578 unsigned long io_offset = (unsigned long)hose->io_base_virt - \
579 _IO_BASE;
580 unsigned long roffset = offset + io_offset;
581 rp = &hose->io_resource;
582 if (!(rp->flags & IORESOURCE_IO))
583 return -ENXIO;
584 if (roffset < rp->start || (roffset + size) > rp->end)
585 return -ENXIO;
586 offset += hose->io_base_phys;
587 }
588 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
589
590 vma->vm_pgoff = offset >> PAGE_SHIFT;
591 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
592 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
593 vma->vm_end - vma->vm_start,
594 vma->vm_page_prot);
595}
596
597void pci_resource_to_user(const struct pci_dev *dev, int bar,
598 const struct resource *rsrc,
599 resource_size_t *start, resource_size_t *end)
600{
601 struct pci_controller *hose = pci_bus_to_host(dev->bus);
602 resource_size_t offset = 0;
603
604 if (hose == NULL)
605 return;
606
607 if (rsrc->flags & IORESOURCE_IO)
608 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
609
610 /* We pass a fully fixed up address to userland for MMIO instead of
611 * a BAR value because X is lame and expects to be able to use that
612 * to pass to /dev/mem !
613 *
614 * That means that we'll have potentially 64 bits values where some
615 * userland apps only expect 32 (like X itself since it thinks only
616 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
617 * 32 bits CHRPs :-(
618 *
 619 * Hopefully, the sysfs interface is immune to that gunk. Once X
620 * has been fixed (and the fix spread enough), we can re-enable the
621 * 2 lines below and pass down a BAR value to userland. In that case
622 * we'll also have to re-enable the matching code in
623 * __pci_mmap_make_offset().
624 *
625 * BenH.
626 */
627#if 0
628 else if (rsrc->flags & IORESOURCE_MEM)
629 offset = hose->pci_mem_offset;
630#endif
631
632 *start = rsrc->start - offset;
633 *end = rsrc->end - offset;
634}
635
636/**
637 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
638 * @hose: newly allocated pci_controller to be setup
639 * @dev: device node of the host bridge
640 * @primary: set if primary bus (32 bits only, soon to be deprecated)
641 *
642 * This function will parse the "ranges" property of a PCI host bridge device
643 * node and setup the resource mapping of a pci controller based on its
644 * content.
645 *
646 * Life would be boring if it wasn't for a few issues that we have to deal
647 * with here:
648 *
649 * - We can only cope with one IO space range and up to 3 Memory space
650 * ranges. However, some machines (thanks Apple !) tend to split their
651 * space into lots of small contiguous ranges. So we have to coalesce.
652 *
653 * - We can only cope with all memory ranges having the same offset
654 * between CPU addresses and PCI addresses. Unfortunately, some bridges
655 * are setup for a large 1:1 mapping along with a small "window" which
656 * maps PCI address 0 to some arbitrary high address of the CPU space in
657 * order to give access to the ISA memory hole.
658 * The way out of here that I've chosen for now is to always set the
659 * offset based on the first resource found, then override it if we
660 * have a different offset and the previous was set by an ISA hole.
661 *
662 * - Some busses have IO space not starting at 0, which causes trouble with
663 * the way we do our IO resource renumbering. The code somewhat deals with
664 * it for 64 bits but I would expect problems on 32 bits.
665 *
666 * - Some 32 bits platforms such as 4xx can have physical space larger than
667 * 32 bits so we need to use 64 bits values for the parsing
668 */
669void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
670 struct device_node *dev,
671 int primary)
672{
673 const u32 *ranges;
674 int rlen;
675 int pna = of_n_addr_cells(dev);
676 int np = pna + 5;
677 int memno = 0, isa_hole = -1;
678 u32 pci_space;
679 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
680 unsigned long long isa_mb = 0;
681 struct resource *res;
682
683 printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
684 dev->full_name, primary ? "(primary)" : "");
685
686 /* Get ranges property */
687 ranges = of_get_property(dev, "ranges", &rlen);
688 if (ranges == NULL)
689 return;
690
691 /* Parse it */
692 pr_debug("Parsing ranges property...\n");
693 while ((rlen -= np * 4) >= 0) {
694 /* Read next ranges element */
695 pci_space = ranges[0];
696 pci_addr = of_read_number(ranges + 1, 2);
697 cpu_addr = of_translate_address(dev, ranges + 3);
698 size = of_read_number(ranges + pna + 3, 2);
699
700 pr_debug("pci_space: 0x%08x pci_addr:0x%016llx "
701 "cpu_addr:0x%016llx size:0x%016llx\n",
702 pci_space, pci_addr, cpu_addr, size);
703
704 ranges += np;
705
 706 /* If we failed translation or got a zero-sized region, skip it
 707 * (some FW try to feed us nonsensical zero-sized regions,
 708 * such as power3, which look like some kind of attempt
 709 * at exposing the VGA memory hole)
710 */
711 if (cpu_addr == OF_BAD_ADDR || size == 0)
712 continue;
713
714 /* Now consume following elements while they are contiguous */
715 for (; rlen >= np * sizeof(u32);
716 ranges += np, rlen -= np * 4) {
717 if (ranges[0] != pci_space)
718 break;
719 pci_next = of_read_number(ranges + 1, 2);
720 cpu_next = of_translate_address(dev, ranges + 3);
721 if (pci_next != pci_addr + size ||
722 cpu_next != cpu_addr + size)
723 break;
724 size += of_read_number(ranges + pna + 3, 2);
725 }
726
727 /* Act based on address space type */
728 res = NULL;
729 switch ((pci_space >> 24) & 0x3) {
730 case 1: /* PCI IO space */
731 printk(KERN_INFO
732 " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
733 cpu_addr, cpu_addr + size - 1, pci_addr);
734
735 /* We support only one IO range */
736 if (hose->pci_io_size) {
737 printk(KERN_INFO
738 " \\--> Skipped (too many) !\n");
739 continue;
740 }
741 /* On 32 bits, limit I/O space to 16MB */
742 if (size > 0x01000000)
743 size = 0x01000000;
744
745 /* 32 bits needs to map IOs here */
746 hose->io_base_virt = ioremap(cpu_addr, size);
747
748 /* Expect trouble if pci_addr is not 0 */
749 if (primary)
750 isa_io_base =
751 (unsigned long)hose->io_base_virt;
752 /* pci_io_size and io_base_phys always represent IO
753 * space starting at 0 so we factor in pci_addr
754 */
755 hose->pci_io_size = pci_addr + size;
756 hose->io_base_phys = cpu_addr - pci_addr;
757
758 /* Build resource */
759 res = &hose->io_resource;
760 res->flags = IORESOURCE_IO;
761 res->start = pci_addr;
762 break;
763 case 2: /* PCI Memory space */
764 case 3: /* PCI 64 bits Memory space */
765 printk(KERN_INFO
766 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
767 cpu_addr, cpu_addr + size - 1, pci_addr,
768 (pci_space & 0x40000000) ? "Prefetch" : "");
769
770 /* We support only 3 memory ranges */
771 if (memno >= 3) {
772 printk(KERN_INFO
773 " \\--> Skipped (too many) !\n");
774 continue;
775 }
776 /* Handles ISA memory hole space here */
777 if (pci_addr == 0) {
778 isa_mb = cpu_addr;
779 isa_hole = memno;
780 if (primary || isa_mem_base == 0)
781 isa_mem_base = cpu_addr;
782 hose->isa_mem_phys = cpu_addr;
783 hose->isa_mem_size = size;
784 }
785
786 /* We get the PCI/Mem offset from the first range or
 787 * the current one if the offset came from an ISA
788 * hole. If they don't match, bugger.
789 */
790 if (memno == 0 ||
791 (isa_hole >= 0 && pci_addr != 0 &&
792 hose->pci_mem_offset == isa_mb))
793 hose->pci_mem_offset = cpu_addr - pci_addr;
794 else if (pci_addr != 0 &&
795 hose->pci_mem_offset != cpu_addr - pci_addr) {
796 printk(KERN_INFO
797 " \\--> Skipped (offset mismatch) !\n");
798 continue;
799 }
800
801 /* Build resource */
802 res = &hose->mem_resources[memno++];
803 res->flags = IORESOURCE_MEM;
804 if (pci_space & 0x40000000)
805 res->flags |= IORESOURCE_PREFETCH;
806 res->start = cpu_addr;
807 break;
808 }
809 if (res != NULL) {
810 res->name = dev->full_name;
811 res->end = res->start + size - 1;
812 res->parent = NULL;
813 res->sibling = NULL;
814 res->child = NULL;
815 }
816 }
817
818 /* If there's an ISA hole and the pci_mem_offset is -not- matching
819 * the ISA hole offset, then we need to remove the ISA hole from
 820 * the resource list for that bridge
821 */
822 if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
823 unsigned int next = isa_hole + 1;
824 printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
825 if (next < memno)
826 memmove(&hose->mem_resources[isa_hole],
827 &hose->mem_resources[next],
828 sizeof(struct resource) * (memno - next));
829 hose->mem_resources[--memno].flags = 0;
830 }
831}
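To make the cell layout parsed by pci_process_bridge_OF_ranges() concrete, here is a hedged, made-up example of what the flattened "ranges" cells could look like for a host bridge whose parent bus has #address-cells = 1 (so pna = 1 and np = pna + 5 = 6 cells per entry). The parser reads cell 0 as pci_space, cells 1-2 as the 64-bit PCI address, translates the parent cell at index 3 into cpu_addr, and cells 4-5 as the 64-bit size; all values below are invented for illustration.

/* { pci_space, pci_addr_hi, pci_addr_lo, cpu_addr, size_hi, size_lo } */
static const u32 example_ranges[] = {
	/* IO space:  PCI 0x00000000 -> CPU 0xe8000000, 64 KB */
	0x01000000, 0x00000000, 0x00000000, 0xe8000000, 0x00000000, 0x00010000,
	/* 32-bit MEM space:  PCI 0x80000000 -> CPU 0x80000000, 256 MB */
	0x02000000, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x10000000,
};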
832
833/* Decide whether to display the domain number in /proc */
834int pci_proc_domain(struct pci_bus *bus)
835{
836 struct pci_controller *hose = pci_bus_to_host(bus);
837
838 if (!(pci_flags & PCI_ENABLE_PROC_DOMAINS))
839 return 0;
840 if (pci_flags & PCI_COMPAT_DOMAIN_0)
841 return hose->global_number != 0;
842 return 1;
843}
844
845void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
846 struct resource *res)
847{
848 resource_size_t offset = 0, mask = (resource_size_t)-1;
849 struct pci_controller *hose = pci_bus_to_host(dev->bus);
850
851 if (!hose)
852 return;
853 if (res->flags & IORESOURCE_IO) {
854 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
855 mask = 0xffffffffu;
856 } else if (res->flags & IORESOURCE_MEM)
857 offset = hose->pci_mem_offset;
858
859 region->start = (res->start - offset) & mask;
860 region->end = (res->end - offset) & mask;
861}
862EXPORT_SYMBOL(pcibios_resource_to_bus);
863
864void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
865 struct pci_bus_region *region)
866{
867 resource_size_t offset = 0, mask = (resource_size_t)-1;
868 struct pci_controller *hose = pci_bus_to_host(dev->bus);
869
870 if (!hose)
871 return;
872 if (res->flags & IORESOURCE_IO) {
873 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
874 mask = 0xffffffffu;
875 } else if (res->flags & IORESOURCE_MEM)
876 offset = hose->pci_mem_offset;
877 res->start = (region->start + offset) & mask;
878 res->end = (region->end + offset) & mask;
879}
880EXPORT_SYMBOL(pcibios_bus_to_resource);
881
882/* Fixup a bus resource into a linux resource */
883static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
884{
885 struct pci_controller *hose = pci_bus_to_host(dev->bus);
886 resource_size_t offset = 0, mask = (resource_size_t)-1;
887
888 if (res->flags & IORESOURCE_IO) {
889 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
890 mask = 0xffffffffu;
891 } else if (res->flags & IORESOURCE_MEM)
892 offset = hose->pci_mem_offset;
893
894 res->start = (res->start + offset) & mask;
895 res->end = (res->end + offset) & mask;
896}
897
898/* This header fixup will do the resource fixup for all devices as they are
899 * probed, but not for bridge ranges
900 */
901static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
902{
903 struct pci_controller *hose = pci_bus_to_host(dev->bus);
904 int i;
905
906 if (!hose) {
907 printk(KERN_ERR "No host bridge for PCI dev %s !\n",
908 pci_name(dev));
909 return;
910 }
911 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
912 struct resource *res = dev->resource + i;
913 if (!res->flags)
914 continue;
915 /* On platforms that have PCI_PROBE_ONLY set, we don't
916 * consider 0 as an unassigned BAR value. It's technically
917 * a valid value, but linux doesn't like it... so when we can
918 * re-assign things, we do so, but if we can't, we keep it
919 * around and hope for the best...
920 */
921 if (res->start == 0 && !(pci_flags & PCI_PROBE_ONLY)) {
922 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \
 923 " is unassigned\n",
924 pci_name(dev), i,
925 (unsigned long long)res->start,
926 (unsigned long long)res->end,
927 (unsigned int)res->flags);
928 res->end -= res->start;
929 res->start = 0;
930 res->flags |= IORESOURCE_UNSET;
931 continue;
932 }
933
934 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
935 pci_name(dev), i,
936 (unsigned long long)res->start,\
937 (unsigned long long)res->end,
938 (unsigned int)res->flags);
939
940 fixup_resource(res, dev);
941
942 pr_debug("PCI:%s %016llx-%016llx\n",
943 pci_name(dev),
944 (unsigned long long)res->start,
945 (unsigned long long)res->end);
946 }
947}
948DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
949
950/* This function tries to figure out if a bridge resource has been initialized
951 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 952 * by the firmware or not. It doesn't have to be absolutely bulletproof, but
 953 * things go more smoothly when it gets it right. It should cover cases such
954 */
955static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
956 struct resource *res)
957{
958 struct pci_controller *hose = pci_bus_to_host(bus);
959 struct pci_dev *dev = bus->self;
960 resource_size_t offset;
961 u16 command;
962 int i;
963
964 /* We don't do anything if PCI_PROBE_ONLY is set */
965 if (pci_flags & PCI_PROBE_ONLY)
966 return 0;
967
968 /* Job is a bit different between memory and IO */
969 if (res->flags & IORESOURCE_MEM) {
970 /* If the BAR is non-0 (res != pci_mem_offset) then it's
971 * probably been initialized by somebody
972 */
973 if (res->start != hose->pci_mem_offset)
974 return 0;
975
976 /* The BAR is 0, let's check if memory decoding is enabled on
977 * the bridge. If not, we consider it unassigned
978 */
979 pci_read_config_word(dev, PCI_COMMAND, &command);
980 if ((command & PCI_COMMAND_MEMORY) == 0)
981 return 1;
982
983 /* Memory decoding is enabled and the BAR is 0. If any of
 984 * the bridge resources covers that starting address (0), then
 985 * it's good enough for us for memory.
986 */
987 for (i = 0; i < 3; i++) {
988 if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
989 hose->mem_resources[i].start == hose->pci_mem_offset)
990 return 0;
991 }
992
993 /* Well, it starts at 0 and we know it will collide so we may as
994 * well consider it as unassigned. That covers the Apple case.
995 */
996 return 1;
997 } else {
998 /* If the BAR is non-0, then we consider it assigned */
999 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1000 if (((res->start - offset) & 0xfffffffful) != 0)
1001 return 0;
1002
1003 /* Here, we are a bit different than memory as typically IO
1004 * space starting at low addresses -is- valid. What we do
 1005 * instead is that we consider as unassigned anything that
1006 * doesn't have IO enabled in the PCI command register,
1007 * and that's it.
1008 */
1009 pci_read_config_word(dev, PCI_COMMAND, &command);
1010 if (command & PCI_COMMAND_IO)
1011 return 0;
1012
1013 /* It's starting at 0 and IO is disabled in the bridge, consider
1014 * it unassigned
1015 */
1016 return 1;
1017 }
1018}
1019
1020/* Fixup resources of a PCI<->PCI bridge */
1021static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
1022{
1023 struct resource *res;
1024 int i;
1025
1026 struct pci_dev *dev = bus->self;
1027
1028 pci_bus_for_each_resource(bus, res, i) {
1029 res = bus->resource[i];
1030 if (!res)
1031 continue;
1032 if (!res->flags)
1033 continue;
1034 if (i >= 3 && bus->self->transparent)
1035 continue;
1036
1037 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
1038 pci_name(dev), i,
1039 (unsigned long long)res->start,\
1040 (unsigned long long)res->end,
1041 (unsigned int)res->flags);
1042
1043 /* Perform fixup */
1044 fixup_resource(res, dev);
1045
1046 /* Try to detect uninitialized P2P bridge resources,
1047 * and clear them out so they get re-assigned later
1048 */
1049 if (pcibios_uninitialized_bridge_resource(bus, res)) {
1050 res->flags = 0;
1051 pr_debug("PCI:%s (unassigned)\n",
1052 pci_name(dev));
1053 } else {
1054 pr_debug("PCI:%s %016llx-%016llx\n",
1055 pci_name(dev),
1056 (unsigned long long)res->start,
1057 (unsigned long long)res->end);
1058 }
1059 }
1060}
1061
1062void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
1063{
1064 /* Fix up the bus resources for P2P bridges */
1065 if (bus->self != NULL)
1066 pcibios_fixup_bridge(bus);
1067}
1068
1069void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1070{
1071 struct pci_dev *dev;
1072
1073 pr_debug("PCI: Fixup bus devices %d (%s)\n",
1074 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1075
1076 list_for_each_entry(dev, &bus->devices, bus_list) {
1077 struct dev_archdata *sd = &dev->dev.archdata;
1078
1079 /* Setup OF node pointer in archdata */
1080 sd->of_node = pci_device_to_OF_node(dev);
1081
1082 /* Fixup NUMA node as it may not be setup yet by the generic
1083 * code and is needed by the DMA init
1084 */
1085 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1086
1087 /* Hook up default DMA ops */
1088 sd->dma_ops = pci_dma_ops;
1089 sd->dma_data = (void *)PCI_DRAM_OFFSET;
1090
1091 /* Read default IRQs and fixup if necessary */
1092 pci_read_irq_line(dev);
1093 }
1094}
1095
1096void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1097{
1098 /* When called from the generic PCI probe, read PCI<->PCI bridge
1099 * bases. This is -not- called when generating the PCI tree from
1100 * the OF device-tree.
1101 */
1102 if (bus->self != NULL)
1103 pci_read_bridge_bases(bus);
1104
 1105 /* Now fixup the bus itself */
1106 pcibios_setup_bus_self(bus);
1107
1108 /* Now fixup devices on that bus */
1109 pcibios_setup_bus_devices(bus);
1110}
1111EXPORT_SYMBOL(pcibios_fixup_bus);
1112
1113static int skip_isa_ioresource_align(struct pci_dev *dev)
1114{
1115 if ((pci_flags & PCI_CAN_SKIP_ISA_ALIGN) &&
1116 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1117 return 1;
1118 return 0;
1119}
1120
1121/*
1122 * We need to avoid collisions with `mirrored' VGA ports
1123 * and other strange ISA hardware, so we always want the
1124 * addresses to be allocated in the 0x000-0x0ff region
1125 * modulo 0x400.
1126 *
1127 * Why? Because some silly external IO cards only decode
1128 * the low 10 bits of the IO address. The 0x00-0xff region
1129 * is reserved for motherboard devices that decode all 16
1130 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1131 * but we want to try to avoid allocating at 0x2900-0x2bff
 1132 * which might have been mirrored at 0x0100-0x03ff.
1133 */
1134resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1135 resource_size_t size, resource_size_t align)
1136{
1137 struct pci_dev *dev = data;
1138 resource_size_t start = res->start;
1139
1140 if (res->flags & IORESOURCE_IO) {
1141 if (skip_isa_ioresource_align(dev))
1142 return start;
1143 if (start & 0x300)
1144 start = (start + 0x3ff) & ~0x3ff;
1145 }
1146
1147 return start;
1148}
1149EXPORT_SYMBOL(pcibios_align_resource);
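As a worked example of the alignment rule implemented above (a stand-alone sketch, using the values from the comment): 0x2800 lies in the low 0x100 of its 0x400 block and is left alone, while 0x2900 falls in the potentially mirrored region and is bumped to the next 0x400 boundary.

#include <stdio.h>

/* Stand-alone illustration of the IO alignment rule used in pcibios_align_resource() */
static unsigned long align_io_start(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ff;
	return start;
}

int main(void)
{
	printf("0x2800 -> 0x%lx\n", align_io_start(0x2800));	/* stays 0x2800 */
	printf("0x2900 -> 0x%lx\n", align_io_start(0x2900));	/* becomes 0x2c00 */
	return 0;
}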
1150
1151/*
1152 * Reparent resource children of pr that conflict with res
1153 * under res, and make res replace those children.
1154 */
1155static int __init reparent_resources(struct resource *parent,
1156 struct resource *res)
1157{
1158 struct resource *p, **pp;
1159 struct resource **firstpp = NULL;
1160
1161 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1162 if (p->end < res->start)
1163 continue;
1164 if (res->end < p->start)
1165 break;
1166 if (p->start < res->start || p->end > res->end)
1167 return -1; /* not completely contained */
1168 if (firstpp == NULL)
1169 firstpp = pp;
1170 }
1171 if (firstpp == NULL)
1172 return -1; /* didn't find any conflicting entries? */
1173 res->parent = parent;
1174 res->child = *firstpp;
1175 res->sibling = *pp;
1176 *firstpp = res;
1177 *pp = NULL;
1178 for (p = res->child; p != NULL; p = p->sibling) {
1179 p->parent = res;
1180 pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
1181 p->name,
1182 (unsigned long long)p->start,
1183 (unsigned long long)p->end, res->name);
1184 }
1185 return 0;
1186}
1187
1188/*
1189 * Handle resources of PCI devices. If the world were perfect, we could
1190 * just allocate all the resource regions and do nothing more. It isn't.
1191 * On the other hand, we cannot just re-allocate all devices, as it would
1192 * require us to know lots of host bridge internals. So we attempt to
1193 * keep as much of the original configuration as possible, but tweak it
1194 * when it's found to be wrong.
1195 *
1196 * Known BIOS problems we have to work around:
1197 * - I/O or memory regions not configured
1198 * - regions configured, but not enabled in the command register
1199 * - bogus I/O addresses above 64K used
1200 * - expansion ROMs left enabled (this may sound harmless, but given
1201 * the fact the PCI specs explicitly allow address decoders to be
1202 * shared between expansion ROMs and other resource regions, it's
1203 * at least dangerous)
1204 *
1205 * Our solution:
1206 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1207 * This gives us fixed barriers on where we can allocate.
1208 * (2) Allocate resources for all enabled devices. If there is
1209 * a collision, just mark the resource as unallocated. Also
1210 * disable expansion ROMs during this step.
1211 * (3) Try to allocate resources for disabled devices. If the
1212 * resources were assigned correctly, everything goes well,
1213 * if they weren't, they won't disturb allocation of other
1214 * resources.
1215 * (4) Assign new addresses to resources which were either
1216 * not configured at all or misconfigured. If explicitly
1217 * requested by the user, configure expansion ROM address
1218 * as well.
1219 */
1220
1221void pcibios_allocate_bus_resources(struct pci_bus *bus)
1222{
1223 struct pci_bus *b;
1224 int i;
1225 struct resource *res, *pr;
1226
1227 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1228 pci_domain_nr(bus), bus->number);
1229
1230 pci_bus_for_each_resource(bus, res, i) {
1231 res = bus->resource[i];
1232 if (!res || !res->flags
1233 || res->start > res->end || res->parent)
1234 continue;
1235 if (bus->parent == NULL)
1236 pr = (res->flags & IORESOURCE_IO) ?
1237 &ioport_resource : &iomem_resource;
1238 else {
1239 /* Don't bother with non-root busses when
1240 * re-assigning all resources. We clear the
1241 * resource flags as if they were colliding
1242 * and as such ensure proper re-allocation
1243 * later.
1244 */
1245 if (pci_flags & PCI_REASSIGN_ALL_RSRC)
1246 goto clear_resource;
1247 pr = pci_find_parent_resource(bus->self, res);
1248 if (pr == res) {
1249 /* this happens when the generic PCI
1250 * code (wrongly) decides that this
1251 * bridge is transparent -- paulus
1252 */
1253 continue;
1254 }
1255 }
1256
1257 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
1258 "[0x%x], parent %p (%s)\n",
1259 bus->self ? pci_name(bus->self) : "PHB",
1260 bus->number, i,
1261 (unsigned long long)res->start,
1262 (unsigned long long)res->end,
1263 (unsigned int)res->flags,
1264 pr, (pr && pr->name) ? pr->name : "nil");
1265
1266 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1267 if (request_resource(pr, res) == 0)
1268 continue;
1269 /*
1270 * Must be a conflict with an existing entry.
1271 * Move that entry (or entries) under the
1272 * bridge resource and try again.
1273 */
1274 if (reparent_resources(pr, res) == 0)
1275 continue;
1276 }
1277 printk(KERN_WARNING "PCI: Cannot allocate resource region "
1278 "%d of PCI bridge %d, will remap\n", i, bus->number);
1279clear_resource:
1280 res->flags = 0;
1281 }
1282
1283 list_for_each_entry(b, &bus->children, node)
1284 pcibios_allocate_bus_resources(b);
1285}
1286
1287static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1288{
1289 struct resource *pr, *r = &dev->resource[idx];
1290
1291 pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1292 pci_name(dev), idx,
1293 (unsigned long long)r->start,
1294 (unsigned long long)r->end,
1295 (unsigned int)r->flags);
1296
1297 pr = pci_find_parent_resource(dev, r);
1298 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1299 request_resource(pr, r) < 0) {
1300 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1301 " of device %s, will remap\n", idx, pci_name(dev));
1302 if (pr)
1303 pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
1304 pr,
1305 (unsigned long long)pr->start,
1306 (unsigned long long)pr->end,
1307 (unsigned int)pr->flags);
1308 /* We'll assign a new address later */
1309 r->flags |= IORESOURCE_UNSET;
1310 r->end -= r->start;
1311 r->start = 0;
1312 }
1313}
1314
1315static void __init pcibios_allocate_resources(int pass)
1316{
1317 struct pci_dev *dev = NULL;
1318 int idx, disabled;
1319 u16 command;
1320 struct resource *r;
1321
1322 for_each_pci_dev(dev) {
1323 pci_read_config_word(dev, PCI_COMMAND, &command);
1324 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1325 r = &dev->resource[idx];
1326 if (r->parent) /* Already allocated */
1327 continue;
1328 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1329 continue; /* Not assigned at all */
1330 /* We only allocate ROMs on pass 1 just in case they
1331 * have been screwed up by firmware
1332 */
1333 if (idx == PCI_ROM_RESOURCE)
1334 disabled = 1;
1335 if (r->flags & IORESOURCE_IO)
1336 disabled = !(command & PCI_COMMAND_IO);
1337 else
1338 disabled = !(command & PCI_COMMAND_MEMORY);
1339 if (pass == disabled)
1340 alloc_resource(dev, idx);
1341 }
1342 if (pass)
1343 continue;
1344 r = &dev->resource[PCI_ROM_RESOURCE];
1345 if (r->flags) {
1346 /* Turn the ROM off, leave the resource region,
1347 * but keep it unregistered.
1348 */
1349 u32 reg;
1350 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1351 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1352 pr_debug("PCI: Switching off ROM of %s\n",
1353 pci_name(dev));
1354 r->flags &= ~IORESOURCE_ROM_ENABLE;
1355 pci_write_config_dword(dev, dev->rom_base_reg,
1356 reg & ~PCI_ROM_ADDRESS_ENABLE);
1357 }
1358 }
1359 }
1360}
1361
1362static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1363{
1364 struct pci_controller *hose = pci_bus_to_host(bus);
1365 resource_size_t offset;
1366 struct resource *res, *pres;
1367 int i;
1368
1369 pr_debug("Reserving legacy ranges for domain %04x\n",
1370 pci_domain_nr(bus));
1371
1372 /* Check for IO */
1373 if (!(hose->io_resource.flags & IORESOURCE_IO))
1374 goto no_io;
1375 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1376 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1377 BUG_ON(res == NULL);
1378 res->name = "Legacy IO";
1379 res->flags = IORESOURCE_IO;
1380 res->start = offset;
1381 res->end = (offset + 0xfff) & 0xfffffffful;
1382 pr_debug("Candidate legacy IO: %pR\n", res);
1383 if (request_resource(&hose->io_resource, res)) {
1384 printk(KERN_DEBUG
1385 "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1386 pci_domain_nr(bus), bus->number, res);
1387 kfree(res);
1388 }
1389
1390 no_io:
1391 /* Check for memory */
1392 offset = hose->pci_mem_offset;
1393 pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
1394 for (i = 0; i < 3; i++) {
1395 pres = &hose->mem_resources[i];
1396 if (!(pres->flags & IORESOURCE_MEM))
1397 continue;
1398 pr_debug("hose mem res: %pR\n", pres);
1399 if ((pres->start - offset) <= 0xa0000 &&
1400 (pres->end - offset) >= 0xbffff)
1401 break;
1402 }
1403 if (i >= 3)
1404 return;
1405 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1406 BUG_ON(res == NULL);
1407 res->name = "Legacy VGA memory";
1408 res->flags = IORESOURCE_MEM;
1409 res->start = 0xa0000 + offset;
1410 res->end = 0xbffff + offset;
1411 pr_debug("Candidate VGA memory: %pR\n", res);
1412 if (request_resource(pres, res)) {
1413 printk(KERN_DEBUG
1414 "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1415 pci_domain_nr(bus), bus->number, res);
1416 kfree(res);
1417 }
1418}
1419
1420void __init pcibios_resource_survey(void)
1421{
1422 struct pci_bus *b;
1423
1424 /* Allocate and assign resources. If we re-assign everything, then
1425 * we skip the allocate phase
1426 */
1427 list_for_each_entry(b, &pci_root_buses, node)
1428 pcibios_allocate_bus_resources(b);
1429
1430 if (!(pci_flags & PCI_REASSIGN_ALL_RSRC)) {
1431 pcibios_allocate_resources(0);
1432 pcibios_allocate_resources(1);
1433 }
1434
 1435 /* Before we start assigning unassigned resources, we try to reserve
1436 * the low IO area and the VGA memory area if they intersect the
1437 * bus available resources to avoid allocating things on top of them
1438 */
1439 if (!(pci_flags & PCI_PROBE_ONLY)) {
1440 list_for_each_entry(b, &pci_root_buses, node)
1441 pcibios_reserve_legacy_regions(b);
1442 }
1443
1444 /* Now, if the platform didn't decide to blindly trust the firmware,
1445 * we proceed to assigning things that were left unassigned
1446 */
1447 if (!(pci_flags & PCI_PROBE_ONLY)) {
1448 pr_debug("PCI: Assigning unassigned resources...\n");
1449 pci_assign_unassigned_resources();
1450 }
1451}
1452
1453#ifdef CONFIG_HOTPLUG
1454
 1455 /* This is used by the PCI hotplug driver to allocate resources
1456 * of newly plugged busses. We can try to consolidate with the
1457 * rest of the code later, for now, keep it as-is as our main
1458 * resource allocation function doesn't deal with sub-trees yet.
1459 */
1460void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
1461{
1462 struct pci_dev *dev;
1463 struct pci_bus *child_bus;
1464
1465 list_for_each_entry(dev, &bus->devices, bus_list) {
1466 int i;
1467
1468 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1469 struct resource *r = &dev->resource[i];
1470
1471 if (r->parent || !r->start || !r->flags)
1472 continue;
1473
1474 pr_debug("PCI: Claiming %s: "
1475 "Resource %d: %016llx..%016llx [%x]\n",
1476 pci_name(dev), i,
1477 (unsigned long long)r->start,
1478 (unsigned long long)r->end,
1479 (unsigned int)r->flags);
1480
1481 pci_claim_resource(dev, i);
1482 }
1483 }
1484
1485 list_for_each_entry(child_bus, &bus->children, node)
1486 pcibios_claim_one_bus(child_bus);
1487}
1488EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1489
1490
1491/* pcibios_finish_adding_to_bus
1492 *
1493 * This is to be called by the hotplug code after devices have been
 1494 * added to a bus; this includes calling it for a PHB that is just
1495 * being added
1496 */
1497void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1498{
1499 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1500 pci_domain_nr(bus), bus->number);
1501
1502 /* Allocate bus and devices resources */
1503 pcibios_allocate_bus_resources(bus);
1504 pcibios_claim_one_bus(bus);
1505
1506 /* Add new devices to global lists. Register in proc, sysfs. */
1507 pci_bus_add_devices(bus);
1508
1509 /* Fixup EEH */
1510 /* eeh_add_device_tree_late(bus); */
1511}
1512EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1513
1514#endif /* CONFIG_HOTPLUG */
1515
1516int pcibios_enable_device(struct pci_dev *dev, int mask)
1517{
1518 return pci_enable_resources(dev, mask);
1519}
1520
1521void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
1522{
1523 struct pci_bus *bus = hose->bus;
1524 struct resource *res;
1525 int i;
1526
1527 /* Hookup PHB IO resource */
1528 bus->resource[0] = res = &hose->io_resource;
1529
1530 if (!res->flags) {
1531 printk(KERN_WARNING "PCI: I/O resource not set for host"
1532 " bridge %s (domain %d)\n",
1533 hose->dn->full_name, hose->global_number);
1534 /* Workaround for lack of IO resource only on 32-bit */
1535 res->start = (unsigned long)hose->io_base_virt - isa_io_base;
1536 res->end = res->start + IO_SPACE_LIMIT;
1537 res->flags = IORESOURCE_IO;
1538 }
1539
1540 pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
1541 (unsigned long long)res->start,
1542 (unsigned long long)res->end,
1543 (unsigned long)res->flags);
1544
1545 /* Hookup PHB Memory resources */
1546 for (i = 0; i < 3; ++i) {
1547 res = &hose->mem_resources[i];
1548 if (!res->flags) {
1549 if (i > 0)
1550 continue;
1551 printk(KERN_ERR "PCI: Memory resource 0 not set for "
1552 "host bridge %s (domain %d)\n",
1553 hose->dn->full_name, hose->global_number);
1554
1555 /* Workaround for lack of MEM resource only on 32-bit */
1556 res->start = hose->pci_mem_offset;
1557 res->end = (resource_size_t)-1LL;
1558 res->flags = IORESOURCE_MEM;
1559
1560 }
1561 bus->resource[i+1] = res;
1562
1563 pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
1564 i, (unsigned long long)res->start,
1565 (unsigned long long)res->end,
1566 (unsigned long)res->flags);
1567 }
1568
1569 pr_debug("PCI: PHB MEM offset = %016llx\n",
1570 (unsigned long long)hose->pci_mem_offset);
1571 pr_debug("PCI: PHB IO offset = %08lx\n",
1572 (unsigned long)hose->io_base_virt - _IO_BASE);
1573}
1574
1575/*
1576 * Null PCI config access functions, for the case when we can't
1577 * find a hose.
1578 */
1579#define NULL_PCI_OP(rw, size, type) \
1580static int \
1581null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1582{ \
1583 return PCIBIOS_DEVICE_NOT_FOUND; \
1584}
1585
1586static int
1587null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1588 int len, u32 *val)
1589{
1590 return PCIBIOS_DEVICE_NOT_FOUND;
1591}
1592
1593static int
1594null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1595 int len, u32 val)
1596{
1597 return PCIBIOS_DEVICE_NOT_FOUND;
1598}
1599
1600static struct pci_ops null_pci_ops = {
1601 .read = null_read_config,
1602 .write = null_write_config,
1603};
1604
1605/*
1606 * These functions are used early on before PCI scanning is done
1607 * and all of the pci_dev and pci_bus structures have been created.
1608 */
1609static struct pci_bus *
1610fake_pci_bus(struct pci_controller *hose, int busnr)
1611{
1612 static struct pci_bus bus;
1613
1614 if (!hose)
1615 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1616
1617 bus.number = busnr;
1618 bus.sysdata = hose;
1619 bus.ops = hose ? hose->ops : &null_pci_ops;
1620 return &bus;
1621}
1622
1623#define EARLY_PCI_OP(rw, size, type) \
1624int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1625 int devfn, int offset, type value) \
1626{ \
1627 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1628 devfn, offset, value); \
1629}
1630
1631EARLY_PCI_OP(read, byte, u8 *)
1632EARLY_PCI_OP(read, word, u16 *)
1633EARLY_PCI_OP(read, dword, u32 *)
1634EARLY_PCI_OP(write, byte, u8)
1635EARLY_PCI_OP(write, word, u16)
1636EARLY_PCI_OP(write, dword, u32)
1637
1638int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1639 int cap)
1640{
1641 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1642}
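A hedged sketch of how early platform code could use the accessors generated by EARLY_PCI_OP above, before any struct pci_bus has been scanned; probing device 0 for its vendor ID is purely illustrative.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>

static void __init example_early_probe(struct pci_controller *hose)
{
	u32 id;

	/* devfn 0 = device 0, function 0 on the hose's first bus */
	if (early_read_config_dword(hose, hose->first_busno, 0,
				    PCI_VENDOR_ID, &id))
		return;
	if ((id & 0xffff) != 0xffff)
		printk(KERN_INFO "PCI: found device 0, id 0x%08x\n", id);
}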
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
new file mode 100644
index 000000000000..3c3d808d7ce0
--- /dev/null
+++ b/arch/microblaze/pci/pci_32.c
@@ -0,0 +1,431 @@
1/*
2 * Common pmac/prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/kernel.h>
6#include <linux/pci.h>
7#include <linux/delay.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/capability.h>
11#include <linux/sched.h>
12#include <linux/errno.h>
13#include <linux/bootmem.h>
14#include <linux/irq.h>
15#include <linux/list.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18
19#include <asm/processor.h>
20#include <asm/io.h>
21#include <asm/prom.h>
22#include <asm/sections.h>
23#include <asm/pci-bridge.h>
24#include <asm/byteorder.h>
25#include <asm/uaccess.h>
26
27#undef DEBUG
28
29unsigned long isa_io_base;
30unsigned long pci_dram_offset;
31int pcibios_assign_bus_offset = 1;
32
33static u8 *pci_to_OF_bus_map;
34
35/* By default, we don't re-assign bus numbers. We do this only on
36 * some pmacs
37 */
38static int pci_assign_all_buses;
39
40static int pci_bus_count;
41
42/*
43 * Functions below are used on OpenFirmware machines.
44 */
45static void
46make_one_node_map(struct device_node *node, u8 pci_bus)
47{
48 const int *bus_range;
49 int len;
50
51 if (pci_bus >= pci_bus_count)
52 return;
53 bus_range = of_get_property(node, "bus-range", &len);
54 if (bus_range == NULL || len < 2 * sizeof(int)) {
55 printk(KERN_WARNING "Can't get bus-range for %s, "
56 "assuming it starts at 0\n", node->full_name);
57 pci_to_OF_bus_map[pci_bus] = 0;
58 } else
59 pci_to_OF_bus_map[pci_bus] = bus_range[0];
60
61 for_each_child_of_node(node, node) {
62 struct pci_dev *dev;
63 const unsigned int *class_code, *reg;
64
65 class_code = of_get_property(node, "class-code", NULL);
66 if (!class_code ||
67 ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
68 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
69 continue;
70 reg = of_get_property(node, "reg", NULL);
71 if (!reg)
72 continue;
73 dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
74 if (!dev || !dev->subordinate) {
75 pci_dev_put(dev);
76 continue;
77 }
78 make_one_node_map(node, dev->subordinate->number);
79 pci_dev_put(dev);
80 }
81}
82
83void
84pcibios_make_OF_bus_map(void)
85{
86 int i;
87 struct pci_controller *hose, *tmp;
88 struct property *map_prop;
89 struct device_node *dn;
90
91 pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
92 if (!pci_to_OF_bus_map) {
93 printk(KERN_ERR "Can't allocate OF bus map !\n");
94 return;
95 }
96
 97 /* We fill the bus map with invalid values; that helps
98 * debugging.
99 */
100 for (i = 0; i < pci_bus_count; i++)
101 pci_to_OF_bus_map[i] = 0xff;
102
103 /* For each hose, we begin searching bridges */
104 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
105 struct device_node *node = hose->dn;
106
107 if (!node)
108 continue;
109 make_one_node_map(node, hose->first_busno);
110 }
111 dn = of_find_node_by_path("/");
112 map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
113 if (map_prop) {
114 BUG_ON(pci_bus_count > map_prop->length);
115 memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
116 }
117 of_node_put(dn);
118#ifdef DEBUG
119 printk(KERN_INFO "PCI->OF bus map:\n");
120 for (i = 0; i < pci_bus_count; i++) {
121 if (pci_to_OF_bus_map[i] == 0xff)
122 continue;
123 printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]);
124 }
125#endif
126}
127
128typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data);
129
130static struct device_node *scan_OF_pci_childs(struct device_node *parent,
131 pci_OF_scan_iterator filter, void *data)
132{
133 struct device_node *node;
134 struct device_node *sub_node;
135
136 for_each_child_of_node(parent, node) {
137 const unsigned int *class_code;
138
139 if (filter(node, data)) {
140 of_node_put(node);
141 return node;
142 }
143
 144 /* For PCI<->PCI bridges or CardBus bridges, we go down.
145 * Note: some OFs create a parent node "multifunc-device" as
146 * a fake root for all functions of a multi-function device,
147 * we go down them as well.
148 */
149 class_code = of_get_property(node, "class-code", NULL);
150 if ((!class_code ||
151 ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
152 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
153 strcmp(node->name, "multifunc-device"))
154 continue;
155 sub_node = scan_OF_pci_childs(node, filter, data);
156 if (sub_node) {
157 of_node_put(node);
158 return sub_node;
159 }
160 }
161 return NULL;
162}
163
164static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
165 unsigned int devfn)
166{
167 struct device_node *np, *cnp;
168 const u32 *reg;
169 unsigned int psize;
170
171 for_each_child_of_node(parent, np) {
172 reg = of_get_property(np, "reg", &psize);
173 if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
174 return np;
175
176 /* Note: some OFs create a parent node "multifunc-device" as
177 * a fake root for all functions of a multi-function device,
178 * we go down them as well. */
179 if (!strcmp(np->name, "multifunc-device")) {
180 cnp = scan_OF_for_pci_dev(np, devfn);
181 if (cnp)
182 return cnp;
183 }
184 }
185 return NULL;
186}
187
188
189static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
190{
191 struct device_node *parent, *np;
192
193 /* Are we a root bus ? */
194 if (bus->self == NULL || bus->parent == NULL) {
195 struct pci_controller *hose = pci_bus_to_host(bus);
196 if (hose == NULL)
197 return NULL;
198 return of_node_get(hose->dn);
199 }
200
201 /* not a root bus, we need to get our parent */
202 parent = scan_OF_for_pci_bus(bus->parent);
203 if (parent == NULL)
204 return NULL;
205
206 /* now iterate for children for a match */
207 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
208 of_node_put(parent);
209
210 return np;
211}
212
213/*
214 * Scans the OF tree for a device node matching a PCI device
215 */
216struct device_node *
217pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
218{
219 struct device_node *parent, *np;
220
221 pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
222 parent = scan_OF_for_pci_bus(bus);
223 if (parent == NULL)
224 return NULL;
225 pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
226 np = scan_OF_for_pci_dev(parent, devfn);
227 of_node_put(parent);
228 pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
229
230 /* XXX most callers don't release the returned node
231 * mostly because ppc64 doesn't increase the refcount,
232 * we need to fix that.
233 */
234 return np;
235}
236EXPORT_SYMBOL(pci_busdev_to_OF_node);
237
238struct device_node*
239pci_device_to_OF_node(struct pci_dev *dev)
240{
241 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
242}
243EXPORT_SYMBOL(pci_device_to_OF_node);
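/*
 * Editor's usage sketch (hypothetical caller, not part of the patch): a
 * driver could map a probed pci_dev back to its device-tree node and read
 * board-specific properties from it; "example-property" is made up here.
 */
static const void *sketch_read_board_prop(struct pci_dev *pdev)
{
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (!np)
		return NULL;
	return of_get_property(np, "example-property", NULL);
}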
244
245static int
246find_OF_pci_device_filter(struct device_node *node, void *data)
247{
248 return ((void *)node == data);
249}
250
251/*
252 * Returns the PCI device matching a given OF node
253 */
254int
255pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
256{
257 const unsigned int *reg;
258 struct pci_controller *hose;
259 struct pci_dev *dev = NULL;
260
261 /* Make sure it's really a PCI device */
262 hose = pci_find_hose_for_OF_device(node);
263 if (!hose || !hose->dn)
264 return -ENODEV;
265 if (!scan_OF_pci_childs(hose->dn,
266 find_OF_pci_device_filter, (void *)node))
267 return -ENODEV;
268 reg = of_get_property(node, "reg", NULL);
269 if (!reg)
270 return -ENODEV;
271 *bus = (reg[0] >> 16) & 0xff;
272 *devfn = ((reg[0] >> 8) & 0xff);
273
274 /* Ok, here we need some tweak. If we have already renumbered
275 * all busses, we can't rely on the OF bus number any more.
276 * the pci_to_OF_bus_map is not enough as several PCI busses
277 * may match the same OF bus number.
278 */
279 if (!pci_to_OF_bus_map)
280 return 0;
281
282 for_each_pci_dev(dev)
283 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
284 dev->devfn == *devfn) {
285 *bus = dev->bus->number;
286 pci_dev_put(dev);
287 return 0;
288 }
289
290 return -ENODEV;
291}
292EXPORT_SYMBOL(pci_device_from_OF_node);
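/*
 * Editor's usage sketch (hypothetical, not part of the patch): going the
 * other way, a device-tree node can be resolved to the corresponding
 * pci_dev by combining the lookup above with pci_get_bus_and_slot(),
 * which is assumed to be available here.
 */
static struct pci_dev *sketch_of_node_to_pci_dev(struct device_node *node)
{
	u8 bus, devfn;

	if (pci_device_from_OF_node(node, &bus, &devfn))
		return NULL;
	return pci_get_bus_and_slot(bus, devfn);	/* takes a reference */
}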
293
294/* We create the "pci-OF-bus-map" property now so it appears in the
295 * /proc device tree
296 */
297void __init
298pci_create_OF_bus_map(void)
299{
300 struct property *of_prop;
301 struct device_node *dn;
302
303 of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \
304 256);
305 if (!of_prop)
306 return;
307 dn = of_find_node_by_path("/");
308 if (dn) {
309 memset(of_prop, -1, sizeof(struct property) + 256);
310 of_prop->name = "pci-OF-bus-map";
311 of_prop->length = 256;
312 of_prop->value = &of_prop[1];
313 prom_add_property(dn, of_prop);
314 of_node_put(dn);
315 }
316}
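/*
 * Editor's note (sketch of the layout used above, not new functionality):
 * the property header and its 256-byte value share a single bootmem
 * allocation, with ->value pointing just past the header (&of_prop[1]).
 * Refreshing the exported map therefore only needs a bounded memcpy into
 * ->value, which is what pcibios_make_OF_bus_map() does; the helper name
 * below is hypothetical.
 */
static void of_bus_map_refresh(struct property *map_prop, const u8 *map, int len)
{
	if (len > map_prop->length)
		len = map_prop->length;
	memcpy(map_prop->value, map, len);	/* lands in &map_prop[1] */
}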
317
318static void __devinit pcibios_scan_phb(struct pci_controller *hose)
319{
320 struct pci_bus *bus;
321 struct device_node *node = hose->dn;
322 unsigned long io_offset;
323 struct resource *res = &hose->io_resource;
324
325 pr_debug("PCI: Scanning PHB %s\n",
326 node ? node->full_name : "<NO NAME>");
327
328 /* Create an empty bus for the toplevel */
329 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
330 if (bus == NULL) {
331 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
332 hose->global_number);
333 return;
334 }
335 bus->secondary = hose->first_busno;
336 hose->bus = bus;
337
338 /* Fixup IO space offset */
339 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
340 res->start = (res->start + io_offset) & 0xffffffffu;
341 res->end = (res->end + io_offset) & 0xffffffffu;
342
343 /* Wire up PHB bus resources */
344 pcibios_setup_phb_resources(hose);
345
346 /* Scan children */
347 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
348}
349
350static int __init pcibios_init(void)
351{
352 struct pci_controller *hose, *tmp;
353 int next_busno = 0;
354
355 printk(KERN_INFO "PCI: Probing PCI hardware\n");
356
357 if (pci_flags & PCI_REASSIGN_ALL_BUS) {
358		printk(KERN_INFO "setting pci_assign_all_buses\n");
359 pci_assign_all_buses = 1;
360 }
361
362 /* Scan all of the recorded PCI controllers. */
363 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
364 if (pci_assign_all_buses)
365 hose->first_busno = next_busno;
366 hose->last_busno = 0xff;
367 pcibios_scan_phb(hose);
368 printk(KERN_INFO "calling pci_bus_add_devices()\n");
369 pci_bus_add_devices(hose->bus);
370 if (pci_assign_all_buses || next_busno <= hose->last_busno)
371 next_busno = hose->last_busno + \
372 pcibios_assign_bus_offset;
373 }
374 pci_bus_count = next_busno;
375
376 /* OpenFirmware based machines need a map of OF bus
377 * numbers vs. kernel bus numbers since we may have to
378 * remap them.
379 */
380 if (pci_assign_all_buses)
381 pcibios_make_OF_bus_map();
382
383 /* Call common code to handle resource allocation */
384 pcibios_resource_survey();
385
386 return 0;
387}
388
389subsys_initcall(pcibios_init);
390
391static struct pci_controller*
392pci_bus_to_hose(int bus)
393{
394 struct pci_controller *hose, *tmp;
395
396 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
397 if (bus >= hose->first_busno && bus <= hose->last_busno)
398 return hose;
399 return NULL;
400}
401
402/* Provide information on locations of various I/O regions in physical
403 * memory. Do this on a per-card basis so that we choose the right
404 * root bridge.
405 * Note that the returned IO or memory base is a physical address
406 */
407
408long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
409{
410 struct pci_controller *hose;
411 long result = -EOPNOTSUPP;
412
413 hose = pci_bus_to_hose(bus);
414 if (!hose)
415 return -ENODEV;
416
417 switch (which) {
418 case IOBASE_BRIDGE_NUMBER:
419 return (long)hose->first_busno;
420 case IOBASE_MEMORY:
421 return (long)hose->pci_mem_offset;
422 case IOBASE_IO:
423 return (long)hose->io_base_phys;
424 case IOBASE_ISA_IO:
425 return (long)isa_io_base;
426 case IOBASE_ISA_MEM:
427 return (long)isa_mem_base;
428 }
429
430 return result;
431}
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c
new file mode 100644
index 000000000000..7869a41b0f94
--- /dev/null
+++ b/arch/microblaze/pci/xilinx_pci.c
@@ -0,0 +1,168 @@
1/*
2 * PCI support for Xilinx plbv46_pci soft-core which can be used on
3 * Xilinx Virtex ML410 / ML510 boards.
4 *
5 * Copyright 2009 Roderick Colenbrander
6 * Copyright 2009 Secret Lab Technologies Ltd.
7 *
8 * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
9 * by Benjamin Herrenschmidt.
10 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
11 *
12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program is licensed "as is" without any warranty of any
14 * kind, whether express or implied.
15 */
16
17#include <linux/ioport.h>
18#include <linux/of.h>
19#include <linux/pci.h>
20#include <asm/io.h>
21
22#define XPLB_PCI_ADDR 0x10c
23#define XPLB_PCI_DATA 0x110
24#define XPLB_PCI_BUS 0x114
25
26#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
27 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
28
29static struct of_device_id xilinx_pci_match[] = {
30 { .compatible = "xlnx,plbv46-pci-1.03.a", },
31 {}
32};
33
34/**
35 * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
36 */
37static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
38{
39 struct pci_controller *hose;
40 int i;
41
42 if (dev->devfn || dev->bus->self)
43 return;
44
45 hose = pci_bus_to_host(dev->bus);
46 if (!hose)
47 return;
48
49 if (!of_match_node(xilinx_pci_match, hose->dn))
50 return;
51
52 /* Hide the PCI host BARs from the kernel as their content doesn't
53 * fit well in the resource management
54 */
55 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
56 dev->resource[i].start = 0;
57 dev->resource[i].end = 0;
58 dev->resource[i].flags = 0;
59 }
60
61 dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
62 pci_name(dev));
63}
64DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
65
66#ifdef DEBUG
67/**
68 * xilinx_pci_exclude_device - Don't do config access for non-root bus
69 *
70 * This is a hack. Config access to any bus other than bus 0 does not
71 * currently work on the ML510 so we prevent it here.
72 */
73static int
74xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
75{
76 return (bus != 0);
77}
78
79/**
80 * xilinx_early_pci_scan - List pci config space for available devices
81 *
82 * List pci devices in very early phase.
83 */
84void __init xilinx_early_pci_scan(struct pci_controller *hose)
85{
86 u32 bus = 0;
87 u32 val, dev, func, offset;
88
89	/* Currently only 2 devices are connected - up to 32 are possible */
90 for (dev = 0; dev < 2; dev++) {
91		/* List only the first function number - up to 8 functions are possible */
92 for (func = 0; func < 1; func++) {
93 printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
94 /* read the first 64 standardized bytes */
95 /* Up-to 192 bytes can be list of capabilities */
96 for (offset = 0; offset < 64; offset += 4) {
97 early_read_config_dword(hose, bus,
98 PCI_DEVFN(dev, func), offset, &val);
99 if (offset == 0 && val == 0xFFFFFFFF) {
100 printk(KERN_CONT "\nABSENT");
101 break;
102 }
103 if (!(offset % 0x10))
104 printk(KERN_CONT "\n%04x: ", offset);
105
106 printk(KERN_CONT "%08x ", val);
107 }
108 printk(KERN_INFO "\n");
109 }
110 }
111}
112#else
113void __init xilinx_early_pci_scan(struct pci_controller *hose)
114{
115}
116#endif
117
118/**
119 * xilinx_pci_init - Find and register a Xilinx PCI host bridge
120 */
121void __init xilinx_pci_init(void)
122{
123 struct pci_controller *hose;
124 struct resource r;
125 void __iomem *pci_reg;
126 struct device_node *pci_node;
127
128 pci_node = of_find_matching_node(NULL, xilinx_pci_match);
129 if (!pci_node)
130 return;
131
132 if (of_address_to_resource(pci_node, 0, &r)) {
133 pr_err("xilinx-pci: cannot resolve base address\n");
134 return;
135 }
136
137 hose = pcibios_alloc_controller(pci_node);
138 if (!hose) {
139 pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
140 return;
141 }
142
143 /* Setup config space */
144 setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
145 r.start + XPLB_PCI_DATA,
146 INDIRECT_TYPE_SET_CFG_TYPE);
147
148 /* According to the xilinx plbv46_pci documentation the soft-core starts
149 * a self-init when the bus master enable bit is set. Without this bit
150 * set the pci bus can't be scanned.
151 */
152 early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
153
154 /* Set the max latency timer to 255 */
155 early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
156
157 /* Set the max bus number to 255, and bus/subbus no's to 0 */
158 pci_reg = of_iomap(pci_node, 0);
159 out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
160 iounmap(pci_reg);
161
162 /* Register the host bridge with the linux kernel! */
163 pci_process_bridge_OF_ranges(hose, pci_node,
164 INDIRECT_TYPE_SET_CFG_TYPE);
165
166 pr_info("xilinx-pci: Registered PCI host bridge\n");
167 xilinx_early_pci_scan(hose);
168}
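/*
 * Editor's sketch (illustration only, not taken from the plbv46
 * documentation): the setup_indirect_pci() call above makes the generic
 * config-space code drive the bridge through its ADDR/DATA register pair.
 * A raw config read then amounts to writing a type-1 style address to
 * XPLB_PCI_ADDR and reading XPLB_PCI_DATA; the exact address bit layout
 * below is the conventional one and is an assumption here.
 */
static u32 plbv46_cfg_read_sketch(void __iomem *regs, u8 bus, u8 devfn, u8 off)
{
	out_be32(regs + XPLB_PCI_ADDR,
		 0x80000000 | (bus << 16) | (devfn << 8) | (off & 0xfc));
	return in_be32(regs + XPLB_PCI_DATA);
}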
diff --git a/arch/microblaze/platform/Kconfig.platform b/arch/microblaze/platform/Kconfig.platform
index 8e9b4752d3ff..669c7eec293e 100644
--- a/arch/microblaze/platform/Kconfig.platform
+++ b/arch/microblaze/platform/Kconfig.platform
@@ -53,31 +53,12 @@ config OPT_LIB_FUNCTION
 
 config OPT_LIB_ASM
 	bool "Optimalized lib function ASM"
-	depends on OPT_LIB_FUNCTION
+	depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
 	default n
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).
 	  Function are written in asm code.
 
-# This is still a bit broken - disabling for now JW 20070504
-config ALLOW_EDIT_AUTO
-	bool "Permit Display/edit of Kconfig.auto platform settings"
-	default n
-	help
-	  Allows the editing of auto-generated platform settings from
-	  the Kconfig.auto file. Obviously this does not change the
-	  underlying hardware, so be very careful if you go editing
-	  these settings.
-
-	  Also, if you enable this, and edit various Kconfig.auto
-	  settings, YOUR CHANGES WILL BE LOST if you then disable it
-	  again. You have been warned!
-
-	  If unsure, say no.
-
-comment "Automatic platform settings from Kconfig.auto"
-	depends on ALLOW_EDIT_AUTO
-
 if PLATFORM_GENERIC=y
 	source "arch/microblaze/platform/generic/Kconfig.auto"
 endif
diff --git a/arch/microblaze/platform/generic/Kconfig.auto b/arch/microblaze/platform/generic/Kconfig.auto
index fbca22d9c8b9..5d86fc19029d 100644
--- a/arch/microblaze/platform/generic/Kconfig.auto
+++ b/arch/microblaze/platform/generic/Kconfig.auto
@@ -21,7 +21,6 @@
 
 # Definitions for MICROBLAZE0
 comment "Definitions for MICROBLAZE0"
-	depends on ALLOW_EDIT_AUTO
 
 config KERNEL_BASE_ADDR
 	hex "Physical address where Linux Kernel is"
@@ -30,33 +29,33 @@ config KERNEL_BASE_ADDR
 	  BASE Address for kernel
 
 config XILINX_MICROBLAZE0_FAMILY
-	string "Targetted FPGA family" if ALLOW_EDIT_AUTO
+	string "Targetted FPGA family"
 	default "virtex5"
 
 config XILINX_MICROBLAZE0_USE_MSR_INSTR
-	int "USE_MSR_INSTR range (0:1)" if ALLOW_EDIT_AUTO
-	default 1
+	int "USE_MSR_INSTR range (0:1)"
+	default 0
 
 config XILINX_MICROBLAZE0_USE_PCMP_INSTR
-	int "USE_PCMP_INSTR range (0:1)" if ALLOW_EDIT_AUTO
-	default 1
+	int "USE_PCMP_INSTR range (0:1)"
+	default 0
 
 config XILINX_MICROBLAZE0_USE_BARREL
-	int "USE_BARREL range (0:1)" if ALLOW_EDIT_AUTO
-	default 1
+	int "USE_BARREL range (0:1)"
+	default 0
 
 config XILINX_MICROBLAZE0_USE_DIV
-	int "USE_DIV range (0:1)" if ALLOW_EDIT_AUTO
-	default 1
+	int "USE_DIV range (0:1)"
+	default 0
 
 config XILINX_MICROBLAZE0_USE_HW_MUL
-	int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)" if ALLOW_EDIT_AUTO
-	default 2
+	int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)"
+	default 0
 
 config XILINX_MICROBLAZE0_USE_FPU
-	int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)" if ALLOW_EDIT_AUTO
-	default 2
+	int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)"
+	default 0
 
 config XILINX_MICROBLAZE0_HW_VER
-	string "Core version number" if ALLOW_EDIT_AUTO
+	string "Core version number"
 	default 7.10.d
diff --git a/arch/microblaze/platform/generic/system.dts b/arch/microblaze/platform/generic/system.dts
index 29993f62b30a..2d5c41767cd0 100644
--- a/arch/microblaze/platform/generic/system.dts
+++ b/arch/microblaze/platform/generic/system.dts
@@ -32,11 +32,16 @@
 	#address-cells = <1>;
 	#size-cells = <1>;
 	compatible = "xlnx,microblaze";
+	hard-reset-gpios = <&LEDs_8Bit 2 1>;
 	model = "testing";
 	DDR2_SDRAM: memory@90000000 {
 		device_type = "memory";
 		reg = < 0x90000000 0x10000000 >;
 	} ;
+	aliases {
+		ethernet0 = &Hard_Ethernet_MAC;
+		serial0 = &RS232_Uart_1;
+	} ;
 	chosen {
 		bootargs = "console=ttyUL0,115200 highres=on";
 		linux,stdout-path = "/plb@0/serial@84000000";
@@ -127,7 +132,7 @@
 	mb_plb: plb@0 {
 		#address-cells = <1>;
 		#size-cells = <1>;
-		compatible = "xlnx,plb-v46-1.03.a", "simple-bus";
+		compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
 		ranges ;
 		FLASH: flash@a0000000 {
 			bank-width = <2>;
@@ -214,12 +219,12 @@
 			#size-cells = <1>;
 			compatible = "xlnx,compound";
 			ethernet@81c00000 {
-				compatible = "xlnx,xps-ll-temac-1.01.b";
+				compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
 				device_type = "network";
 				interrupt-parent = <&xps_intc_0>;
 				interrupts = < 5 2 >;
 				llink-connected = <&PIM3>;
-				local-mac-address = [ 02 00 00 00 00 00 ];
+				local-mac-address = [ 00 0a 35 00 00 00 ];
 				reg = < 0x81c00000 0x40 >;
 				xlnx,bus2core-clk-ratio = <0x1>;
 				xlnx,phy-type = <0x1>;
@@ -261,6 +266,33 @@
 			xlnx,is-dual = <0x0>;
 			xlnx,tri-default = <0xffffffff>;
 			xlnx,tri-default-2 = <0xffffffff>;
+			#gpio-cells = <2>;
+			gpio-controller;
+		} ;
+
+		gpio-leds {
+			compatible = "gpio-leds";
+
+			heartbeat {
+				label = "Heartbeat";
+				gpios = <&LEDs_8Bit 4 1>;
+				linux,default-trigger = "heartbeat";
+			};
+
+			yellow {
+				label = "Yellow";
+				gpios = <&LEDs_8Bit 5 1>;
+			};
+
+			red {
+				label = "Red";
+				gpios = <&LEDs_8Bit 6 1>;
+			};
+
+			green {
+				label = "Green";
+				gpios = <&LEDs_8Bit 7 1>;
+			};
 		} ;
 		RS232_Uart_1: serial@84000000 {
 			clock-frequency = <125000000>;
diff --git a/arch/microblaze/platform/platform.c b/arch/microblaze/platform/platform.c
index 56e0234fa34b..5b89b58c5aed 100644
--- a/arch/microblaze/platform/platform.c
+++ b/arch/microblaze/platform/platform.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/of_platform.h>
 #include <asm/prom.h>
+#include <asm/setup.h>
 
 static struct of_device_id xilinx_of_bus_ids[] __initdata = {
 	{ .compatible = "simple-bus", },
@@ -26,6 +27,7 @@ static struct of_device_id xilinx_of_bus_ids[] __initdata = {
 static int __init microblaze_device_probe(void)
 {
 	of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
+	of_platform_reset_gpio_probe();
 	return 0;
 }
 device_initcall(microblaze_device_probe);