Diffstat (limited to 'arch/microblaze')
-rw-r--r--  arch/microblaze/Kconfig                          |   67
-rw-r--r--  arch/microblaze/Makefile                         |    5
-rw-r--r--  arch/microblaze/boot/Makefile                    |    6
-rw-r--r--  arch/microblaze/include/asm/device.h             |    4
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h        |  154
-rw-r--r--  arch/microblaze/include/asm/futex.h              |    2
-rw-r--r--  arch/microblaze/include/asm/io.h                 |   36
-rw-r--r--  arch/microblaze/include/asm/irq.h                |   37
-rw-r--r--  arch/microblaze/include/asm/page.h               |   12
-rw-r--r--  arch/microblaze/include/asm/pci-bridge.h         |  195
-rw-r--r--  arch/microblaze/include/asm/pci.h                |  178
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h            |    2
-rw-r--r--  arch/microblaze/include/asm/pgtable.h            |   40
-rw-r--r--  arch/microblaze/include/asm/processor.h          |    1
-rw-r--r--  arch/microblaze/include/asm/prom.h               |   15
-rw-r--r--  arch/microblaze/include/asm/segment.h            |   49
-rw-r--r--  arch/microblaze/include/asm/system.h             |    3
-rw-r--r--  arch/microblaze/include/asm/thread_info.h        |    5
-rw-r--r--  arch/microblaze/include/asm/tlbflush.h           |    5
-rw-r--r--  arch/microblaze/include/asm/uaccess.h            |  447
-rw-r--r--  arch/microblaze/kernel/Makefile                  |    2
-rw-r--r--  arch/microblaze/kernel/asm-offsets.c             |    1
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c               |  211
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c             |    1
-rw-r--r--  arch/microblaze/kernel/dma.c                     |  157
-rw-r--r--  arch/microblaze/kernel/entry.S                   |  116
-rw-r--r--  arch/microblaze/kernel/ftrace.c                  |   12
-rw-r--r--  arch/microblaze/kernel/head.S                    |   21
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S    |  112
-rw-r--r--  arch/microblaze/kernel/irq.c                     |   15
-rw-r--r--  arch/microblaze/kernel/misc.S                    |   15
-rw-r--r--  arch/microblaze/kernel/module.c                  |    1
-rw-r--r--  arch/microblaze/kernel/of_platform.c             |    1
-rw-r--r--  arch/microblaze/kernel/process.c                 |   10
-rw-r--r--  arch/microblaze/kernel/ptrace.c                  |    1
-rw-r--r--  arch/microblaze/kernel/setup.c                   |   69
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c          |    1
-rw-r--r--  arch/microblaze/kernel/traps.c                   |    6
-rw-r--r--  arch/microblaze/lib/Makefile                     |    3
-rw-r--r--  arch/microblaze/lib/fastcopy.S                   |    6
-rw-r--r--  arch/microblaze/lib/memcpy.c                     |    2
-rw-r--r--  arch/microblaze/lib/memset.c                     |   15
-rw-r--r--  arch/microblaze/lib/uaccess.c                    |   48
-rw-r--r--  arch/microblaze/lib/uaccess_old.S                |   45
-rw-r--r--  arch/microblaze/mm/Makefile                      |    2
-rw-r--r--  arch/microblaze/mm/consistent.c                  |  247
-rw-r--r--  arch/microblaze/mm/fault.c                       |   24
-rw-r--r--  arch/microblaze/mm/init.c                        |   45
-rw-r--r--  arch/microblaze/mm/pgtable.c                     |    4
-rw-r--r--  arch/microblaze/pci/Makefile                     |    6
-rw-r--r--  arch/microblaze/pci/indirect_pci.c               |  163
-rw-r--r--  arch/microblaze/pci/iomap.c                      |   39
-rw-r--r--  arch/microblaze/pci/pci-common.c                 | 1643
-rw-r--r--  arch/microblaze/pci/pci_32.c                     |  431
-rw-r--r--  arch/microblaze/pci/xilinx_pci.c                 |  168
55 files changed, 4256 insertions(+), 650 deletions(-)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index b008168ae946..76818f926539 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -14,6 +14,8 @@ config MICROBLAZE
 	select USB_ARCH_HAS_EHCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_OPROFILE
+	select HAVE_DMA_ATTRS
+	select HAVE_DMA_API_DEBUG
 	select TRACING_SUPPORT
 
 config SWAP
@@ -73,12 +75,6 @@ config LOCKDEP_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
-config PCI
-	def_bool n
-
-config NO_DMA
-	def_bool y
-
 config DTC
 	def_bool y
 
@@ -146,7 +142,6 @@ menu "Advanced setup"
 
 config ADVANCED_OPTIONS
 	bool "Prompt for advanced kernel configuration options"
-	depends on MMU
 	help
 	  This option will enable prompting for a variety of advanced kernel
 	  configuration options. These options can cause the kernel to not
@@ -158,6 +153,15 @@ config ADVANCED_OPTIONS
 comment "Default settings for advanced configuration options are used"
 	depends on !ADVANCED_OPTIONS
 
+config XILINX_UNCACHED_SHADOW
+	bool "Are you using uncached shadow for RAM ?"
+	depends on ADVANCED_OPTIONS && !MMU
+	default n
+	help
+	  This is needed to be able to allocate uncachable memory regions.
+	  The feature requires the design to define the RAM memory controller
+	  window to be twice as large as the actual physical memory.
+
 config HIGHMEM_START_BOOL
 	bool "Set high memory pool address"
 	depends on ADVANCED_OPTIONS && HIGHMEM
@@ -175,7 +179,7 @@ config HIGHMEM_START
 
 config LOWMEM_SIZE_BOOL
 	bool "Set maximum low memory"
-	depends on ADVANCED_OPTIONS
+	depends on ADVANCED_OPTIONS && MMU
 	help
 	  This option allows you to set the maximum amount of memory which
 	  will be used as "low memory", that is, memory which the kernel can
@@ -187,7 +191,6 @@ config LOWMEM_SIZE_BOOL
 
 config LOWMEM_SIZE
 	hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
-	depends on MMU
 	default "0x30000000"
 
 config KERNEL_START_BOOL
@@ -208,7 +211,7 @@ config KERNEL_START
 
 config TASK_SIZE_BOOL
 	bool "Set custom user task size"
-	depends on ADVANCED_OPTIONS
+	depends on ADVANCED_OPTIONS && MMU
 	help
 	  This option allows you to set the amount of virtual address space
 	  allocated to user tasks. This can be useful in optimizing the
@@ -218,42 +221,34 @@ config TASK_SIZE_BOOL
 
 config TASK_SIZE
 	hex "Size of user task space" if TASK_SIZE_BOOL
-	depends on MMU
 	default "0x80000000"
 
-config CONSISTENT_START_BOOL
-	bool "Set custom consistent memory pool address"
-	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-	help
-	  This option allows you to set the base virtual address
-	  of the the consistent memory pool. This pool of virtual
-	  memory is used to make consistent memory allocations.
+endmenu
 
-config CONSISTENT_START
-	hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
-	depends on MMU
-	default "0xff100000" if NOT_COHERENT_CACHE
+source "mm/Kconfig"
 
-config CONSISTENT_SIZE_BOOL
-	bool "Set custom consistent memory pool size"
-	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-	help
-	  This option allows you to set the size of the the
-	  consistent memory pool. This pool of virtual memory
-	  is used to make consistent memory allocations.
+menu "Exectuable file formats"
 
-config CONSISTENT_SIZE
-	hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
-	depends on MMU
-	default "0x00200000" if NOT_COHERENT_CACHE
+source "fs/Kconfig.binfmt"
 
 endmenu
 
-source "mm/Kconfig"
-
-menu "Exectuable file formats"
-
-source "fs/Kconfig.binfmt"
+menu "Bus Options"
+
+config PCI
+	bool "PCI support"
+
+config PCI_DOMAINS
+	def_bool PCI
+
+config PCI_SYSCALL
+	def_bool PCI
+
+config PCI_XILINX
+	bool "Xilinx PCI host bridge support"
+	depends on PCI
+
+source "drivers/pci/Kconfig"
 
 endmenu
 
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d2d6cfcb1a30..72f6e8583746 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -50,6 +50,7 @@ libs-y += $(LIBGCC)
 core-y += arch/microblaze/kernel/
 core-y += arch/microblaze/mm/
 core-y += arch/microblaze/platform/
+core-$(CONFIG_PCI) += arch/microblaze/pci/
 
 drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
 
@@ -83,7 +84,7 @@ define archhelp
   echo '* linux.bin    - Create raw binary'
   echo '  linux.bin.gz - Create compressed raw binary'
   echo '  simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
-  echo '                   - stripped elf with fdt blob
+  echo '                   - stripped elf with fdt blob'
   echo '  simpleImage.<dt>.unstrip - full ELF image with fdt blob'
   echo '  *_defconfig    - Select default config from arch/microblaze/configs'
   echo ''
@@ -93,3 +94,5 @@ define archhelp
   echo '  name of a dts file from the arch/microblaze/boot/dts/ directory'
   echo '  (minus the .dts extension).'
 endef
+
+MRPROPER_FILES += $(boot)/simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 902cf9846c3c..57f50c2371c6 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
 endif
 
 $(obj)/linux.bin: vmlinux FORCE
-	[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
-	touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
 	$(call if_changed,objcopy)
 	$(call if_changed,uimage)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC $@
 $(obj)/%.dtb: $(dtstree)/%.dts FORCE
 	$(call if_changed,dtc)
 
-clean-kernel += linux.bin linux.bin.gz simpleImage.*
-
-clean-files += *.dtb simpleImage.*.unstrip
+clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index 78a038452c0f..402b46e630f6 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -14,6 +14,10 @@ struct device_node;
 struct dev_archdata {
 	/* Optional pointer to an OF device node */
 	struct device_node	*of_node;
+
+	/* DMA operations on that device */
+	struct dma_map_ops	*dma_ops;
+	void			*dma_data;
 };
 
 struct pdev_archdata {
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index d00e40099165..18b3731c8509 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -1 +1,153 @@
-#include <asm-generic/dma-mapping-broken.h>
+/*
+ * Implements the generic device dma API for microblaze and the pci
+ *
+ * Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009-2010 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * This file is base on powerpc and x86 dma-mapping.h versions
+ * Copyright (C) 2004 IBM
+ */
+
+#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
+#define _ASM_MICROBLAZE_DMA_MAPPING_H
+
+/*
+ * See Documentation/PCI/PCI-DMA-mapping.txt and
+ * Documentation/DMA-API.txt for documentation.
+ */
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+#include <asm/io.h>
+#include <asm-generic/dma-coherent.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
+
+#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
+#define __dma_free_coherent(size, addr)		((void)0)
+#define __dma_sync(addr, size, rw)		((void)0)
+
+static inline unsigned long device_to_mask(struct device *dev)
+{
+	if (dev->dma_mask && *dev->dma_mask)
+		return *dev->dma_mask;
+	/* Assume devices without mask can take 32 bit addresses */
+	return 0xfffffffful;
+}
+
+extern struct dma_map_ops *dma_ops;
+
+/*
+ * Available generic sets of operations
+ */
+extern struct dma_map_ops dma_direct_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	/* We don't handle the NULL dev case for ISA for now. We could
+	 * do it via an out of line call but it is not needed for now. The
+	 * only ISA DMA device we support is the floppy and we have a hack
+	 * in the floppy driver directly to get a device for us.
+	 */
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return NULL;
+
+	return dev->archdata.dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+	dev->archdata.dma_ops = ops;
+}
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (unlikely(!ops))
+		return 0;
+	if (!ops->dma_supported)
+		return 1;
+	return ops->dma_supported(dev, mask);
+}
+
+#ifdef CONFIG_PCI
+/* We have our own implementation of pci_set_dma_mask() */
+#define HAVE_ARCH_PCI_SET_DMA_MASK
+
+#endif
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (unlikely(ops == NULL))
+		return -EIO;
+	if (ops->set_dma_mask)
+		return ops->set_dma_mask(dev, dma_mask);
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+	*dev->dma_mask = dma_mask;
+	return 0;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
+
+	return (dma_addr == DMA_ERROR_CODE);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h)	(1)
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	BUG_ON(!ops);
+
+	memory = ops->alloc_coherent(dev, size, dma_handle, flag);
+
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+	return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!ops);
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	return L1_CACHE_BYTES;
+}
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	__dma_sync(vaddr, size, (int)direction);
+}
+
+#endif	/* _ASM_MICROBLAZE_DMA_MAPPING_H */
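
Note: with this header, every DMA call dispatches through the per-device
struct dma_map_ops hung off dev->archdata (see the device.h change above and
kernel/dma.c in the diffstat). A minimal consumer sketch, assuming a
hypothetical driver that already holds a bound struct device *dev; the names
below are illustrative and not part of this patch:

	#include <linux/dma-mapping.h>

	static int example_setup_ring(struct device *dev)
	{
		dma_addr_t ring_dma;
		void *ring;

		/* fails with -EIO when no dma_map_ops are installed */
		if (dma_set_mask(dev, DMA_BIT_MASK(32)))
			return -EIO;

		/* dispatches to get_dma_ops(dev)->alloc_coherent() */
		ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		/* ... program ring_dma into the device, use the ring ... */

		dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
		return 0;
	}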
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 8dbb6e7a03a2..ad3fd61b2fe7 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -55,7 +55,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 		__futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and %1,%0,%4;", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("andn %1,%0,%4;", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_XOR:
 		__futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg);
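
Note: the one-instruction change above is a real bug fix, not a cleanup.
FUTEX_OP_ANDN must store oldval & ~oparg, and the MicroBlaze "andn"
instruction performs that and-with-complement directly, whereas the old
"and" computed oldval & oparg. Equivalent C, for illustration only:

	/* expected result per the FUTEX_OP_* semantics in <linux/futex.h> */
	static int futex_op_result(int op, int oldval, int oparg)
	{
		switch (op) {
		case FUTEX_OP_OR:
			return oldval | oparg;
		case FUTEX_OP_ANDN:
			return oldval & ~oparg;	/* "and" gave oldval & oparg */
		case FUTEX_OP_XOR:
			return oldval ^ oparg;
		}
		return oldval;
	}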
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 267c7c779e53..e45a6eea92e0 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -15,7 +15,23 @@
 #include <asm/page.h>
 #include <linux/types.h>
 #include <linux/mm.h>          /* Get struct page {...} */
+#include <asm-generic/iomap.h>
 
+#ifndef CONFIG_PCI
+#define _IO_BASE	0
+#define _ISA_MEM_BASE	0
+#define PCI_DRAM_OFFSET	0
+#else
+#define _IO_BASE	isa_io_base
+#define _ISA_MEM_BASE	isa_mem_base
+#define PCI_DRAM_OFFSET	pci_dram_offset
+#endif
+
+extern unsigned long isa_io_base;
+extern unsigned long pci_io_base;
+extern unsigned long pci_dram_offset;
+
+extern resource_size_t isa_mem_base;
 
 #define IO_SPACE_LIMIT (0xFFFFFFFF)
 
@@ -92,6 +108,11 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define iowrite16(v, addr)	__raw_writew((u16)(v), (u16 *)(addr))
 #define iowrite32(v, addr)	__raw_writel((u32)(v), (u32 *)(addr))
 
+#define ioread16be(addr)	__raw_readw((u16 *)(addr))
+#define ioread32be(addr)	__raw_readl((u32 *)(addr))
+#define iowrite16be(v, addr)	__raw_writew((u16)(v), (u16 *)(addr))
+#define iowrite32be(v, addr)	__raw_writel((u32)(v), (u32 *)(addr))
+
 /* These are the definitions for the x86 IO instructions
  * inb/inw/inl/outb/outw/outl, the "string" versions
  * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
@@ -124,9 +145,6 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define virt_to_phys(addr)	((unsigned long)__virt_to_phys(addr))
 #define virt_to_bus(addr)	((unsigned long)__virt_to_phys(addr))
 
-#define __page_address(page) \
-		(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define page_to_phys(page)	virt_to_phys((void *)__page_address(page))
 #define page_to_bus(page)	(page_to_phys(page))
 #define bus_to_virt(addr)	(phys_to_virt(addr))
 
@@ -227,15 +245,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
 #define out_8(a, v) __raw_writeb((v), (a))
 #define in_8(a) __raw_readb(a)
 
-/* FIXME */
-static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
-	return (void __iomem *) (port);
-}
-
-static inline void ioport_unmap(void __iomem *addr)
-{
-	/* Nothing to do */
-}
+#define ioport_map(port, nr)	((void __iomem *)(port))
+#define ioport_unmap(addr)
 
 #endif /* _ASM_MICROBLAZE_IO_H */
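
Note: MicroBlaze is natively big-endian here, so the new ioread*be/iowrite*be
accessors can simply alias the __raw_* operations with no byte swapping. A
usage sketch against a hypothetical device register window (the offset and
bit value are invented for illustration):

	static void example_enable(void __iomem *regs)
	{
		u32 ctrl = ioread32be(regs + 0x10);	/* plain __raw_readl here */

		iowrite32be(ctrl | 0x1, regs + 0x10);
	}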
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index 90f050535ebe..31a35c33df63 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -14,6 +14,12 @@
 
 #include <linux/interrupt.h>
 
+/* This type is the placeholder for a hardware interrupt number. It has to
+ * be big enough to enclose whatever representation is used by a given
+ * platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
 extern unsigned int nr_irq;
 
 #define NO_IRQ (-1)
@@ -21,7 +27,8 @@ extern unsigned int nr_irq;
 struct pt_regs;
 extern void do_IRQ(struct pt_regs *regs);
 
-/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
+/**
+ * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
  * @device: Device node of the device whose interrupt is to be mapped
  * @index: Index of the interrupt to map
  *
@@ -40,4 +47,32 @@ static inline void irq_dispose_mapping(unsigned int virq)
 	return;
 }
 
+struct irq_host;
+
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux virq space
+ * @host: host owning this hardware interrupt or NULL for default host
+ * @hwirq: hardware irq number in that host space
+ *
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * virq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ */
+extern unsigned int irq_create_mapping(struct irq_host *host,
+					irq_hw_number_t hwirq);
+
+/**
+ * irq_create_of_mapping - Map a hardware interrupt into linux virq space
+ * @controller: Device node of the interrupt controller
+ * @inspec: Interrupt specifier from the device-tree
+ * @intsize: Size of the interrupt specifier from the device-tree
+ *
+ * This function is identical to irq_create_mapping except that it takes
+ * as input informations straight from the device-tree (typically the results
+ * of the of_irq_map_*() functions.
+ */
+extern unsigned int irq_create_of_mapping(struct device_node *controller,
+					u32 *intspec, unsigned int intsize);
+
 #endif /* _ASM_MICROBLAZE_IRQ_H */
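
Note: these prototypes follow the powerpc irq_host model, where a hardware
irq number (irq_hw_number_t) is mapped into the Linux virq space before use.
A probe-time sketch, assuming a hypothetical driver with an OF node "np"
carrying an interrupts property (illustrative only):

	static int example_attach_irq(struct device_node *np, void *priv,
				      irq_handler_t handler)
	{
		unsigned int virq = irq_of_parse_and_map(np, 0);

		if (virq == NO_IRQ)
			return -ENODEV;

		/* the virq, not the raw hardware number, goes to request_irq */
		return request_irq(virq, handler, 0, "example", priv);
	}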
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 9b66c0fa9a32..2dd1d04129e0 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -62,12 +62,6 @@ extern unsigned int __page_offset;
 #define PAGE_OFFSET	CONFIG_KERNEL_START
 
 /*
- * MAP_NR -- given an address, calculate the index of the page struct which
- * points to the address's page.
- */
-#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
-
-/*
  * The basic type of a PTE - 32 bit physical addressing.
  */
 typedef unsigned long pte_basic_t;
@@ -154,7 +148,11 @@ extern int page_is_ram(unsigned long pfn);
 #  define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))
 
 #  ifdef CONFIG_MMU
-#  define virt_to_page(kaddr)	(mem_map + MAP_NR(kaddr))
+
+#  define virt_to_page(kaddr)	(pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
+#  define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
+#  define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+
 #  else /* CONFIG_MMU */
 #  define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
 #  define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
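
Note: virt_to_page() on MMU builds now derives the pfn from __pa() instead
of the removed MAP_NR() arithmetic, so it stays correct however mem_map is
laid out. The intended round-trip property, as a sketch (illustration, not
part of the patch):

	static void example_check(void *kaddr)
	{
		struct page *pg = virt_to_page(kaddr);

		/* page_to_virt() recovers the page-aligned kernel address */
		BUG_ON(page_to_virt(pg) !=
		       (void *)((unsigned long)kaddr & PAGE_MASK));
	}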
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h
index 7ad28f6f5f1a..0c77cda9f5d8 100644
--- a/arch/microblaze/include/asm/pci-bridge.h
+++ b/arch/microblaze/include/asm/pci-bridge.h
@@ -1 +1,196 @@
+#ifndef _ASM_MICROBLAZE_PCI_BRIDGE_H
+#define _ASM_MICROBLAZE_PCI_BRIDGE_H
+#ifdef __KERNEL__
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
 #include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+
+struct device_node;
+
+enum {
+	/* Force re-assigning all resources (ignore firmware
+	 * setup completely)
+	 */
+	PCI_REASSIGN_ALL_RSRC	= 0x00000001,
+
+	/* Re-assign all bus numbers */
+	PCI_REASSIGN_ALL_BUS	= 0x00000002,
+
+	/* Do not try to assign, just use existing setup */
+	PCI_PROBE_ONLY		= 0x00000004,
+
+	/* Don't bother with ISA alignment unless the bridge has
+	 * ISA forwarding enabled
+	 */
+	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,
+
+	/* Enable domain numbers in /proc */
+	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,
+	/* ... except for domain 0 */
+	PCI_COMPAT_DOMAIN_0	= 0x00000020,
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+	struct pci_bus *bus;
+	char is_dynamic;
+	struct device_node *dn;
+	struct list_head list_node;
+	struct device *parent;
+
+	int first_busno;
+	int last_busno;
+
+	int self_busno;
+
+	void __iomem *io_base_virt;
+	resource_size_t io_base_phys;
+
+	resource_size_t pci_io_size;
+
+	/* Some machines (PReP) have a non 1:1 mapping of
+	 * the PCI memory space in the CPU bus space
+	 */
+	resource_size_t pci_mem_offset;
+
+	/* Some machines have a special region to forward the ISA
+	 * "memory" cycles such as VGA memory regions. Left to 0
+	 * if unsupported
+	 */
+	resource_size_t isa_mem_phys;
+	resource_size_t isa_mem_size;
+
+	struct pci_ops *ops;
+	unsigned int __iomem *cfg_addr;
+	void __iomem *cfg_data;
+
+	/*
+	 * Used for variants of PCI indirect handling and possible quirks:
+	 *  SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
+	 *  EXT_REG - provides access to PCI-e extended registers
+	 *  SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS
+	 *   on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
+	 *   to determine which bus number to match on when generating type0
+	 *   config cycles
+	 *  NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
+	 *   hanging if we don't have link and try to do config cycles to
+	 *   anything but the PHB. Only allow talking to the PHB if this is
+	 *   set.
+	 *  BIG_ENDIAN - cfg_addr is a big endian register
+	 *  BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs
+	 *   on the PLB4. Effectively disable MRM commands by setting this.
+	 */
+#define INDIRECT_TYPE_SET_CFG_TYPE		0x00000001
+#define INDIRECT_TYPE_EXT_REG			0x00000002
+#define INDIRECT_TYPE_SURPRESS_PRIMARY_BUS	0x00000004
+#define INDIRECT_TYPE_NO_PCIE_LINK		0x00000008
+#define INDIRECT_TYPE_BIG_ENDIAN		0x00000010
+#define INDIRECT_TYPE_BROKEN_MRM		0x00000020
+	u32 indirect_type;
+
+	/* Currently, we limit ourselves to 1 IO range and 3 mem
+	 * ranges since the common pci_bus structure can't handle more
+	 */
+	struct resource io_resource;
+	struct resource mem_resources[3];
+	int global_number;	/* PCI domain number */
+};
+
+static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
+{
+	return bus->sysdata;
+}
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+	/* No specific ISA handling on ppc32 at this stage, it
+	 * all goes through PCI
+	 */
+	return 0;
+}
+
+/* These are used for config access before all the PCI probing
+   has been done. */
+extern int early_read_config_byte(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u8 *val);
+extern int early_read_config_word(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u16 *val);
+extern int early_read_config_dword(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u32 *val);
+extern int early_write_config_byte(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u8 val);
+extern int early_write_config_word(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u16 val);
+extern int early_write_config_dword(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u32 val);
+
+extern int early_find_capability(struct pci_controller *hose, int bus,
+				 int dev_fn, int cap);
+
+extern void setup_indirect_pci(struct pci_controller *hose,
+			       resource_size_t cfg_addr,
+			       resource_size_t cfg_data, u32 flags);
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller *pci_find_hose_for_OF_device(
+			struct device_node *node);
+
+/* Fill up host controller resources from the OF node */
+extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
+			struct device_node *dev, int primary);
+
+/* Allocate & free a PCI host bridge structure */
+extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
+extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_setup_phb_resources(struct pci_controller *hose);
+
+#ifdef CONFIG_PCI
+extern unsigned int pci_flags;
+
+static inline void pci_set_flags(int flags)
+{
+	pci_flags = flags;
+}
+
+static inline void pci_add_flags(int flags)
+{
+	pci_flags |= flags;
+}
+
+static inline int pci_has_flag(int flag)
+{
+	return pci_flags & flag;
+}
+
+extern struct list_head hose_list;
+
+extern unsigned long pci_address_to_pio(phys_addr_t address);
+extern int pcibios_vaddr_is_ioport(void __iomem *address);
+#else
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+	return (unsigned long)-1;
+}
+static inline int pcibios_vaddr_is_ioport(void __iomem *address)
+{
+	return 0;
+}
+
+static inline void pci_set_flags(int flags) { }
+static inline void pci_add_flags(int flags) { }
+static inline int pci_has_flag(int flag)
+{
+	return 0;
+}
+#endif	/* CONFIG_PCI */
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_MICROBLAZE_PCI_BRIDGE_H */
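
Note: each host bridge becomes a struct pci_controller chained on hose_list
via its list_node member, which is how pci-common.c (later in this series)
walks the bridges. A traversal sketch (illustrative helper, not from the
patch):

	static void example_dump_hoses(void)
	{
		struct pci_controller *hose;

		list_for_each_entry(hose, &hose_list, list_node)
			printk(KERN_INFO "PCI host %d: busses %d-%d\n",
			       hose->global_number, hose->first_busno,
			       hose->last_busno);
	}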
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 9f0df5faf2c8..bdd65aaee30d 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -1 +1,177 @@
-#include <asm-generic/pci.h>
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Based on powerpc version
+ */
+
+#ifndef __ASM_MICROBLAZE_PCI_H
+#define __ASM_MICROBLAZE_PCI_H
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+
+#define PCIBIOS_MIN_IO		0x1000
+#define PCIBIOS_MIN_MEM		0x10000000
+
+struct pci_dev;
+
+/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
+#define IOBASE_BRIDGE_NUMBER	0
+#define IOBASE_MEMORY		1
+#define IOBASE_IO		2
+#define IOBASE_ISA_IO		3
+#define IOBASE_ISA_MEM		4
+
+#define pcibios_scan_all_fns(a, b)	0
+
+/*
+ * Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers (don't do that on ppc64 yet !)
+ */
+#define pcibios_assign_all_busses() \
+	(pci_has_flag(PCI_REASSIGN_ALL_BUS))
+
+static inline void pcibios_set_master(struct pci_dev *dev)
+{
+	/* No special bus mastering setup handling */
+}
+
+static inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+	/* We don't do dynamic PCI IRQ allocation */
+}
+
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+extern struct dma_map_ops *get_pci_dma_ops(void);
+#else	/* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops()	NULL
+#endif
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+					enum pci_dma_burst_strategy *strat,
+					unsigned long *strategy_parameter)
+{
+	*strat = PCI_DMA_BURST_INFINITY;
+	*strategy_parameter = ~0UL;
+}
+#endif
+
+extern int pci_domain_nr(struct pci_bus *bus);
+
+/* Decide whether to display the domain number in /proc */
+extern int pci_proc_domain(struct pci_bus *bus);
+
+struct vm_area_struct;
+/* Map a range of PCI memory or I/O space for a device into user space */
+int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine);
+
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
+#define HAVE_PCI_MMAP	1
+
+extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
+			   size_t count);
+extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
+			    size_t count);
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+				      struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state);
+
+#define HAVE_PCI_LEGACY	1
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME)		(0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
+
+/* The PCI address space does equal the physical memory
+ * address space (no IOMMU). The IDE and SCSI device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS	(1)
+
+extern void pcibios_resource_to_bus(struct pci_dev *dev,
+			struct pci_bus_region *region,
+			struct resource *res);
+
+extern void pcibios_bus_to_resource(struct pci_dev *dev,
+			struct resource *res,
+			struct pci_bus_region *region);
+
+static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
+			struct resource *res)
+{
+	struct resource *root = NULL;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &ioport_resource;
+	if (res->flags & IORESOURCE_MEM)
+		root = &iomem_resource;
+
+	return root;
+}
+
+extern void pcibios_claim_one_bus(struct pci_bus *b);
+
+extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
+
+extern void pcibios_resource_survey(void);
+
+extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
+extern int remove_phb_dynamic(struct pci_controller *phb);
+
+extern struct pci_dev *of_create_pci_dev(struct device_node *node,
+					struct pci_bus *bus, int devfn);
+
+extern void of_scan_pci_bridge(struct device_node *node,
+			       struct pci_dev *dev);
+
+extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
+extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
+
+extern int pci_read_irq_line(struct pci_dev *dev);
+
+extern int pci_bus_find_capability(struct pci_bus *bus,
+				   unsigned int devfn, int cap);
+
+struct file;
+extern pgprot_t	pci_phys_mem_access_prot(struct file *file,
+					 unsigned long pfn,
+					 unsigned long size,
+					 pgprot_t prot);
+
+#define HAVE_ARCH_PCI_RESOURCE_TO_USER
+extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
+				 const struct resource *rsrc,
+				 resource_size_t *start, resource_size_t *end);
+
+extern void pcibios_setup_bus_devices(struct pci_bus *bus);
+extern void pcibios_setup_bus_self(struct pci_bus *bus);
+
+/* This part of code was originaly in xilinx-pci.h */
+#ifdef CONFIG_PCI_XILINX
+extern void __init xilinx_pci_init(void);
+#else
+static inline void __init xilinx_pci_init(void) { return; }
+#endif
+
+#endif	/* __KERNEL__ */
+#endif	/* __ASM_MICROBLAZE_PCI_H */
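
Note: pci_read_irq_line() resolves a device's interrupt from the device
tree and is expected to fill pdev->irq; the 0-on-success convention below is
an assumption carried over from the powerpc original. Hypothetical driver
fragment:

	static int example_pci_probe_irq(struct pci_dev *pdev)
	{
		if (pci_read_irq_line(pdev))
			return -ENODEV;

		return pdev->irq;	/* now a usable Linux virq */
	}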
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 7547f5064560..f44b0d696fe2 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -19,6 +19,7 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/cache.h>
+#include <asm/pgtable.h>
 
 #define PGDIR_ORDER	0
 
@@ -111,7 +112,6 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 		unsigned long address)
 {
 	pte_t *pte;
-	extern int mem_init_done;
 	extern void *early_get_page(void);
 	if (mem_init_done) {
 		pte = (pte_t *)__get_free_page(GFP_KERNEL |
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index cc3a4dfc3eaa..dd2bb60651c7 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -16,6 +16,10 @@
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
+#ifndef __ASSEMBLY__
+extern int mem_init_done;
+#endif
+
 #ifndef CONFIG_MMU
 
 #define pgd_present(pgd)	(1) /* pages are always present on non MMU */
@@ -51,6 +55,8 @@ static inline int pte_file(pte_t pte) { return 0; }
 
 #define arch_enter_lazy_cpu_mode()	do {} while (0)
 
+#define pgprot_noncached_wc(prot)	prot
+
 #else /* CONFIG_MMU */
 
 #include <asm-generic/4level-fixup.h>
@@ -68,7 +74,6 @@ static inline int pte_file(pte_t pte) { return 0; }
 
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
-extern unsigned long ioremap_bot, ioremap_base;
 
 /*
  * The following only work if pte_present() is true.
@@ -85,11 +90,25 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #define VMALLOC_START	(CONFIG_KERNEL_START + \
				max(32 * 1024 * 1024UL, memory_size))
 #define VMALLOC_END	ioremap_bot
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 
 #endif /* __ASSEMBLY__ */
 
 /*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
+							_PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot) \
+			(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+					_PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) \
+			 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+							_PAGE_NO_CACHE))
+
+/*
  * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
@@ -397,7 +416,7 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 	mts rmsr, %2\n\
 	nop"
 	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
-	: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
+	: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
 	: "cc");
 
 	return old;
@@ -566,18 +585,11 @@ void mapin_ram(void);
 int map_page(unsigned long va, phys_addr_t pa, int flags);
 
 extern int mem_init_done;
-extern unsigned long ioremap_base;
-extern unsigned long ioremap_bot;
 
 asmlinkage void __init mmu_init(void);
 
 void __init *early_get_page(void);
 
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
-void consistent_free(void *vaddr);
-void consistent_sync(void *vaddr, size_t size, int direction);
-void consistent_sync_page(struct page *page, unsigned long offset,
-	size_t size, int direction);
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 
@@ -586,6 +598,14 @@ void consistent_sync_page(struct page *page, unsigned long offset,
 #ifndef __ASSEMBLY__
 #include <asm-generic/pgtable.h>
 
+extern unsigned long ioremap_bot, ioremap_base;
+
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
+void consistent_free(void *vaddr);
+void consistent_sync(void *vaddr, size_t size, int direction);
+void consistent_sync_page(struct page *page, unsigned long offset,
+			size_t size, int direction);
+
 void setup_memory(void);
 #endif /* __ASSEMBLY__ */
 
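
Note: pgprot_noncached()/pgprot_noncached_wc() first clear all of
_PAGE_CACHE_CTL and then set _PAGE_NO_CACHE (plus _PAGE_GUARDED for the
strict variant); this is the building block that pci_mmap_page_range() and
driver mmap methods rely on. A sketch of a driver mmap handler using it
(hypothetical code, not from this patch):

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* map a register window to user space uncached */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					  vma->vm_end - vma->vm_start,
					  vma->vm_page_prot);
	}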
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 563c6b9453f0..8eeb09211ece 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -14,7 +14,6 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 #include <asm/registers.h>
-#include <asm/segment.h>
 #include <asm/entry.h>
 #include <asm/current.h>
 
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 03f45a963204..e7d67a329bd7 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -31,6 +31,21 @@
 /* Other Prototypes */
 extern int early_uartlite_console(void);
 
+#ifdef CONFIG_PCI
+/*
+ * PCI <-> OF matching functions
+ * (XXX should these be here?)
+ */
+struct pci_bus;
+struct pci_dev;
+extern int pci_device_from_OF_node(struct device_node *node,
+					u8 *bus, u8 *devfn);
+extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus,
+							int devfn);
+extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
+extern void pci_create_OF_bus_map(void);
+#endif
+
 /*
  * OF address retreival & translation
  */
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
deleted file mode 100644
index 0e7102c3fb11..000000000000
--- a/arch/microblaze/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2008-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SEGMENT_H
-#define _ASM_MICROBLAZE_SEGMENT_H
-
-# ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
-/*
- * On Microblaze the fs value is actually the top of the corresponding
- * address space.
- *
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
- */
-# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-# ifndef CONFIG_MMU
-# define KERNEL_DS	MAKE_MM_SEG(0)
-# define USER_DS	KERNEL_DS
-# else
-# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
-# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
-# endif
-
-# define get_ds()	(KERNEL_DS)
-# define get_fs()	(current_thread_info()->addr_limit)
-# define set_fs(val)	(current_thread_info()->addr_limit = (val))
-
-# define segment_eq(a, b)	((a).seg == (b).seg)
-
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_MICROBLAZE_SEGMENT_H */
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 157970688b2a..59efb3fef957 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -87,6 +87,9 @@ void free_initmem(void);
 extern char *klimit;
 extern void ret_from_fork(void);
 
+extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
 #ifdef CONFIG_DEBUG_FS
 extern struct dentry *of_debugfs_root;
 #endif
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 6e92885d381a..b2ca80f64640 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -19,7 +19,6 @@
 #ifndef __ASSEMBLY__
 # include <linux/types.h>
 # include <asm/processor.h>
-# include <asm/segment.h>
 
 /*
  * low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
 	__u32	fsr;
 };
 
+typedef struct {
+	unsigned long	seg;
+} mm_segment_t;
+
 struct thread_info {
 	struct task_struct	*task; /* main task structure */
 	struct exec_domain	*exec_domain; /* execution domain */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index 10ec70cd8735..2e1353c2d18d 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -23,7 +23,8 @@
 extern void _tlbie(unsigned long address);
 extern void _tlbia(void);
 
-#define __tlbia()	_tlbia()
+#define __tlbia()	{ preempt_disable(); _tlbia(); preempt_enable(); }
+#define __tlbie(x)	{ _tlbie(x); }
 
 static inline void local_flush_tlb_all(void)
 	{ __tlbia(); }
@@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	{ __tlbia(); }
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
 				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
+	{ __tlbie(vmaddr); }
 static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 	{ __tlbia(); }
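
Note: the full invalidate now runs with preemption disabled, presumably so
the multi-entry _tlbia() sequence cannot be interrupted by a context switch
part-way through. Written out as a function, the new __tlbia() macro is
equivalent to:

	static inline void example_flush_all_tlb(void)
	{
		preempt_disable();
		_tlbia();		/* invalidates every TLB entry */
		preempt_enable();
	}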
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 371bd6e56d9a..446bec29b142 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -22,101 +22,73 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/segment.h>
 #include <linux/string.h>
 
 #define VERIFY_READ	0
 #define VERIFY_WRITE	1
 
-#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)
-
-#ifndef CONFIG_MMU
-
-extern int ___range_ok(unsigned long addr, unsigned long size);
-
-#define __range_ok(addr, size) \
-		___range_ok((unsigned long)(addr), (unsigned long)(size))
-
-#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
-#define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
-
-/* Undefined function to trigger linker error */
-extern int bad_user_access_length(void);
-
-/* FIXME this is function for optimalization -> memcpy */
-#define __get_user(var, ptr)				\
-({							\
-	int __gu_err = 0;				\
-	switch (sizeof(*(ptr))) {			\
-	case 1:						\
-	case 2:						\
-	case 4:						\
-		(var) = *(ptr);				\
-		break;					\
-	case 8:						\
-		memcpy((void *) &(var), (ptr), 8);	\
-		break;					\
-	default:					\
-		(var) = 0;				\
-		__gu_err = __get_user_bad();		\
-		break;					\
-	}						\
-	__gu_err;					\
-})
+/*
+ * On Microblaze the fs value is actually the top of the corresponding
+ * address space.
+ *
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
+ */
+# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
 
-#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
+# ifndef CONFIG_MMU
+# define KERNEL_DS	MAKE_MM_SEG(0)
+# define USER_DS	KERNEL_DS
+# else
+# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
+# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
+# endif
 
-/* FIXME is not there defined __pu_val */
-#define __put_user(var, ptr)					\
-({								\
-	int __pu_err = 0;					\
-	switch (sizeof(*(ptr))) {				\
-	case 1:							\
-	case 2:							\
-	case 4:							\
-		*(ptr) = (var);					\
-		break;						\
-	case 8: {						\
-		typeof(*(ptr)) __pu_val = (var);		\
-		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
-	}							\
-		break;						\
-	default:						\
-		__pu_err = __put_user_bad();			\
-		break;						\
-	}							\
-	__pu_err;						\
-})
+# define get_ds()	(KERNEL_DS)
+# define get_fs()	(current_thread_info()->addr_limit)
+# define set_fs(val)	(current_thread_info()->addr_limit = (val))
 
-#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))
+# define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define put_user(x, ptr) __put_user((x), (ptr))
-#define get_user(x, ptr) __get_user((x), (ptr))
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
 
-#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
-#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
 
-#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
-#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
-#define __copy_to_user_inatomic(to, from, n) \
-			(__copy_to_user((to), (from), (n)))
-#define __copy_from_user_inatomic(to, from, n) \
-			(__copy_from_user((to), (from), (n)))
+#ifndef CONFIG_MMU
 
-static inline unsigned long clear_user(void *addr, unsigned long size)
+/* Check against bounds of physical memory */
+static inline int ___range_ok(unsigned long addr, unsigned long size)
 {
-	if (access_ok(VERIFY_WRITE, addr, size))
-		size = __clear_user(addr, size);
-	return size;
+	return ((addr < memory_start) ||
+		((addr + size) > memory_end));
 }
 
-/* Returns 0 if exception not found and fixup otherwise. */
-extern unsigned long search_exception_table(unsigned long);
+#define __range_ok(addr, size) \
+		___range_ok((unsigned long)(addr), (unsigned long)(size))
 
-extern long strncpy_from_user(char *dst, const char *src, long count);
-extern long strnlen_user(const char *src, long count);
+#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
 
-#else /* CONFIG_MMU */
+#else
 
 /*
  * Address is valid if:
@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count);
 /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
  type?"WRITE":"READ",addr,size,get_fs().seg)) */
 
-/*
- * All the __XXX versions macros/functions below do not perform
- * access checking. It is assumed that the necessary checks have been
- * already performed before the finction (macro) is called.
- */
+#endif
 
-#define get_user(x, ptr) \
-({ \
-	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
-		? __get_user((x), (ptr)) : -EFAULT; \
-})
+#ifdef CONFIG_MMU
+# define __FIXUP_SECTION	".section .fixup,\"ax\"\n"
+# define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"
+#else
+# define __FIXUP_SECTION	".section .discard,\"ax\"\n"
+# define __EX_TABLE_SECTION	".section .discard,\"a\"\n"
+#endif
 
-#define put_user(x, ptr) \
-({ \
-	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
-		? __put_user((x), (ptr)) : -EFAULT; \
+extern unsigned long __copy_tofrom_user(void __user *to,
+		const void __user *from, unsigned long size);
+
+/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
+static inline unsigned long __must_check __clear_user(void __user *to,
+							unsigned long n)
+{
+	/* normal memset with two words to __ex_table */
+	__asm__ __volatile__ (				\
+			"1:	sb	r0, %2, r0;"	\
+			"	addik	%0, %0, -1;"	\
+			"	bneid	%0, 1b;"	\
+			"	addik	%2, %2, 1;"	\
+			"2:			"	\
+			__EX_TABLE_SECTION		\
+			".word	1b,2b;"			\
+			".previous;"			\
+		: "=r"(n)				\
+		: "0"(n), "r"(to)
+	);
+	return n;
+}
+
+static inline unsigned long __must_check clear_user(void __user *to,
+							unsigned long n)
+{
+	might_sleep();
+	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+		return n;
+
+	return __clear_user(to, n);
+}
+
+/* put_user and get_user macros */
+extern long __user_bad(void);
+
+#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
+({								\
+	__asm__ __volatile__ (					\
+			"1:"	insn	" %1, %2, r0;"		\
+			"	addk	%0, r0, r0;"		\
+			"2:			"		\
+			__FIXUP_SECTION				\
+			"3:	brid	2b;"			\
+			"	addik	%0, r0, %3;"		\
+			".previous;"				\
+			__EX_TABLE_SECTION			\
+			".word	1b,3b;"				\
+			".previous;"				\
+		: "=&r"(__gu_err), "=r"(__gu_val)		\
+		: "r"(__gu_ptr), "i"(-EFAULT)			\
+	);							\
 })
 
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+
 #define __get_user(x, ptr)						\
 ({									\
 	unsigned long __gu_val;						\
@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count);
163 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ 199 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
164 break; \ 200 break; \
165 default: \ 201 default: \
166 __gu_val = 0; __gu_err = -EINVAL; \ 202 /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
167 } \ 203 } \
168 x = (__typeof__(*(ptr))) __gu_val; \ 204 x = (__typeof__(*(ptr))) __gu_val; \
169 __gu_err; \ 205 __gu_err; \
170}) 206})
171 207
172#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ 208
209#define get_user(x, ptr) \
173({ \ 210({ \
174 __asm__ __volatile__ ( \ 211 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
175 "1:" insn " %1, %2, r0; \ 212 ? __get_user((x), (ptr)) : -EFAULT; \
176 addk %0, r0, r0; \ 213})
177 2: \ 214
178 .section .fixup,\"ax\"; \ 215#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
179 3: brid 2b; \ 216({ \
180 addik %0, r0, %3; \ 217 __asm__ __volatile__ ( \
181 .previous; \ 218 "1:" insn " %1, %2, r0;" \
182 .section __ex_table,\"a\"; \ 219 " addk %0, r0, r0;" \
183 .word 1b,3b; \ 220 "2: " \
184 .previous;" \ 221 __FIXUP_SECTION \
185 : "=r"(__gu_err), "=r"(__gu_val) \ 222 "3: brid 2b;" \
186 : "r"(__gu_ptr), "i"(-EFAULT) \ 223 " addik %0, r0, %3;" \
187 ); \ 224 ".previous;" \
225 __EX_TABLE_SECTION \
226 ".word 1b,3b;" \
227 ".previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
230 ); \
188}) 231})
189 232
233#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
234({ \
235 __asm__ __volatile__ (" lwi %0, %1, 0;" \
236 "1: swi %0, %2, 0;" \
237 " lwi %0, %1, 4;" \
238 "2: swi %0, %2, 4;" \
239 " addk %0, r0, r0;" \
240 "3: " \
241 __FIXUP_SECTION \
242 "4: brid 3b;" \
243 " addik %0, r0, %3;" \
244 ".previous;" \
245 __EX_TABLE_SECTION \
246 ".word 1b,4b,2b,4b;" \
247 ".previous;" \
248 : "=&r"(__gu_err) \
249 : "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
250 ); \
251})
252
253/**
254 * put_user: - Write a simple value into user space.
255 * @x: Value to copy to user space.
256 * @ptr: Destination address, in user space.
257 *
258 * Context: User context only. This function may sleep.
259 *
260 * This macro copies a single simple value from kernel space to user
261 * space. It supports simple types like char and int, but not larger
262 * data types like structures or arrays.
263 *
264 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
265 * to the result of dereferencing @ptr.
266 *
267 * Returns zero on success, or -EFAULT on error.
268 */
269
190#define __put_user(x, ptr) \ 270#define __put_user(x, ptr) \
191({ \ 271({ \
192 __typeof__(*(ptr)) volatile __gu_val = (x); \ 272 __typeof__(*(ptr)) volatile __gu_val = (x); \
@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count);
195 case 1: \ 275 case 1: \
196 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \ 276 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \
197 break; \ 277 break; \
198 case 2: \ 278 case 2: \
199 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \ 279 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \
200 break; \ 280 break; \
201 case 4: \ 281 case 4: \
@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count);
205 __put_user_asm_8((ptr), __gu_val, __gu_err); \ 285 __put_user_asm_8((ptr), __gu_val, __gu_err); \
206 break; \ 286 break; \
207 default: \ 287 default: \
208 __gu_err = -EINVAL; \ 288 /*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \
209 } \ 289 } \
210 __gu_err; \ 290 __gu_err; \
211}) 291})
212 292
213#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \ 293#ifndef CONFIG_MMU
214({ \
215__asm__ __volatile__ (" lwi %0, %1, 0; \
216 1: swi %0, %2, 0; \
217 lwi %0, %1, 4; \
218 2: swi %0, %2, 4; \
219 addk %0,r0,r0; \
220 3: \
221 .section .fixup,\"ax\"; \
222 4: brid 3b; \
223 addik %0, r0, %3; \
224 .previous; \
225 .section __ex_table,\"a\"; \
226 .word 1b,4b,2b,4b; \
227 .previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(&__gu_val), \
230 "r"(__gu_ptr), "i"(-EFAULT) \
231 ); \
232})
233 294
234#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \ 295#define put_user(x, ptr) __put_user((x), (ptr))
235({ \
236 __asm__ __volatile__ ( \
237 "1:" insn " %1, %2, r0; \
238 addk %0, r0, r0; \
239 2: \
240 .section .fixup,\"ax\"; \
241 3: brid 2b; \
242 addik %0, r0, %3; \
243 .previous; \
244 .section __ex_table,\"a\"; \
245 .word 1b,3b; \
246 .previous;" \
247 : "=r"(__gu_err) \
248 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
249 ); \
250})
251 296
252/* 297#else /* CONFIG_MMU */
253 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
254 */
255static inline int clear_user(char *to, int size)
256{
257 if (size && access_ok(VERIFY_WRITE, to, size)) {
258 __asm__ __volatile__ (" \
259 1: \
260 sb r0, %2, r0; \
261 addik %0, %0, -1; \
262 bneid %0, 1b; \
263 addik %2, %2, 1; \
264 2: \
265 .section __ex_table,\"a\"; \
266 .word 1b,2b; \
267 .section .text;" \
268 : "=r"(size) \
269 : "0"(size), "r"(to)
270 );
271 }
272 return size;
273}
274 298
275#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n)) 299#define put_user(x, ptr) \
300({ \
301 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
302 ? __put_user((x), (ptr)) : -EFAULT; \
303})
304#endif /* CONFIG_MMU */
305
306/* copy_to_from_user */
307#define __copy_from_user(to, from, n) \
308 __copy_tofrom_user((__force void __user *)(to), \
309 (void __user *)(from), (n))
276#define __copy_from_user_inatomic(to, from, n) \ 310#define __copy_from_user_inatomic(to, from, n) \
277 copy_from_user((to), (from), (n)) 311 copy_from_user((to), (from), (n))
278 312
279#define copy_to_user(to, from, n) \ 313static inline long copy_from_user(void *to,
280 (access_ok(VERIFY_WRITE, (to), (n)) ? \ 314 const void __user *from, unsigned long n)
281 __copy_tofrom_user((void __user *)(to), \ 315{
282 (__force const void __user *)(from), (n)) \ 316 might_sleep();
283 : -EFAULT) 317 if (access_ok(VERIFY_READ, from, n))
318 return __copy_from_user(to, from, n);
319 return n;
320}
284 321
285#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n)) 322#define __copy_to_user(to, from, n) \
323 __copy_tofrom_user((void __user *)(to), \
324 (__force const void __user *)(from), (n))
286#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n)) 325#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
287 326
288#define copy_from_user(to, from, n) \ 327static inline long copy_to_user(void __user *to,
289 (access_ok(VERIFY_READ, (from), (n)) ? \ 328 const void *from, unsigned long n)
290 __copy_tofrom_user((__force void __user *)(to), \ 329{
291 (void __user *)(from), (n)) \ 330 might_sleep();
292 : -EFAULT) 331 if (access_ok(VERIFY_WRITE, to, n))
332 return __copy_to_user(to, from, n);
333 return n;
334}
293 335
336/*
337 * Copy a null terminated string from userspace.
338 */
294extern int __strncpy_user(char *to, const char __user *from, int len); 339extern int __strncpy_user(char *to, const char __user *from, int len);
295extern int __strnlen_user(const char __user *sstr, int len);
296 340
297#define strncpy_from_user(to, from, len) \ 341#define __strncpy_from_user __strncpy_user
298 (access_ok(VERIFY_READ, from, 1) ? \
299 __strncpy_user(to, from, len) : -EFAULT)
300#define strnlen_user(str, len) \
301 (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
302 342
303#endif /* CONFIG_MMU */ 343static inline long
304 344strncpy_from_user(char *dst, const char __user *src, long count)
305extern unsigned long __copy_tofrom_user(void __user *to, 345{
306 const void __user *from, unsigned long size); 346 if (!access_ok(VERIFY_READ, src, 1))
347 return -EFAULT;
348 return __strncpy_from_user(dst, src, count);
349}
307 350
308/* 351/*
309 * The exception table consists of pairs of addresses: the first is the 352 * Return the size of a string (including the ending 0)
310 * address of an instruction that is allowed to fault, and the second is
311 * the address at which the program should continue. No registers are
312 * modified, so it is entirely up to the continuation code to figure out
313 * what to do.
314 * 353 *
315 * All the routines below use bits of fixup code that are out of line 354 * Return 0 on exception, a value greater than N if too long
316 * with the main instruction path. This means when everything is well,
317 * we don't even have to jump over them. Further, they do not intrude
318 * on our cache or tlb entries.
319 */ 355 */
320struct exception_table_entry { 356extern int __strnlen_user(const char __user *sstr, int len);
321 unsigned long insn, fixup; 357
322}; 358static inline long strnlen_user(const char __user *src, long n)
359{
360 if (!access_ok(VERIFY_READ, src, 1))
361 return 0;
362 return __strnlen_user(src, n);
363}
323 364
324#endif /* __ASSEMBLY__ */ 365#endif /* __ASSEMBLY__ */
325#endif /* __KERNEL__ */ 366#endif /* __KERNEL__ */
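For orientation, a minimal sketch of how a caller sees the reworked uaccess API above; the demo_* names are hypothetical, and only copy_from_user() and put_user() come from this header:

	/* hypothetical caller; only copy_from_user()/put_user() are real */
	#include <linux/uaccess.h>
	#include <linux/errno.h>

	struct demo_args {
		unsigned int addr;
		unsigned int len;
	};

	static long demo_set_args(struct demo_args __user *uptr)
	{
		struct demo_args args;

		/* copy_from_user() returns the number of bytes NOT copied */
		if (copy_from_user(&args, uptr, sizeof(args)))
			return -EFAULT;

		/* put_user() stores one scalar and returns 0 or -EFAULT */
		return put_user(args.len, &uptr->len);
	}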
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index b07594eccf9b..e51bc1520825 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -14,7 +14,7 @@ endif
14 14
15extra-y := head.o vmlinux.lds 15extra-y := head.o vmlinux.lds
16 16
17obj-y += exceptions.o \ 17obj-y += dma.o exceptions.o \
18 hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ 18 hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
19 of_platform.o process.o prom.o prom_parse.o ptrace.o \ 19 of_platform.o process.o prom.o prom_parse.o ptrace.o \
20 setup.o signal.o sys_microblaze.o timer.o traps.o reset.o 20 setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 7bc7b68f97db..0071260a672c 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -90,6 +90,7 @@ int main(int argc, char *argv[])
90 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 90 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
91 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); 91 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
92 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); 92 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
93 DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
93 BLANK(); 94 BLANK();
94 95
95 /* struct cpu_context */ 96 /* struct cpu_context */
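The TI_PREEMPT_COUNT offset added here is consumed by the CONFIG_PREEMPT path introduced in entry.S below. For reference (existing infrastructure, not part of this patch), DEFINE() comes from include/linux/kbuild.h and emits each offset as an asm marker that the build system converts into a header constant:

	/* include/linux/kbuild.h - shown for reference */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))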
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 2a56bccce4e0..f04d8a86dead 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -15,25 +15,6 @@
15#include <asm/cpuinfo.h> 15#include <asm/cpuinfo.h>
16#include <asm/pvr.h> 16#include <asm/pvr.h>
17 17
18static inline void __invalidate_flush_icache(unsigned int addr)
19{
20 __asm__ __volatile__ ("wic %0, r0;" \
21 : : "r" (addr));
22}
23
24static inline void __flush_dcache(unsigned int addr)
25{
26 __asm__ __volatile__ ("wdc.flush %0, r0;" \
27 : : "r" (addr));
28}
29
30static inline void __invalidate_dcache(unsigned int baseaddr,
31 unsigned int offset)
32{
33 __asm__ __volatile__ ("wdc.clear %0, %1;" \
34 : : "r" (baseaddr), "r" (offset));
35}
36
37static inline void __enable_icache_msr(void) 18static inline void __enable_icache_msr(void)
38{ 19{
39 __asm__ __volatile__ (" msrset r0, %0; \ 20 __asm__ __volatile__ (" msrset r0, %0; \
@@ -148,9 +129,9 @@ do { \
148 int step = -line_length; \ 129 int step = -line_length; \
149 BUG_ON(step >= 0); \ 130 BUG_ON(step >= 0); \
150 \ 131 \
151 __asm__ __volatile__ (" 1: " #op " r0, %0; \ 132 __asm__ __volatile__ (" 1: " #op " r0, %0; \
152 bgtid %0, 1b; \ 133 bgtid %0, 1b; \
153 addk %0, %0, %1; \ 134 addk %0, %0, %1; \
154 " : : "r" (len), "r" (step) \ 135 " : : "r" (len), "r" (step) \
155 : "memory"); \ 136 : "memory"); \
156} while (0); 137} while (0);
@@ -162,9 +143,9 @@ do { \
162 int count = end - start; \ 143 int count = end - start; \
163 BUG_ON(count <= 0); \ 144 BUG_ON(count <= 0); \
164 \ 145 \
165 __asm__ __volatile__ (" 1: " #op " %0, %1; \ 146 __asm__ __volatile__ (" 1: " #op " %0, %1; \
166 bgtid %1, 1b; \ 147 bgtid %1, 1b; \
167 addk %1, %1, %2; \ 148 addk %1, %1, %2; \
168 " : : "r" (start), "r" (count), \ 149 " : : "r" (start), "r" (count), \
169 "r" (step) : "memory"); \ 150 "r" (step) : "memory"); \
170} while (0); 151} while (0);
@@ -175,7 +156,7 @@ do { \
175 int volatile temp; \ 156 int volatile temp; \
176 BUG_ON(end - start <= 0); \ 157 BUG_ON(end - start <= 0); \
177 \ 158 \
178 __asm__ __volatile__ (" 1: " #op " %1, r0; \ 159 __asm__ __volatile__ (" 1: " #op " %1, r0; \
179 cmpu %0, %1, %2; \ 160 cmpu %0, %1, %2; \
180 bgtid %0, 1b; \ 161 bgtid %0, 1b; \
181 addk %1, %1, %3; \ 162 addk %1, %1, %3; \
@@ -183,10 +164,14 @@ do { \
183 "r" (line_length) : "memory"); \ 164 "r" (line_length) : "memory"); \
184} while (0); 165} while (0);
185 166
167#define ASM_LOOP
168
186static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 169static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
187{ 170{
188 unsigned long flags; 171 unsigned long flags;
189 172#ifndef ASM_LOOP
173 int i;
174#endif
190 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 175 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
191 (unsigned int)start, (unsigned int) end); 176 (unsigned int)start, (unsigned int) end);
192 177
@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
196 local_irq_save(flags); 181 local_irq_save(flags);
197 __disable_icache_msr(); 182 __disable_icache_msr();
198 183
184#ifdef ASM_LOOP
199 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 185 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
200 186#else
187 for (i = start; i < end; i += cpuinfo.icache_line_length)
188 __asm__ __volatile__ ("wic %0, r0;" \
189 : : "r" (i));
190#endif
201 __enable_icache_msr(); 191 __enable_icache_msr();
202 local_irq_restore(flags); 192 local_irq_restore(flags);
203} 193}
@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
206 unsigned long end) 196 unsigned long end)
207{ 197{
208 unsigned long flags; 198 unsigned long flags;
209 199#ifndef ASM_LOOP
200 int i;
201#endif
210 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 202 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
211 (unsigned int)start, (unsigned int) end); 203 (unsigned int)start, (unsigned int) end);
212 204
@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
216 local_irq_save(flags); 208 local_irq_save(flags);
217 __disable_icache_nomsr(); 209 __disable_icache_nomsr();
218 210
211#ifdef ASM_LOOP
219 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 212 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
213#else
214 for (i = start; i < end; i += cpuinfo.icache_line_length)
215 __asm__ __volatile__ ("wic %0, r0;" \
216 : : "r" (i));
217#endif
220 218
221 __enable_icache_nomsr(); 219 __enable_icache_nomsr();
222 local_irq_restore(flags); 220 local_irq_restore(flags);
@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
225static void __flush_icache_range_noirq(unsigned long start, 223static void __flush_icache_range_noirq(unsigned long start,
226 unsigned long end) 224 unsigned long end)
227{ 225{
226#ifndef ASM_LOOP
227 int i;
228#endif
228 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 229 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
229 (unsigned int)start, (unsigned int) end); 230 (unsigned int)start, (unsigned int) end);
230 231
231 CACHE_LOOP_LIMITS(start, end, 232 CACHE_LOOP_LIMITS(start, end,
232 cpuinfo.icache_line_length, cpuinfo.icache_size); 233 cpuinfo.icache_line_length, cpuinfo.icache_size);
234#ifdef ASM_LOOP
233 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 235 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
236#else
237 for (i = start; i < end; i += cpuinfo.icache_line_length)
238 __asm__ __volatile__ ("wic %0, r0;" \
239 : : "r" (i));
240#endif
234} 241}
235 242
236static void __flush_icache_all_msr_irq(void) 243static void __flush_icache_all_msr_irq(void)
237{ 244{
238 unsigned long flags; 245 unsigned long flags;
239 246#ifndef ASM_LOOP
247 int i;
248#endif
240 pr_debug("%s\n", __func__); 249 pr_debug("%s\n", __func__);
241 250
242 local_irq_save(flags); 251 local_irq_save(flags);
243 __disable_icache_msr(); 252 __disable_icache_msr();
244 253#ifdef ASM_LOOP
245 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 254 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
246 255#else
256 for (i = 0; i < cpuinfo.icache_size;
257 i += cpuinfo.icache_line_length)
258 __asm__ __volatile__ ("wic %0, r0;" \
259 : : "r" (i));
260#endif
247 __enable_icache_msr(); 261 __enable_icache_msr();
248 local_irq_restore(flags); 262 local_irq_restore(flags);
249} 263}
@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
251static void __flush_icache_all_nomsr_irq(void) 265static void __flush_icache_all_nomsr_irq(void)
252{ 266{
253 unsigned long flags; 267 unsigned long flags;
254 268#ifndef ASM_LOOP
269 int i;
270#endif
255 pr_debug("%s\n", __func__); 271 pr_debug("%s\n", __func__);
256 272
257 local_irq_save(flags); 273 local_irq_save(flags);
258 __disable_icache_nomsr(); 274 __disable_icache_nomsr();
259 275#ifdef ASM_LOOP
260 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 276 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
261 277#else
278 for (i = 0; i < cpuinfo.icache_size;
279 i += cpuinfo.icache_line_length)
280 __asm__ __volatile__ ("wic %0, r0;" \
281 : : "r" (i));
282#endif
262 __enable_icache_nomsr(); 283 __enable_icache_nomsr();
263 local_irq_restore(flags); 284 local_irq_restore(flags);
264} 285}
265 286
266static void __flush_icache_all_noirq(void) 287static void __flush_icache_all_noirq(void)
267{ 288{
289#ifndef ASM_LOOP
290 int i;
291#endif
268 pr_debug("%s\n", __func__); 292 pr_debug("%s\n", __func__);
293#ifdef ASM_LOOP
269 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 294 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
295#else
296 for (i = 0; i < cpuinfo.icache_size;
297 i += cpuinfo.icache_line_length)
298 __asm__ __volatile__ ("wic %0, r0;" \
299 : : "r" (i));
300#endif
270} 301}
271 302
272static void __invalidate_dcache_all_msr_irq(void) 303static void __invalidate_dcache_all_msr_irq(void)
273{ 304{
274 unsigned long flags; 305 unsigned long flags;
275 306#ifndef ASM_LOOP
307 int i;
308#endif
276 pr_debug("%s\n", __func__); 309 pr_debug("%s\n", __func__);
277 310
278 local_irq_save(flags); 311 local_irq_save(flags);
279 __disable_dcache_msr(); 312 __disable_dcache_msr();
280 313#ifdef ASM_LOOP
281 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 314 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
282 315#else
316 for (i = 0; i < cpuinfo.dcache_size;
317 i += cpuinfo.dcache_line_length)
318 __asm__ __volatile__ ("wdc %0, r0;" \
319 : : "r" (i));
320#endif
283 __enable_dcache_msr(); 321 __enable_dcache_msr();
284 local_irq_restore(flags); 322 local_irq_restore(flags);
285} 323}
@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
287static void __invalidate_dcache_all_nomsr_irq(void) 325static void __invalidate_dcache_all_nomsr_irq(void)
288{ 326{
289 unsigned long flags; 327 unsigned long flags;
290 328#ifndef ASM_LOOP
329 int i;
330#endif
291 pr_debug("%s\n", __func__); 331 pr_debug("%s\n", __func__);
292 332
293 local_irq_save(flags); 333 local_irq_save(flags);
294 __disable_dcache_nomsr(); 334 __disable_dcache_nomsr();
295 335#ifdef ASM_LOOP
296 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 336 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
297 337#else
338 for (i = 0; i < cpuinfo.dcache_size;
339 i += cpuinfo.dcache_line_length)
340 __asm__ __volatile__ ("wdc %0, r0;" \
341 : : "r" (i));
342#endif
298 __enable_dcache_nomsr(); 343 __enable_dcache_nomsr();
299 local_irq_restore(flags); 344 local_irq_restore(flags);
300} 345}
301 346
302static void __invalidate_dcache_all_noirq_wt(void) 347static void __invalidate_dcache_all_noirq_wt(void)
303{ 348{
349#ifndef ASM_LOOP
350 int i;
351#endif
304 pr_debug("%s\n", __func__); 352 pr_debug("%s\n", __func__);
353#ifdef ASM_LOOP
305 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) 354 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
355#else
356 for (i = 0; i < cpuinfo.dcache_size;
357 i += cpuinfo.dcache_line_length)
358 __asm__ __volatile__ ("wdc %0, r0;" \
359 : : "r" (i));
360#endif
306} 361}
307 362
308/* FIXME this is weird - should be only wdc but it does not work 363/* FIXME this is weird - should be only wdc but it does not work
309 * MS: I am getting bus errors and other weird things */ 364 * MS: I am getting bus errors and other weird things */
310static void __invalidate_dcache_all_wb(void) 365static void __invalidate_dcache_all_wb(void)
311{ 366{
367#ifndef ASM_LOOP
368 int i;
369#endif
312 pr_debug("%s\n", __func__); 370 pr_debug("%s\n", __func__);
371#ifdef ASM_LOOP
313 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 372 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
314 wdc.clear) 373 wdc.clear)
374#else
375 for (i = 0; i < cpuinfo.dcache_size;
376 i += cpuinfo.dcache_line_length)
377 __asm__ __volatile__ ("wdc.clear %0, r0;" \
378 : : "r" (i));
379#endif
315} 380}
316 381
317static void __invalidate_dcache_range_wb(unsigned long start, 382static void __invalidate_dcache_range_wb(unsigned long start,
318 unsigned long end) 383 unsigned long end)
319{ 384{
385#ifndef ASM_LOOP
386 int i;
387#endif
320 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 388 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
321 (unsigned int)start, (unsigned int) end); 389 (unsigned int)start, (unsigned int) end);
322 390
323 CACHE_LOOP_LIMITS(start, end, 391 CACHE_LOOP_LIMITS(start, end,
324 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 392 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
393#ifdef ASM_LOOP
325 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); 394 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
395#else
396 for (i = start; i < end; i += cpuinfo.dcache_line_length)
397 __asm__ __volatile__ ("wdc.clear %0, r0;" \
398 : : "r" (i));
399#endif
326} 400}
327 401
328static void __invalidate_dcache_range_nomsr_wt(unsigned long start, 402static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
329 unsigned long end) 403 unsigned long end)
330{ 404{
405#ifndef ASM_LOOP
406 int i;
407#endif
331 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 408 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
332 (unsigned int)start, (unsigned int) end); 409 (unsigned int)start, (unsigned int) end);
333 CACHE_LOOP_LIMITS(start, end, 410 CACHE_LOOP_LIMITS(start, end,
334 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 411 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
335 412
413#ifdef ASM_LOOP
336 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 414 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
415#else
416 for (i = start; i < end; i += cpuinfo.dcache_line_length)
417 __asm__ __volatile__ ("wdc %0, r0;" \
418 : : "r" (i));
419#endif
337} 420}
338 421
339static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, 422static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
340 unsigned long end) 423 unsigned long end)
341{ 424{
342 unsigned long flags; 425 unsigned long flags;
343 426#ifndef ASM_LOOP
427 int i;
428#endif
344 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 429 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
345 (unsigned int)start, (unsigned int) end); 430 (unsigned int)start, (unsigned int) end);
346 CACHE_LOOP_LIMITS(start, end, 431 CACHE_LOOP_LIMITS(start, end,
@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
349 local_irq_save(flags); 434 local_irq_save(flags);
350 __disable_dcache_msr(); 435 __disable_dcache_msr();
351 436
437#ifdef ASM_LOOP
352 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 438 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
439#else
440 for (i = start; i < end; i += cpuinfo.dcache_line_length)
441 __asm__ __volatile__ ("wdc %0, r0;" \
442 : : "r" (i));
443#endif
353 444
354 __enable_dcache_msr(); 445 __enable_dcache_msr();
355 local_irq_restore(flags); 446 local_irq_restore(flags);
@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
359 unsigned long end) 450 unsigned long end)
360{ 451{
361 unsigned long flags; 452 unsigned long flags;
362 453#ifndef ASM_LOOP
454 int i;
455#endif
363 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 456 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
364 (unsigned int)start, (unsigned int) end); 457 (unsigned int)start, (unsigned int) end);
365 458
@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
369 local_irq_save(flags); 462 local_irq_save(flags);
370 __disable_dcache_nomsr(); 463 __disable_dcache_nomsr();
371 464
465#ifdef ASM_LOOP
372 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 466 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
467#else
468 for (i = start; i < end; i += cpuinfo.dcache_line_length)
469 __asm__ __volatile__ ("wdc %0, r0;" \
470 : : "r" (i));
471#endif
373 472
374 __enable_dcache_nomsr(); 473 __enable_dcache_nomsr();
375 local_irq_restore(flags); 474 local_irq_restore(flags);
@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
377 476
378static void __flush_dcache_all_wb(void) 477static void __flush_dcache_all_wb(void)
379{ 478{
479#ifndef ASM_LOOP
480 int i;
481#endif
380 pr_debug("%s\n", __func__); 482 pr_debug("%s\n", __func__);
483#ifdef ASM_LOOP
381 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 484 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
382 wdc.flush); 485 wdc.flush);
486#else
487 for (i = 0; i < cpuinfo.dcache_size;
488 i += cpuinfo.dcache_line_length)
489 __asm__ __volatile__ ("wdc.flush %0, r0;" \
490 : : "r" (i));
491#endif
383} 492}
384 493
385static void __flush_dcache_range_wb(unsigned long start, unsigned long end) 494static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
386{ 495{
496#ifndef ASM_LOOP
497 int i;
498#endif
387 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 499 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
388 (unsigned int)start, (unsigned int) end); 500 (unsigned int)start, (unsigned int) end);
389 501
390 CACHE_LOOP_LIMITS(start, end, 502 CACHE_LOOP_LIMITS(start, end,
391 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 503 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
504#ifdef ASM_LOOP
392 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); 505 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
506#else
507 for (i = start; i < end; i += cpuinfo.dcache_line_length)
508 __asm__ __volatile__ ("wdc.flush %0, r0;" \
509 : : "r" (i));
510#endif
393} 511}
394 512
395/* struct for wb caches and for wt caches */ 513/* struct for wb caches and for wt caches */
@@ -493,7 +611,7 @@ const struct scache wt_nomsr_noirq = {
493#define CPUVER_7_20_A 0x0c 611#define CPUVER_7_20_A 0x0c
494#define CPUVER_7_20_D 0x0f 612#define CPUVER_7_20_D 0x0f
495 613
496#define INFO(s) printk(KERN_INFO "cache: " s " \n"); 614#define INFO(s) printk(KERN_INFO "cache: " s "\n");
497 615
498void microblaze_cache_init(void) 616void microblaze_cache_init(void)
499{ 617{
@@ -532,4 +650,9 @@ void microblaze_cache_init(void)
532 } 650 }
533 } 651 }
534 } 652 }
653 invalidate_dcache();
654 enable_dcache();
655
656 invalidate_icache();
657 enable_icache();
535} 658}
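All the #else branches added above share the same shape; a standalone sketch of the C fallback for an icache range, assuming cpuinfo is in scope as elsewhere in cache.c:

	/* C fallback used when ASM_LOOP is not defined: issue one wic
	 * (write to instruction cache) per cache line in [start, end) */
	static void icache_range_c_loop(unsigned long start, unsigned long end)
	{
		unsigned long i;

		for (i = start; i < end; i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic %0, r0;" : : "r" (i));
	}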
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 991d71311b0e..255ef880351e 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -9,7 +9,6 @@
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/slab.h>
13#include <asm/cpuinfo.h> 12#include <asm/cpuinfo.h>
14#include <asm/pvr.h> 13#include <asm/pvr.h>
15 14
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 000000000000..ce72dd4967cf
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright (C) 2009-2010 PetaLogix
3 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
4 *
5 * Provide default implementations of the DMA mapping callbacks for
6 * directly mapped busses.
7 */
8
9#include <linux/device.h>
10#include <linux/dma-mapping.h>
11#include <linux/gfp.h>
12#include <linux/dma-debug.h>
13#include <asm/bug.h>
14#include <asm/cacheflush.h>
15
16/*
17 * Generic direct DMA implementation
18 *
19 * This implementation supports a per-device offset that can be applied if
20 * the address at which memory is visible to devices is not 0. Platform code
21 * can set archdata.dma_data to an unsigned long holding the offset. By
22 * default the offset is PCI_DRAM_OFFSET.
23 */
24static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
25 size_t size, enum dma_data_direction direction)
26{
27 switch (direction) {
28 case DMA_TO_DEVICE:
29 flush_dcache_range(paddr + offset, paddr + offset + size);
30 break;
31 case DMA_FROM_DEVICE:
32 invalidate_dcache_range(paddr + offset, paddr + offset + size);
33 break;
34 default:
35 BUG();
36 }
37}
38
39static unsigned long get_dma_direct_offset(struct device *dev)
40{
41 if (likely(dev))
42 return (unsigned long)dev->archdata.dma_data;
43
44 return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
45}
46
47#define NOT_COHERENT_CACHE
48
49static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
50 dma_addr_t *dma_handle, gfp_t flag)
51{
52#ifdef NOT_COHERENT_CACHE
53 return consistent_alloc(flag, size, dma_handle);
54#else
55 void *ret;
56 struct page *page;
57 int node = dev_to_node(dev);
58
59 /* ignore region specifiers */
60 flag &= ~(__GFP_HIGHMEM);
61
62 page = alloc_pages_node(node, flag, get_order(size));
63 if (page == NULL)
64 return NULL;
65 ret = page_address(page);
66 memset(ret, 0, size);
67 *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
68
69 return ret;
70#endif
71}
72
73static void dma_direct_free_coherent(struct device *dev, size_t size,
74 void *vaddr, dma_addr_t dma_handle)
75{
76#ifdef NOT_COHERENT_CACHE
77 consistent_free(vaddr);
78#else
79 free_pages((unsigned long)vaddr, get_order(size));
80#endif
81}
82
83static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
84 int nents, enum dma_data_direction direction,
85 struct dma_attrs *attrs)
86{
87 struct scatterlist *sg;
88 int i;
89
90 /* FIXME this part of the code is untested */
91 for_each_sg(sgl, sg, nents, i) {
92 sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
93 sg->dma_length = sg->length;
94 __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
95 sg->length, direction);
96 }
97
98 return nents;
99}
100
101static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
102 int nents, enum dma_data_direction direction,
103 struct dma_attrs *attrs)
104{
105}
106
107static int dma_direct_dma_supported(struct device *dev, u64 mask)
108{
109 return 1;
110}
111
112static inline dma_addr_t dma_direct_map_page(struct device *dev,
113 struct page *page,
114 unsigned long offset,
115 size_t size,
116 enum dma_data_direction direction,
117 struct dma_attrs *attrs)
118{
119 __dma_sync_page(page_to_phys(page), offset, size, direction);
120 return page_to_phys(page) + offset + get_dma_direct_offset(dev);
121}
122
123static inline void dma_direct_unmap_page(struct device *dev,
124 dma_addr_t dma_address,
125 size_t size,
126 enum dma_data_direction direction,
127 struct dma_attrs *attrs)
128{
129/* It is not necessary to do a cache cleanup here.
130 *
131 * phys_to_virt is here because __dma_sync_page does __virt_to_phys and
132 * dma_address is a physical address
133 */
134 __dma_sync_page(dma_address, 0, size, direction);
135}
136
137struct dma_map_ops dma_direct_ops = {
138 .alloc_coherent = dma_direct_alloc_coherent,
139 .free_coherent = dma_direct_free_coherent,
140 .map_sg = dma_direct_map_sg,
141 .unmap_sg = dma_direct_unmap_sg,
142 .dma_supported = dma_direct_dma_supported,
143 .map_page = dma_direct_map_page,
144 .unmap_page = dma_direct_unmap_page,
145};
146EXPORT_SYMBOL(dma_direct_ops);
147
148/* Number of entries preallocated for DMA-API debugging */
149#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
150
151static int __init dma_init(void)
152{
153 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
154
155 return 0;
156}
157fs_initcall(dma_init);
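A hypothetical driver fragment exercising the new dma_direct_ops through the generic streaming API; dev, buf and size are assumptions, the calls are the standard <linux/dma-mapping.h> ones:

	/* usage sketch; dma_direct_map_page() above flushes the dcache
	 * for DMA_TO_DEVICE before handing out the bus address */
	#include <linux/dma-mapping.h>

	static int demo_dma_to_device(struct device *dev, void *buf, size_t size)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... program the device with "handle", start the transfer ... */

		dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
		return 0;
	}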
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 3bad4ff49471..c0ede25c5b99 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -305,7 +305,7 @@ C_ENTRY(_user_exception):
305 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 305 swi r11, r1, PTO+PT_R1; /* Store user SP. */
306 addi r11, r0, 1; 306 addi r11, r0, 1;
307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
3082: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 3082: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
309 /* Save away the syscall number. */ 309 /* Save away the syscall number. */
310 swi r12, r1, PTO+PT_R0; 310 swi r12, r1, PTO+PT_R0;
311 tovirt(r1,r1) 311 tovirt(r1,r1)
@@ -322,8 +322,7 @@ C_ENTRY(_user_exception):
322 rtid r11, 0 322 rtid r11, 0
323 nop 323 nop
3243: 3243:
325 add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */ 325 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
326 lwi r11, r11, TS_THREAD_INFO /* get thread info */
327 lwi r11, r11, TI_FLAGS /* get flags in thread info */ 326 lwi r11, r11, TI_FLAGS /* get flags in thread info */
328 andi r11, r11, _TIF_WORK_SYSCALL_MASK 327 andi r11, r11, _TIF_WORK_SYSCALL_MASK
329 beqi r11, 4f 328 beqi r11, 4f
@@ -382,60 +381,50 @@ C_ENTRY(ret_from_trap):
382/* See if returning to kernel mode, if so, skip resched &c. */ 381/* See if returning to kernel mode, if so, skip resched &c. */
383 bnei r11, 2f; 382 bnei r11, 2f;
384 383
384 swi r3, r1, PTO + PT_R3
385 swi r4, r1, PTO + PT_R4
386
385 /* We're returning to user mode, so check for various conditions that 387 /* We're returning to user mode, so check for various conditions that
386 * trigger rescheduling. */ 388 * trigger rescheduling. */
387 # FIXME: Restructure all these flag checks. 389 /* FIXME: Restructure all these flag checks. */
388 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 390 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
389 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
390 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 391 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
391 andi r11, r11, _TIF_WORK_SYSCALL_MASK 392 andi r11, r11, _TIF_WORK_SYSCALL_MASK
392 beqi r11, 1f 393 beqi r11, 1f
393 394
394 swi r3, r1, PTO + PT_R3
395 swi r4, r1, PTO + PT_R4
396 brlid r15, do_syscall_trace_leave 395 brlid r15, do_syscall_trace_leave
397 addik r5, r1, PTO + PT_R0 396 addik r5, r1, PTO + PT_R0
398 lwi r3, r1, PTO + PT_R3
399 lwi r4, r1, PTO + PT_R4
4001: 3971:
401
402 /* We're returning to user mode, so check for various conditions that 398 /* We're returning to user mode, so check for various conditions that
403 * trigger rescheduling. */ 399 * trigger rescheduling. */
404 /* Get current task ptr into r11 */ 400 /* get thread info from current task */
405 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 401 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
406 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
407 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 402 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
408 andi r11, r11, _TIF_NEED_RESCHED; 403 andi r11, r11, _TIF_NEED_RESCHED;
409 beqi r11, 5f; 404 beqi r11, 5f;
410 405
411 swi r3, r1, PTO + PT_R3; /* store syscall result */
412 swi r4, r1, PTO + PT_R4;
413 bralid r15, schedule; /* Call scheduler */ 406 bralid r15, schedule; /* Call scheduler */
414 nop; /* delay slot */ 407 nop; /* delay slot */
415 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
416 lwi r4, r1, PTO + PT_R4;
417 408
418 /* Maybe handle a signal */ 409 /* Maybe handle a signal */
4195: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 4105: /* get thread info from current task */
420 lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 411 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
421 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 412 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
422 andi r11, r11, _TIF_SIGPENDING; 413 andi r11, r11, _TIF_SIGPENDING;
423 beqi r11, 1f; /* Signals to handle, handle them */ 414 beqi r11, 1f; /* Signals to handle, handle them */
424 415
425 swi r3, r1, PTO + PT_R3; /* store syscall result */
426 swi r4, r1, PTO + PT_R4;
427 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 416 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
428 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
429 addi r7, r0, 1; /* Arg 3: int in_syscall */ 417 addi r7, r0, 1; /* Arg 3: int in_syscall */
430 bralid r15, do_signal; /* Handle any signals */ 418 bralid r15, do_signal; /* Handle any signals */
431 nop; 419 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
420
421/* Finally, return to user state. */
4221:
432 lwi r3, r1, PTO + PT_R3; /* restore syscall result */ 423 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
433 lwi r4, r1, PTO + PT_R4; 424 lwi r4, r1, PTO + PT_R4;
434 425
435/* Finally, return to user state. */ 426 swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
4361: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 427 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
437 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
438 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
439 VM_OFF; 428 VM_OFF;
440 tophys(r1,r1); 429 tophys(r1,r1);
441 RESTORE_REGS; 430 RESTORE_REGS;
@@ -565,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
565 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ 554 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
566 addi r11, r0, 1; \ 555 addi r11, r0, 1; \
567 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ 556 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
5682: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ 5572: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
569 /* Save away the syscall number. */ \ 558 /* Save away the syscall number. */ \
570 swi r0, r1, PTO+PT_R0; \ 559 swi r0, r1, PTO+PT_R0; \
571 tovirt(r1,r1) 560 tovirt(r1,r1)
@@ -673,9 +662,7 @@ C_ENTRY(ret_from_exc):
673 662
674 /* We're returning to user mode, so check for various conditions that 663 /* We're returning to user mode, so check for various conditions that
675 trigger rescheduling. */ 664 trigger rescheduling. */
676 /* Get current task ptr into r11 */ 665 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
677 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
678 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
679 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 666 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
680 andi r11, r11, _TIF_NEED_RESCHED; 667 andi r11, r11, _TIF_NEED_RESCHED;
681 beqi r11, 5f; 668 beqi r11, 5f;
@@ -685,8 +672,7 @@ C_ENTRY(ret_from_exc):
685 nop; /* delay slot */ 672 nop; /* delay slot */
686 673
687 /* Maybe handle a signal */ 674 /* Maybe handle a signal */
6885: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 6755: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
689 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
690 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 676 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
691 andi r11, r11, _TIF_SIGPENDING; 677 andi r11, r11, _TIF_SIGPENDING;
692 beqi r11, 1f; /* Signals to handle, handle them */ 678 beqi r11, 1f; /* Signals to handle, handle them */
@@ -705,15 +691,13 @@ C_ENTRY(ret_from_exc):
705 * store return registers separately because this macro is used 691 * store return registers separately because this macro is used
706 * for other exceptions */ 692 * for other exceptions */
707 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 693 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
708 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
709 addi r7, r0, 0; /* Arg 3: int in_syscall */ 694 addi r7, r0, 0; /* Arg 3: int in_syscall */
710 bralid r15, do_signal; /* Handle any signals */ 695 bralid r15, do_signal; /* Handle any signals */
711 nop; 696 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
712 697
713/* Finally, return to user state. */ 698/* Finally, return to user state. */
7141: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 6991: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
715 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 700 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
716 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
717 VM_OFF; 701 VM_OFF;
718 tophys(r1,r1); 702 tophys(r1,r1);
719 703
@@ -802,7 +786,7 @@ C_ENTRY(_interrupt):
802 swi r11, r0, TOPHYS(PER_CPU(KM)); 786 swi r11, r0, TOPHYS(PER_CPU(KM));
803 787
8042: 7882:
805 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 789 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
806 swi r0, r1, PTO + PT_R0; 790 swi r0, r1, PTO + PT_R0;
807 tovirt(r1,r1) 791 tovirt(r1,r1)
808 la r5, r1, PTO; 792 la r5, r1, PTO;
@@ -817,8 +801,7 @@ ret_from_irq:
817 lwi r11, r1, PTO + PT_MODE; 801 lwi r11, r1, PTO + PT_MODE;
818 bnei r11, 2f; 802 bnei r11, 2f;
819 803
820 add r11, r0, CURRENT_TASK; 804 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
821 lwi r11, r11, TS_THREAD_INFO;
822 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ 805 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
823 andi r11, r11, _TIF_NEED_RESCHED; 806 andi r11, r11, _TIF_NEED_RESCHED;
824 beqi r11, 5f 807 beqi r11, 5f
@@ -826,8 +809,7 @@ ret_from_irq:
826 nop; /* delay slot */ 809 nop; /* delay slot */
827 810
828 /* Maybe handle a signal */ 811 /* Maybe handle a signal */
8295: add r11, r0, CURRENT_TASK; 8125: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
830 lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
831 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 813 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
832 andi r11, r11, _TIF_SIGPENDING; 814 andi r11, r11, _TIF_SIGPENDING;
833 beqid r11, no_intr_resched 815 beqid r11, no_intr_resched
@@ -842,8 +824,7 @@ no_intr_resched:
842 /* Disable interrupts, we are now committed to the state restore */ 824 /* Disable interrupts, we are now committed to the state restore */
843 disable_irq 825 disable_irq
844 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ 826 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
845 add r11, r0, CURRENT_TASK; 827 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
846 swi r11, r0, PER_CPU(CURRENT_SAVE);
847 VM_OFF; 828 VM_OFF;
848 tophys(r1,r1); 829 tophys(r1,r1);
849 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 830 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
@@ -853,7 +834,28 @@ no_intr_resched:
853 lwi r1, r1, PT_R1 - PT_SIZE; 834 lwi r1, r1, PT_R1 - PT_SIZE;
854 bri 6f; 835 bri 6f;
855/* MS: Return to kernel state. */ 836/* MS: Return to kernel state. */
8562: VM_OFF /* MS: turn off MMU */ 8372:
838#ifdef CONFIG_PREEMPT
839 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
840 /* MS: get preempt_count from thread info */
841 lwi r5, r11, TI_PREEMPT_COUNT;
842 bgti r5, restore;
843
844 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
845 andi r5, r5, _TIF_NEED_RESCHED;
846 beqi r5, restore /* if zero jump over */
847
848preempt:
849 /* interrupts are off, that's why we call preempt_schedule_irq */
850 bralid r15, preempt_schedule_irq
851 nop
852 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
853 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
854 andi r5, r5, _TIF_NEED_RESCHED;
855 bnei r5, preempt /* if non-zero jump to resched */
856restore:
857#endif
858 VM_OFF /* MS: turn off MMU */
857 tophys(r1,r1) 859 tophys(r1,r1)
858 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 860 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
859 lwi r4, r1, PTO + PT_R4; 861 lwi r4, r1, PTO + PT_R4;
@@ -915,7 +917,7 @@ C_ENTRY(_debug_exception):
915 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 917 swi r11, r1, PTO+PT_R1; /* Store user SP. */
916 addi r11, r0, 1; 918 addi r11, r0, 1;
917 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 919 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
9182: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 9202: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
919 /* Save away the syscall number. */ 921 /* Save away the syscall number. */
920 swi r0, r1, PTO+PT_R0; 922 swi r0, r1, PTO+PT_R0;
921 tovirt(r1,r1) 923 tovirt(r1,r1)
@@ -935,8 +937,7 @@ dbtrap_call: rtbd r11, 0;
935 bnei r11, 2f; 937 bnei r11, 2f;
936 938
937 /* Get current task ptr into r11 */ 939 /* Get current task ptr into r11 */
938 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 940 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
939 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
940 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 941 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
941 andi r11, r11, _TIF_NEED_RESCHED; 942 andi r11, r11, _TIF_NEED_RESCHED;
942 beqi r11, 5f; 943 beqi r11, 5f;
@@ -949,8 +950,7 @@ dbtrap_call: rtbd r11, 0;
949 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ 950 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
950 951
951 /* Maybe handle a signal */ 952 /* Maybe handle a signal */
9525: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 9535: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
953 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
954 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 954 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
955 andi r11, r11, _TIF_SIGPENDING; 955 andi r11, r11, _TIF_SIGPENDING;
956 beqi r11, 1f; /* Signals to handle, handle them */ 956 beqi r11, 1f; /* Signals to handle, handle them */
@@ -966,16 +966,14 @@ dbtrap_call: rtbd r11, 0;
966 (in a possibly modified form) after do_signal returns. */ 966 (in a possibly modified form) after do_signal returns. */
967 967
968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
969 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
970 addi r7, r0, 0; /* Arg 3: int in_syscall */ 969 addi r7, r0, 0; /* Arg 3: int in_syscall */
971 bralid r15, do_signal; /* Handle any signals */ 970 bralid r15, do_signal; /* Handle any signals */
972 nop; 971 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
973 972
974 973
975/* Finally, return to user state. */ 974/* Finally, return to user state. */
9761: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 9751: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
977 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 976 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
978 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
979 VM_OFF; 977 VM_OFF;
980 tophys(r1,r1); 978 tophys(r1,r1);
981 979
@@ -1007,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */
1007 1005
1008ENTRY(_switch_to) 1006ENTRY(_switch_to)
1009 /* prepare return value */ 1007 /* prepare return value */
1010 addk r3, r0, r31 1008 addk r3, r0, CURRENT_TASK
1011 1009
1012 /* save registers in cpu_context */ 1010 /* save registers in cpu_context */
1013 /* use r11 and r12, volatile registers, as temp register */ 1011 /* use r11 and r12, volatile registers, as temp register */
@@ -1051,10 +1049,10 @@ ENTRY(_switch_to)
1051 nop 1049 nop
1052 swi r12, r11, CC_FSR 1050 swi r12, r11, CC_FSR
1053 1051
1054 /* update r31, the current */ 1052 /* update r31, the current task - pointer to the task which will run next */
1055 lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ 1053 lwi CURRENT_TASK, r6, TI_TASK
1056 /* store it to current_save too */ 1054 /* store it to current_save too */
1057 swi r31, r0, PER_CPU(CURRENT_SAVE) 1055 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
1058 1056
1059 /* get new process' cpu context and restore */ 1057 /* get new process' cpu context and restore */
1060 /* get the start of the next task's cpu context */ 1058 /* get the start of the next task's cpu context */
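The CONFIG_PREEMPT block added to ret_from_irq reads more easily as its C-level equivalent; this is a sketch only, the assembly in the hunk above is authoritative:

	/* C-level equivalent of the preempt/restore loop in ret_from_irq */
	if (!current_thread_info()->preempt_count) {
		while (current_thread_info()->flags & _TIF_NEED_RESCHED)
			preempt_schedule_irq();	/* entered with IRQs off */
	}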
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index 388b31ca65a1..515feb404555 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -151,13 +151,10 @@ int ftrace_make_nop(struct module *mod,
151 return ret; 151 return ret;
152} 152}
153 153
154static int ret_addr; /* initialized as 0 by default */
155
156/* I believe ftrace_make_nop is called before this function */ 154/* I believe ftrace_make_nop is called before this function */
157int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 155int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
158{ 156{
159 int ret; 157 int ret;
160 ret_addr = addr; /* saving where the barrier jump is */
161 pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", 158 pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
162 __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); 159 __func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
163 ret = ftrace_modify_code(rec->ip, imm); 160 ret = ftrace_modify_code(rec->ip, imm);
@@ -194,12 +191,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
194 ret = ftrace_modify_code(ip, upper); 191 ret = ftrace_modify_code(ip, upper);
195 ret += ftrace_modify_code(ip + 4, lower); 192 ret += ftrace_modify_code(ip + 4, lower);
196 193
197 /* We just need to remove the rtsd r15, 8 by NOP */ 194 /* We just need to replace the rtsd r15, 8 with NOP */
198 BUG_ON(!ret_addr); 195 ret += ftrace_modify_code((unsigned long)&ftrace_caller,
199 if (ret_addr) 196 MICROBLAZE_NOP);
200 ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP);
201 else
202 ret = 1; /* fault */
203 197
204 /* All changes are done - let's make the caches consistent */ 198 /* All changes are done - let's make the caches consistent */
205 flush_icache(); 199 flush_icache();
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 30916193fcc7..da6a5f5dc766 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -51,6 +51,12 @@ swapper_pg_dir:
51 51
52 .text 52 .text
53ENTRY(_start) 53ENTRY(_start)
54#if CONFIG_KERNEL_BASE_ADDR == 0
55 brai TOPHYS(real_start)
56 .org 0x100
57real_start:
58#endif
59
54 mfs r1, rmsr 60 mfs r1, rmsr
55 andi r1, r1, ~2 61 andi r1, r1, ~2
56 mts rmsr, r1 62 mts rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
99 tophys(r4,r4) /* convert to phys address */ 105 tophys(r4,r4) /* convert to phys address */
100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 106 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
101_copy_command_line: 107_copy_command_line:
102 lbu r7, r5, r6 /* r7=r5+r6 - r5 contains pointer to command line */ 108 lbu r2, r5, r6 /* r2=r5+r6 - r5 contains pointer to command line */
103 sb r7, r4, r6 /* addr[r4+r6]= r7*/ 109 sb r2, r4, r6 /* addr[r4+r6]= r2*/
104 addik r6, r6, 1 /* increment count */ 110 addik r6, r6, 1 /* increment count */
105 bgtid r3, _copy_command_line /* loop for all entries */ 111 bgtid r3, _copy_command_line /* loop for all entries */
106 addik r3, r3, -1 /* decrement loop */ 112 addik r3, r3, -1 /* decrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
128 * virtual to physical. 134 * virtual to physical.
129 */ 135 */
130 nop 136 nop
131 addik r3, r0, 63 /* Invalidate all TLB entries */ 137 addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
132_invalidate: 138_invalidate:
133 mts rtlbx, r3 139 mts rtlbx, r3
134 mts rtlbhi, r0 /* flush: ensure V is clear */ 140 mts rtlbhi, r0 /* flush: ensure V is clear */
@@ -136,6 +142,11 @@ _invalidate:
136 addik r3, r3, -1 142 addik r3, r3, -1
137 /* sync */ 143 /* sync */
138 144
145 /* Setup the kernel PID */
146 mts rpid,r0 /* Load the kernel PID */
147 nop
148 bri 4
149
139 /* 150 /*
140 * We should still be executing code at physical address area 151 * We should still be executing code at physical address area
141 * RAM_BASEADDR at this point. However, kernel code is at 152 * RAM_BASEADDR at this point. However, kernel code is at
@@ -146,10 +157,6 @@ _invalidate:
146 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ 157 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
147 tophys(r4,r3) /* Load the kernel physical address */ 158 tophys(r4,r3) /* Load the kernel physical address */
148 159
149 mts rpid,r0 /* Load the kernel PID */
150 nop
151 bri 4
152
153 /* 160 /*
154 * Configure and load two entries into TLB slots 0 and 1. 161 * Configure and load two entries into TLB slots 0 and 1.
155 * In case we are pinning TLBs, these are reserved in by the 162 * In case we are pinning TLBs, these are reserved in by the
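The reworked _invalidate loop covers MICROBLAZE_TLB_SIZE entries instead of a hard-coded 63. A C-level sketch of what that assembly does (illustrative only; the mts instructions in head.S are the real mechanism):

	/* invalidate every TLB entry by selecting it and clearing its
	 * high word, which clears the valid (V) bit */
	int i;

	for (i = MICROBLAZE_TLB_SIZE - 1; i >= 0; i--)
		__asm__ __volatile__ ("mts rtlbx, %0;"
				      "mts rtlbhi, r0" : : "r" (i));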
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03aa841..995a2123635b 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@ _hw_exception_handler:
313 mfs r5, rmsr; 313 mfs r5, rmsr;
314 nop 314 nop
315 swi r5, r1, 0; 315 swi r5, r1, 0;
316 mfs r3, resr 316 mfs r4, resr
317 nop 317 nop
318 mfs r4, rear; 318 mfs r3, rear;
319 nop 319 nop
320 320
321#ifndef CONFIG_MMU 321#ifndef CONFIG_MMU
322 andi r5, r3, 0x1000; /* Check ESR[DS] */ 322 andi r5, r4, 0x1000; /* Check ESR[DS] */
323 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ 323 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
324 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 324 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
325 nop 325 nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
327 swi r17, r1, PT_R17 327 swi r17, r1, PT_R17
328#endif 328#endif
329 329
330 andi r5, r3, 0x1F; /* Extract ESR[EXC] */ 330 andi r5, r4, 0x1F; /* Extract ESR[EXC] */
331 331
332#ifdef CONFIG_MMU 332#ifdef CONFIG_MMU
333 /* Calculate exception vector offset = r5 << 2 */ 333 /* Calculate exception vector offset = r5 << 2 */
334 addk r6, r5, r5; /* << 1 */ 334 addk r6, r5, r5; /* << 1 */
335 addk r6, r6, r6; /* << 2 */ 335 addk r6, r6, r6; /* << 2 */
336 336
337#ifdef DEBUG
337/* counting which exception happen */ 338/* counting which exception happen */
338 lwi r5, r0, 0x200 + TOPHYS(r0_ram) 339 lwi r5, r0, 0x200 + TOPHYS(r0_ram)
339 addi r5, r5, 1 340 addi r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
341 lwi r5, r6, 0x200 + TOPHYS(r0_ram) 342 lwi r5, r6, 0x200 + TOPHYS(r0_ram)
342 addi r5, r5, 1 343 addi r5, r5, 1
343 swi r5, r6, 0x200 + TOPHYS(r0_ram) 344 swi r5, r6, 0x200 + TOPHYS(r0_ram)
345#endif
344/* end */ 346/* end */
345 /* Load the HW Exception vector */ 347 /* Load the HW Exception vector */
346 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) 348 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
376 swi r18, r1, PT_R18 378 swi r18, r1, PT_R18
377 379
378 or r5, r1, r0 380 or r5, r1, r0
379 andi r6, r3, 0x1F; /* Load ESR[EC] */ 381 andi r6, r4, 0x1F; /* Load ESR[EC] */
380 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ 382 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
381 swi r7, r1, PT_MODE 383 swi r7, r1, PT_MODE
382 mfs r7, rfsr 384 mfs r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
426 */ 428 */
427handle_unaligned_ex: 429handle_unaligned_ex:
428 /* Working registers already saved: R3, R4, R5, R6 430 /* Working registers already saved: R3, R4, R5, R6
429 * R3 = ESR 431 * R4 = ESR
430 * R4 = EAR 432 * R3 = EAR
431 */ 433 */
432#ifdef CONFIG_MMU 434#ifdef CONFIG_MMU
433 andi r6, r3, 0x1000 /* Check ESR[DS] */ 435 andi r6, r4, 0x1000 /* Check ESR[DS] */
434 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ 436 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
435 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 437 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
436 nop 438 nop
@@ -439,7 +441,7 @@ _no_delayslot:
439 RESTORE_STATE; 441 RESTORE_STATE;
440 bri unaligned_data_trap 442 bri unaligned_data_trap
441#endif 443#endif
442 andi r6, r3, 0x3E0; /* Mask and extract the register operand */ 444 andi r6, r4, 0x3E0; /* Mask and extract the register operand */
443 srl r6, r6; /* r6 >> 5 */ 445 srl r6, r6; /* r6 >> 5 */
444 srl r6, r6; 446 srl r6, r6;
445 srl r6, r6; 447 srl r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
448 /* Store the register operand in a temporary location */ 450 /* Store the register operand in a temporary location */
449 sbi r6, r0, TOPHYS(ex_reg_op); 451 sbi r6, r0, TOPHYS(ex_reg_op);
450 452
451 andi r6, r3, 0x400; /* Extract ESR[S] */ 453 andi r6, r4, 0x400; /* Extract ESR[S] */
452 bnei r6, ex_sw; 454 bnei r6, ex_sw;
453ex_lw: 455ex_lw:
454 andi r6, r3, 0x800; /* Extract ESR[W] */ 456 andi r6, r4, 0x800; /* Extract ESR[W] */
455 beqi r6, ex_lhw; 457 beqi r6, ex_lhw;
456 lbui r5, r4, 0; /* Exception address in r4 */ 458 lbui r5, r3, 0; /* Exception address in r3 */
457 /* Load a word, byte-by-byte from destination address 459 /* Load a word, byte-by-byte from destination address
458 and save it in tmp space */ 460 and save it in tmp space */
459 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 461 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
460 lbui r5, r4, 1; 462 lbui r5, r3, 1;
461 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 463 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
462 lbui r5, r4, 2; 464 lbui r5, r3, 2;
463 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); 465 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
464 lbui r5, r4, 3; 466 lbui r5, r3, 3;
465 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); 467 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
466 /* Get the destination register value into r3 */ 468 /* Get the destination register value into r4 */
467 lwi r3, r0, TOPHYS(ex_tmp_data_loc_0); 469 lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
468 bri ex_lw_tail; 470 bri ex_lw_tail;
469ex_lhw: 471ex_lhw:
470 lbui r5, r4, 0; /* Exception address in r4 */ 472 lbui r5, r3, 0; /* Exception address in r3 */
471 /* Load a half-word, byte-by-byte from destination 473 /* Load a half-word, byte-by-byte from destination
472 address and save it in tmp space */ 474 address and save it in tmp space */
473 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 475 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
474 lbui r5, r4, 1; 476 lbui r5, r3, 1;
475 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 477 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
476 /* Get the destination register value into r3 */ 478 /* Get the destination register value into r4 */
477 lhui r3, r0, TOPHYS(ex_tmp_data_loc_0); 479 lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
478ex_lw_tail: 480ex_lw_tail:
479 /* Get the destination register number into r5 */ 481 /* Get the destination register number into r5 */
480 lbui r5, r0, TOPHYS(ex_reg_op); 482 lbui r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
502 andi r6, r6, 0x800; /* Extract ESR[W] */ 504 andi r6, r6, 0x800; /* Extract ESR[W] */
503 beqi r6, ex_shw; 505 beqi r6, ex_shw;
504 /* Get the word - delay slot */ 506 /* Get the word - delay slot */
505 swi r3, r0, TOPHYS(ex_tmp_data_loc_0); 507 swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
506 /* Store the word, byte-by-byte into destination address */ 508 /* Store the word, byte-by-byte into destination address */
507 lbui r3, r0, TOPHYS(ex_tmp_data_loc_0); 509 lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
508 sbi r3, r4, 0; 510 sbi r4, r3, 0;
509 lbui r3, r0, TOPHYS(ex_tmp_data_loc_1); 511 lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
510 sbi r3, r4, 1; 512 sbi r4, r3, 1;
511 lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); 513 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
512 sbi r3, r4, 2; 514 sbi r4, r3, 2;
513 lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); 515 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
514 sbi r3, r4, 3; 516 sbi r4, r3, 3;
515 bri ex_handler_done; 517 bri ex_handler_done;
516 518
517ex_shw: 519ex_shw:
518 /* Store the lower half-word, byte-by-byte into destination address */ 520 /* Store the lower half-word, byte-by-byte into destination address */
519 swi r3, r0, TOPHYS(ex_tmp_data_loc_0); 521 swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
520 lbui r3, r0, TOPHYS(ex_tmp_data_loc_2); 522 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
521 sbi r3, r4, 0; 523 sbi r4, r3, 0;
522 lbui r3, r0, TOPHYS(ex_tmp_data_loc_3); 524 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
523 sbi r3, r4, 1; 525 sbi r4, r3, 1;
524ex_sw_end: /* Exception handling of store word, ends. */ 526ex_sw_end: /* Exception handling of store word, ends. */
525 527
526ex_handler_done: 528ex_handler_done:
@@ -560,21 +562,16 @@ ex_handler_done:
560 */ 562 */
561 mfs r11, rpid 563 mfs r11, rpid
562 nop 564 nop
563 bri 4
564 mfs r3, rear /* Get faulting address */
565 nop
566 /* If we are faulting a kernel address, we have to use the 565 /* If we are faulting a kernel address, we have to use the
567 * kernel page tables. 566 * kernel page tables.
568 */ 567 */
569 ori r4, r0, CONFIG_KERNEL_START 568 ori r5, r0, CONFIG_KERNEL_START
570 cmpu r4, r3, r4 569 cmpu r5, r3, r5
571 bgti r4, ex3 570 bgti r5, ex3
572 /* First, check if it was a zone fault (which means a user 571 /* First, check if it was a zone fault (which means a user
573 * tried to access a kernel or read-protected page - always 572 * tried to access a kernel or read-protected page - always
574 * a SEGV). All other faults here must be stores, so no 573 * a SEGV). All other faults here must be stores, so no
575 * need to check ESR_S as well. */ 574 * need to check ESR_S as well. */
576 mfs r4, resr
577 nop
578 andi r4, r4, 0x800 /* ESR_Z - zone protection */ 575 andi r4, r4, 0x800 /* ESR_Z - zone protection */
579 bnei r4, ex2 576 bnei r4, ex2
580 577
@@ -589,8 +586,6 @@ ex_handler_done:
589 * tried to access a kernel or read-protected page - always 586 * tried to access a kernel or read-protected page - always
590 * a SEGV). All other faults here must be stores, so no 587 * a SEGV). All other faults here must be stores, so no
591 * need to check ESR_S as well. */ 588 * need to check ESR_S as well. */
592 mfs r4, resr
593 nop
594 andi r4, r4, 0x800 /* ESR_Z */ 589 andi r4, r4, 0x800 /* ESR_Z */
595 bnei r4, ex2 590 bnei r4, ex2
596 /* get current task address */ 591 /* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
665 * R3 = ESR 660 * R3 = ESR
666 */ 661 */
667 662
668 mfs r3, rear /* Get faulting address */
669 nop
670 RESTORE_STATE; 663 RESTORE_STATE;
671 bri page_fault_instr_trap 664 bri page_fault_instr_trap
672 665
@@ -677,18 +670,15 @@ ex_handler_done:
677 */ 670 */
678 handle_data_tlb_miss_exception: 671 handle_data_tlb_miss_exception:
679 /* Working registers already saved: R3, R4, R5, R6 672 /* Working registers already saved: R3, R4, R5, R6
680 * R3 = ESR 673 * R3 = EAR, R4 = ESR
681 */ 674 */
682 mfs r11, rpid 675 mfs r11, rpid
683 nop 676 nop
684 bri 4
685 mfs r3, rear /* Get faulting address */
686 nop
687 677
688 /* If we are faulting a kernel address, we have to use the 678 /* If we are faulting a kernel address, we have to use the
689 * kernel page tables. */ 679 * kernel page tables. */
690 ori r4, r0, CONFIG_KERNEL_START 680 ori r6, r0, CONFIG_KERNEL_START
691 cmpu r4, r3, r4 681 cmpu r4, r3, r6
692 bgti r4, ex5 682 bgti r4, ex5
693 ori r4, r0, swapper_pg_dir 683 ori r4, r0, swapper_pg_dir
694 mts rpid, r0 /* TLB will have 0 TID */ 684 mts rpid, r0 /* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
731 * Many of these bits are software only. Bits we don't set 721 * Many of these bits are software only. Bits we don't set
732 * here we (properly should) assume have the appropriate value. 722 * here we (properly should) assume have the appropriate value.
733 */ 723 */
724 brid finish_tlb_load
734 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 725 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
735
736 bri finish_tlb_load
737 ex7: 726 ex7:
738 /* The bailout. Restore registers to pre-exception conditions 727 /* The bailout. Restore registers to pre-exception conditions
739 * and call the heavyweights to help us out. 728 * and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
754 */ 743 */
755 mfs r11, rpid 744 mfs r11, rpid
756 nop 745 nop
757 bri 4
758 mfs r3, rear /* Get faulting address */
759 nop
760 746
761 /* If we are faulting a kernel address, we have to use the 747 /* If we are faulting a kernel address, we have to use the
762 * kernel page tables. 748 * kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
792 lwi r4, r5, 0 /* Get Linux PTE */ 778 lwi r4, r5, 0 /* Get Linux PTE */
793 779
794 andi r6, r4, _PAGE_PRESENT 780 andi r6, r4, _PAGE_PRESENT
795 beqi r6, ex7 781 beqi r6, ex10
796 782
797 ori r4, r4, _PAGE_ACCESSED 783 ori r4, r4, _PAGE_ACCESSED
798 swi r4, r5, 0 784 swi r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
805 * Many of these bits are software only. Bits we don't set 791 * Many of these bits are software only. Bits we don't set
806 * here we (properly should) assume have the appropriate value. 792 * here we (properly should) assume have the appropriate value.
807 */ 793 */
794 brid finish_tlb_load
808 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 795 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
809
810 bri finish_tlb_load
811 ex10: 796 ex10:
812 /* The bailout. Restore registers to pre-exception conditions 797 /* The bailout. Restore registers to pre-exception conditions
813 * and call the heavyweights to help us out. 798 * and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
837 andi r5, r5, (MICROBLAZE_TLB_SIZE-1) 822 andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
838 ori r6, r0, 1 823 ori r6, r0, 1
839 cmp r31, r5, r6 824 cmp r31, r5, r6
840 blti r31, sem 825 blti r31, ex12
841 addik r5, r6, 1 826 addik r5, r6, 1
842 sem: 827 ex12:
843 /* MS: save back current TLB index */ 828 /* MS: save back current TLB index */
844 swi r5, r0, TOPHYS(tlb_index) 829 swi r5, r0, TOPHYS(tlb_index)
845 830
@@ -859,7 +844,6 @@ ex_handler_done:
859 nop 844 nop
860 845
861 /* Done...restore registers and get out of here. */ 846 /* Done...restore registers and get out of here. */
862 ex12:
863 mts rpid, r11 847 mts rpid, r11
864 nop 848 nop
865 bri 4 849 bri 4
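
[Reviewer note] The hunks above settle on one register convention for the whole handler: the EAR (faulting address) lives in r3 and the ESR (exception status) in r4, each read once at entry. That is what allows deleting the repeated mfs rear/resr reads and the bri 4 stalls around them. For reference, the ESR fields the handler masks out, written as C constants derived only from the literals visible above (the names are illustrative, not the kernel's):

    /* Illustrative names for the ESR masks used above. */
    #define ESR_DS  0x1000  /* exception hit a delay slot; BTR holds the return address */
    #define ESR_W   0x0800  /* word access (vs half-word) in unaligned traps */
    #define ESR_S   0x0400  /* store access (vs load) in unaligned traps */
    #define ESR_RX  0x03E0  /* source/destination register operand, bits 5..9 */
    #define ESR_EC  0x001F  /* exception cause, index into _MB_HW_ExceptionVectorTable */

    /* e.g. the register operand extracted by the srl shift sequence above: */
    static unsigned int esr_reg_operand(unsigned int esr)
    {
            return (esr & ESR_RX) >> 5;
    }
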
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 0f06034d1fe0..6f39e2c001f3 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -93,3 +93,18 @@ skip:
93 } 93 }
94 return 0; 94 return 0;
95} 95}
96
 97/* MS: There is no advanced mapping mechanism. We are using a simple 32bit
 98 intc without any cascades or connections, which is why the mapping is 1:1 */
99unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
100{
101 return hwirq;
102}
103EXPORT_SYMBOL_GPL(irq_create_mapping);
104
105unsigned int irq_create_of_mapping(struct device_node *controller,
106 u32 *intspec, unsigned int intsize)
107{
108 return intspec[0];
109}
110EXPORT_SYMBOL_GPL(irq_create_of_mapping);
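
[Reviewer note] Because the platform has one flat interrupt controller, both helpers reduce to the identity map: hwirq N is Linux IRQ N. A minimal sketch of what a caller gets back under this scheme (the controller node and specifier are hypothetical):

    #include <linux/of.h>           /* struct device_node */

    static unsigned int example_map(struct device_node *intc)
    {
            u32 spec[2] = { 3, 2 }; /* hypothetical "interrupts = <3 2>;" */

            /* irq_create_of_mapping() above returns spec[0] unchanged,
             * so this device ends up on Linux IRQ 3. */
            return irq_create_of_mapping(intc, spec, 2);
    }
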
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c6287a8e..7cf86498326c 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
26 * We avoid flushing the pinned 0, 1 and possibly 2 entries. 26 * We avoid flushing the pinned 0, 1 and possibly 2 entries.
27 */ 27 */
28.globl _tlbia; 28.globl _tlbia;
29.type _tlbia, @function
29.align 4; 30.align 4;
30_tlbia: 31_tlbia:
31 addik r12, r0, 63 /* flush all entries (63 - 3) */ 32 addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
32 /* isync */ 33 /* isync */
33_tlbia_1: 34_tlbia_1:
34 mts rtlbx, r12 35 mts rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
41 /* sync */ 42 /* sync */
42 rtsd r15, 8 43 rtsd r15, 8
43 nop 44 nop
45 .size _tlbia, . - _tlbia
44 46
45/* 47/*
46 * Flush MMU TLB for a particular address (in r5) 48 * Flush MMU TLB for a particular address (in r5)
47 */ 49 */
48.globl _tlbie; 50.globl _tlbie;
51.type _tlbie, @function
49.align 4; 52.align 4;
50_tlbie: 53_tlbie:
51 mts rtlbsx, r5 /* look up the address in TLB */ 54 mts rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
59 rtsd r15, 8 62 rtsd r15, 8
60 nop 63 nop
61 64
65 .size _tlbie, . - _tlbie
66
62/* 67/*
63 * Allocate TLB entry for early console 68 * Allocate TLB entry for early console
64 */ 69 */
65.globl early_console_reg_tlb_alloc; 70.globl early_console_reg_tlb_alloc;
71.type early_console_reg_tlb_alloc, @function
66.align 4; 72.align 4;
67early_console_reg_tlb_alloc: 73early_console_reg_tlb_alloc:
68 /* 74 /*
69 * Load a TLB entry for the UART, so that microblaze_progress() can use 75 * Load a TLB entry for the UART, so that microblaze_progress() can use
70 * the UARTs nice and early. We use a 4k real==virtual mapping. 76 * the UARTs nice and early. We use a 4k real==virtual mapping.
71 */ 77 */
72 ori r4, r0, 63 78 ori r4, r0, MICROBLAZE_TLB_SIZE - 1
73 mts rtlbx, r4 /* TLB slot 2 */ 79 mts rtlbx, r4 /* TLB slot 2 */
74 80
75 or r4,r5,r0 81 or r4,r5,r0
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
86 rtsd r15, 8 92 rtsd r15, 8
87 nop 93 nop
88 94
95 .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
96
89/* 97/*
90 * Copy a whole page (4096 bytes). 98 * Copy a whole page (4096 bytes).
91 */ 99 */
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
104#define DCACHE_LINE_BYTES (4 * 4) 112#define DCACHE_LINE_BYTES (4 * 4)
105 113
106.globl copy_page; 114.globl copy_page;
115.type copy_page, @function
107.align 4; 116.align 4;
108copy_page: 117copy_page:
109 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1 118 ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@ _copy_page_loop:
118 addik r11, r11, -1 127 addik r11, r11, -1
119 rtsd r15, 8 128 rtsd r15, 8
120 nop 129 nop
130
131 .size copy_page, . - copy_page
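
[Reviewer note] Two independent cleanups in misc.S: the hard-coded 63 becomes MICROBLAZE_TLB_SIZE - 1, and every routine gains .type/.size directives so the symbols carry proper function metadata for kallsyms, oprofile and backtraces. What _tlbia now does, rendered as pseudo-C under the assumption that the size stays 64 as the old literal suggested (invalidate_tlb_slot() is a hypothetical stand-in for the privileged mts rtlbx/rtlbhi sequence):

    #define MICROBLAZE_TLB_SIZE 64  /* assumption: old literal 63 == SIZE - 1 */

    static void tlbia_sketch(void)
    {
            int slot;

            /* Walk down to slot 3, leaving the pinned slots 0-2 alone. */
            for (slot = MICROBLAZE_TLB_SIZE - 1; slot >= 3; slot--)
                    invalidate_tlb_slot(slot);      /* hypothetical helper */
    }
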
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 5a45b1adfef1..cbecf110dc30 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -12,7 +12,6 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/elf.h> 13#include <linux/elf.h>
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/slab.h>
16#include <linux/fs.h> 15#include <linux/fs.h>
17#include <linux/string.h> 16#include <linux/string.h>
18 17
diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c
index 1c6d684996d7..0dc755286d38 100644
--- a/arch/microblaze/kernel/of_platform.c
+++ b/arch/microblaze/kernel/of_platform.c
@@ -17,7 +17,6 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mod_devicetable.h> 19#include <linux/mod_devicetable.h>
20#include <linux/slab.h>
21#include <linux/pci.h> 20#include <linux/pci.h>
22#include <linux/of.h> 21#include <linux/of.h>
23#include <linux/of_device.h> 22#include <linux/of_device.h>
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 812f1bf06c9e..09bed44dfcd3 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <asm/system.h> 16#include <asm/system.h>
17#include <asm/pgalloc.h> 17#include <asm/pgalloc.h>
18#include <asm/uaccess.h> /* for USER_DS macros */
18#include <asm/cacheflush.h> 19#include <asm/cacheflush.h>
19 20
20void show_regs(struct pt_regs *regs) 21void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
74 75
75void default_idle(void) 76void default_idle(void)
76{ 77{
77 if (!hlt_counter) { 78 if (likely(hlt_counter)) {
79 while (!need_resched())
80 cpu_relax();
81 } else {
78 clear_thread_flag(TIF_POLLING_NRFLAG); 82 clear_thread_flag(TIF_POLLING_NRFLAG);
79 smp_mb__after_clear_bit(); 83 smp_mb__after_clear_bit();
80 local_irq_disable(); 84 local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
82 cpu_sleep(); 86 cpu_sleep();
83 local_irq_enable(); 87 local_irq_enable();
84 set_thread_flag(TIF_POLLING_NRFLAG); 88 set_thread_flag(TIF_POLLING_NRFLAG);
85 } else 89 }
86 while (!need_resched())
87 cpu_relax();
88} 90}
89 91
90void cpu_idle(void) 92void cpu_idle(void)
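
[Reviewer note] The rewrite turns default_idle() inside out so the hlt_counter (polling) case reads as the primary branch and the sleep path as the else, dropping the while loop that used to dangle after the old else. likely() here, and the unlikely() annotations in the mm/fault.c hunks further down, are the standard kernel branch-layout hints:

    /* Standard definitions from <linux/compiler.h>: */
    #define likely(x)       __builtin_expect(!!(x), 1)
    #define unlikely(x)     __builtin_expect(!!(x), 0)

    /* gcc places the expected outcome on the fall-through path, keeping the
     * hot loop (here: cpu_relax() polling) free of taken branches. */
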
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index 6d6349a145f9..a4a7770c6140 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -75,7 +75,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
75{ 75{
76 int rval; 76 int rval;
77 unsigned long val = 0; 77 unsigned long val = 0;
78 unsigned long copied;
79 78
80 switch (request) { 79 switch (request) {
81 /* Read/write the word at location ADDR in the registers. */ 80 /* Read/write the word at location ADDR in the registers. */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index bb8c4b9ccb80..17c98dbcec88 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -22,7 +22,10 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/bug.h> 23#include <linux/bug.h>
24#include <linux/param.h> 24#include <linux/param.h>
25#include <linux/pci.h>
25#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/of_platform.h>
28#include <linux/dma-mapping.h>
26#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
27#include <asm/entry.h> 30#include <asm/entry.h>
28#include <asm/cpuinfo.h> 31#include <asm/cpuinfo.h>
@@ -54,14 +57,10 @@ void __init setup_arch(char **cmdline_p)
54 57
55 microblaze_cache_init(); 58 microblaze_cache_init();
56 59
57 invalidate_dcache();
58 enable_dcache();
59
60 invalidate_icache();
61 enable_icache();
62
63 setup_memory(); 60 setup_memory();
64 61
62 xilinx_pci_init();
63
65#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) 64#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
66 printk(KERN_NOTICE "Self modified code enable\n"); 65 printk(KERN_NOTICE "Self modified code enable\n");
67#endif 66#endif
@@ -93,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
93} 92}
94#endif /* CONFIG_MTD_UCLINUX_EBSS */ 93#endif /* CONFIG_MTD_UCLINUX_EBSS */
95 94
95#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
96#define eprintk early_printk
97#else
98#define eprintk printk
99#endif
100
96void __init machine_early_init(const char *cmdline, unsigned int ram, 101void __init machine_early_init(const char *cmdline, unsigned int ram,
97 unsigned int fdt, unsigned int msr) 102 unsigned int fdt, unsigned int msr)
98{ 103{
@@ -140,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
140 setup_early_printk(NULL); 145 setup_early_printk(NULL);
141#endif 146#endif
142 147
143 early_printk("Ramdisk addr 0x%08x, ", ram); 148 eprintk("Ramdisk addr 0x%08x, ", ram);
144 if (fdt) 149 if (fdt)
145 early_printk("FDT at 0x%08x\n", fdt); 150 eprintk("FDT at 0x%08x\n", fdt);
146 else 151 else
147 early_printk("Compiled-in FDT at 0x%08x\n", 152 eprintk("Compiled-in FDT at 0x%08x\n",
148 (unsigned int)_fdt_start); 153 (unsigned int)_fdt_start);
149 154
150#ifdef CONFIG_MTD_UCLINUX 155#ifdef CONFIG_MTD_UCLINUX
151 early_printk("Found romfs @ 0x%08x (0x%08x)\n", 156 eprintk("Found romfs @ 0x%08x (0x%08x)\n",
152 romfs_base, romfs_size); 157 romfs_base, romfs_size);
153 early_printk("#### klimit %p ####\n", old_klimit); 158 eprintk("#### klimit %p ####\n", old_klimit);
154 BUG_ON(romfs_size < 0); /* What else can we do? */ 159 BUG_ON(romfs_size < 0); /* What else can we do? */
155 160
156 early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", 161 eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
157 romfs_size, romfs_base, (unsigned)&_ebss); 162 romfs_size, romfs_base, (unsigned)&_ebss);
158 163
159 early_printk("New klimit: 0x%08x\n", (unsigned)klimit); 164 eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
160#endif 165#endif
161 166
162#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 167#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
163 if (msr) 168 if (msr)
164 early_printk("!!!Your kernel has setup MSR instruction but " 169 eprintk("!!!Your kernel has setup MSR instruction but "
165 "CPU don't have it %d\n", msr); 170 "CPU don't have it %d\n", msr);
166#else 171#else
167 if (!msr) 172 if (!msr)
168 early_printk("!!!Your kernel not setup MSR instruction but " 173 eprintk("!!!Your kernel not setup MSR instruction but "
169 "CPU have it %d\n", msr); 174 "CPU have it %d\n", msr);
170#endif 175#endif
171 176
@@ -188,3 +193,37 @@ static int microblaze_debugfs_init(void)
188} 193}
189arch_initcall(microblaze_debugfs_init); 194arch_initcall(microblaze_debugfs_init);
190#endif 195#endif
196
197static int dflt_bus_notify(struct notifier_block *nb,
198 unsigned long action, void *data)
199{
200 struct device *dev = data;
201
 202 /* We are only interested in device addition */
203 if (action != BUS_NOTIFY_ADD_DEVICE)
204 return 0;
205
206 set_dma_ops(dev, &dma_direct_ops);
207
208 return NOTIFY_DONE;
209}
210
211static struct notifier_block dflt_plat_bus_notifier = {
212 .notifier_call = dflt_bus_notify,
213 .priority = INT_MAX,
214};
215
216static struct notifier_block dflt_of_bus_notifier = {
217 .notifier_call = dflt_bus_notify,
218 .priority = INT_MAX,
219};
220
221static int __init setup_bus_notifier(void)
222{
223 bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
224 bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier);
225
226 return 0;
227}
228
229arch_initcall(setup_bus_notifier);
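
[Reviewer note] The notifier pair is how every platform and OF device picks up dma_direct_ops the moment it is registered, which in turn lets the generic dma-mapping code (enabled by the HAVE_DMA_ATTRS select in the Kconfig hunk) dispatch through per-device ops. Roughly how a mapping call resolves afterwards, simplified from the asm-generic inline of that era rather than quoted from it:

    static inline dma_addr_t dma_map_single_sketch(struct device *dev,
                    void *ptr, size_t size, enum dma_data_direction dir)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            /* ops == &dma_direct_ops, installed by dflt_bus_notify() above */
            return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
                                 size, dir, NULL);
    }
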
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 9f3c205fb75b..f4e00b7f1259 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -30,6 +30,7 @@
30#include <linux/semaphore.h> 30#include <linux/semaphore.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <linux/unistd.h> 32#include <linux/unistd.h>
33#include <linux/slab.h>
33 34
34#include <asm/syscalls.h> 35#include <asm/syscalls.h>
35 36
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf805f31b..5e4570ef515c 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@ void trap_init(void)
22 __enable_hw_exceptions(); 22 __enable_hw_exceptions();
23} 23}
24 24
25static int kstack_depth_to_print = 24; 25static unsigned long kstack_depth_to_print = 24;
26 26
27static int __init kstack_setup(char *s) 27static int __init kstack_setup(char *s)
28{ 28{
29 kstack_depth_to_print = strict_strtoul(s, 0, NULL); 29 return !strict_strtoul(s, 0, &kstack_depth_to_print);
30
31 return 1;
32} 30}
33__setup("kstack=", kstack_setup); 31__setup("kstack=", kstack_setup);
34 32
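
[Reviewer note] The new one-liner leans on the conventions of both APIs: strict_strtoul() returns 0 on success and only writes the result then, while a __setup() handler returns 1 to mark the option as consumed, so negating the parse result does both jobs at once. The same logic in longhand, for clarity:

    static int __init kstack_setup_longhand(char *s)
    {
            /* A parse error leaves kstack_depth_to_print at its default. */
            if (strict_strtoul(s, 0, &kstack_depth_to_print))
                    return 0;       /* malformed value: not consumed */
            return 1;               /* parsed and stored: consumed */
    }
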
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index b579db068c06..4dfe47d3cd91 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,5 +10,4 @@ else
10lib-y += memcpy.o memmove.o 10lib-y += memcpy.o memmove.o
11endif 11endif
12 12
13lib-$(CONFIG_NO_MMU) += uaccess.o 13lib-y += uaccess_old.o
14lib-$(CONFIG_MMU) += uaccess_old.o
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 02e3ab4eddf3..fdc48bb065d8 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -30,8 +30,9 @@
30 */ 30 */
31 31
32#include <linux/linkage.h> 32#include <linux/linkage.h>
33 33 .text
34 .globl memcpy 34 .globl memcpy
35 .type memcpy, @function
35 .ent memcpy 36 .ent memcpy
36 37
37memcpy: 38memcpy:
@@ -345,9 +346,11 @@ a_done:
345 rtsd r15, 8 346 rtsd r15, 8
346 nop 347 nop
347 348
349.size memcpy, . - memcpy
348.end memcpy 350.end memcpy
349/*----------------------------------------------------------------------------*/ 351/*----------------------------------------------------------------------------*/
350 .globl memmove 352 .globl memmove
353 .type memmove, @function
351 .ent memmove 354 .ent memmove
352 355
353memmove: 356memmove:
@@ -659,4 +662,5 @@ d_done:
659 rtsd r15, 8 662 rtsd r15, 8
660 nop 663 nop
661 664
665.size memmove, . - memmove
662.end memmove 666.end memmove
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index cc2108b6b260..014bac92bdff 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
53 const uint32_t *i_src; 53 const uint32_t *i_src;
54 uint32_t *i_dst; 54 uint32_t *i_dst;
55 55
56 if (c >= 4) { 56 if (likely(c >= 4)) {
57 unsigned value, buf_hold; 57 unsigned value, buf_hold;
58 58
 59 /* Align the destination to a word boundary. */ 59 /* Align the destination to a word boundary. */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index 4df851d41a29..ecfb663e1fc1 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -33,22 +33,23 @@
33#ifdef __HAVE_ARCH_MEMSET 33#ifdef __HAVE_ARCH_MEMSET
34void *memset(void *v_src, int c, __kernel_size_t n) 34void *memset(void *v_src, int c, __kernel_size_t n)
35{ 35{
36
37 char *src = v_src; 36 char *src = v_src;
38#ifdef CONFIG_OPT_LIB_FUNCTION 37#ifdef CONFIG_OPT_LIB_FUNCTION
39 uint32_t *i_src; 38 uint32_t *i_src;
40 uint32_t w32; 39 uint32_t w32 = 0;
41#endif 40#endif
42 /* Truncate c to 8 bits */ 41 /* Truncate c to 8 bits */
43 c = (c & 0xFF); 42 c = (c & 0xFF);
44 43
45#ifdef CONFIG_OPT_LIB_FUNCTION 44#ifdef CONFIG_OPT_LIB_FUNCTION
46 /* Make a repeating word out of it */ 45 if (unlikely(c)) {
47 w32 = c; 46 /* Make a repeating word out of it */
48 w32 |= w32 << 8; 47 w32 = c;
49 w32 |= w32 << 16; 48 w32 |= w32 << 8;
49 w32 |= w32 << 16;
50 }
50 51
51 if (n >= 4) { 52 if (likely(n >= 4)) {
52 /* Align the destination to a word boundary */ 53 /* Align the destination to a word boundary */
 53 /* This is done in an endian independent manner */ 54 /* This is done in an endian independent manner */
54 switch ((unsigned) src & 3) { 55 switch ((unsigned) src & 3) {
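
[Reviewer note] The optimised path builds the fill word once and then stores whole words; the new code also skips the replication when the fill byte is zero, the overwhelmingly common memset() call. The replication step in isolation:

    #include <stdint.h>

    static uint32_t repeat_byte32(int c)
    {
            uint32_t w32 = 0;

            c &= 0xFF;                      /* truncate to 8 bits, as above */
            if (c) {                        /* zero already repeats itself */
                    w32 = c;
                    w32 |= w32 << 8;        /* 0x000000AB -> 0x0000ABAB */
                    w32 |= w32 << 16;       /* 0x0000ABAB -> 0xABABABAB */
            }
            return w32;
    }
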
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
deleted file mode 100644
index a853fe089c44..000000000000
--- a/arch/microblaze/lib/uaccess.c
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#include <linux/string.h>
10#include <asm/uaccess.h>
11
12#include <asm/bug.h>
13
14long strnlen_user(const char __user *src, long count)
15{
16 return strlen(src) + 1;
17}
18
19#define __do_strncpy_from_user(dst, src, count, res) \
20 do { \
21 char *tmp; \
22 strncpy(dst, src, count); \
23 for (tmp = dst; *tmp && count > 0; tmp++, count--) \
24 ; \
25 res = (tmp - dst); \
26 } while (0)
27
28long __strncpy_from_user(char *dst, const char __user *src, long count)
29{
30 long res;
31 __do_strncpy_from_user(dst, src, count, res);
32 return res;
33}
34
35long strncpy_from_user(char *dst, const char __user *src, long count)
36{
37 long res = -EFAULT;
38 if (access_ok(VERIFY_READ, src, 1))
39 __do_strncpy_from_user(dst, src, count, res);
40 return res;
41}
42
43unsigned long __copy_tofrom_user(void __user *to,
44 const void __user *from, unsigned long size)
45{
46 memcpy(to, from, size);
47 return 0;
48}
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index 67f991c14b8a..5810cec54a7a 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -22,6 +22,7 @@
22 22
23 .text 23 .text
24.globl __strncpy_user; 24.globl __strncpy_user;
25.type __strncpy_user, @function
25.align 4; 26.align 4;
26__strncpy_user: 27__strncpy_user:
27 28
@@ -50,7 +51,7 @@ __strncpy_user:
503: 513:
51 rtsd r15,8 52 rtsd r15,8
52 nop 53 nop
53 54 .size __strncpy_user, . - __strncpy_user
54 55
55 .section .fixup, "ax" 56 .section .fixup, "ax"
56 .align 2 57 .align 2
@@ -72,6 +73,7 @@ __strncpy_user:
72 73
73 .text 74 .text
74.globl __strnlen_user; 75.globl __strnlen_user;
76.type __strnlen_user, @function
75.align 4; 77.align 4;
76__strnlen_user: 78__strnlen_user:
77 addik r3,r6,0 79 addik r3,r6,0
@@ -90,7 +92,7 @@ __strnlen_user:
903: 923:
91 rtsd r15,8 93 rtsd r15,8
92 nop 94 nop
93 95 .size __strnlen_user, . - __strnlen_user
94 96
95 .section .fixup,"ax" 97 .section .fixup,"ax"
964: 984:
@@ -108,6 +110,7 @@ __strnlen_user:
108 */ 110 */
109 .text 111 .text
110.globl __copy_tofrom_user; 112.globl __copy_tofrom_user;
113.type __copy_tofrom_user, @function
111.align 4; 114.align 4;
112__copy_tofrom_user: 115__copy_tofrom_user:
113 /* 116 /*
@@ -116,20 +119,34 @@ __copy_tofrom_user:
116 * r7, r3 - count 119 * r7, r3 - count
117 * r4 - tempval 120 * r4 - tempval
118 */ 121 */
119 addik r3,r7,0 122 beqid r7, 3f /* zero size is not likely */
120 beqi r3,3f 123 andi r3, r7, 0x3 /* filter add count */
1211: 124 bneid r3, 4f /* if is odd value then byte copying */
122 lbu r4,r6,r0 125 or r3, r5, r6 /* find if is any to/from unaligned */
123 addik r6,r6,1 126 andi r3, r3, 0x3 /* mask unaligned */
1242: 127 bneid r3, 1f /* it is unaligned -> then jump */
125 sb r4,r5,r0 128 or r3, r0, r0
126 addik r3,r3,-1 129
127 bneid r3,1b 130/* at least one 4 byte copy */
128 addik r5,r5,1 /* delay slot */ 1315: lw r4, r6, r3
1326: sw r4, r5, r3
133 addik r7, r7, -4
134 bneid r7, 5b
135 addik r3, r3, 4
136 addik r3, r7, 0
137 rtsd r15, 8
138 nop
1394: or r3, r0, r0
1401: lbu r4,r6,r3
1412: sb r4,r5,r3
142 addik r7,r7,-1
143 bneid r7,1b
144 addik r3,r3,1 /* delay slot */
1293: 1453:
146 addik r3,r7,0
130 rtsd r15,8 147 rtsd r15,8
131 nop 148 nop
132 149 .size __copy_tofrom_user, . - __copy_tofrom_user
133 150
134 .section __ex_table,"a" 151 .section __ex_table,"a"
135 .word 1b,3b,2b,3b 152 .word 1b,3b,2b,3b,5b,3b,6b,3b
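
[Reviewer note] The rewritten __copy_tofrom_user gains a fast path: when the count is a nonzero multiple of 4 and both pointers are word-aligned, it moves 32-bit words (labels 5:/6:, now also covered by the extended __ex_table); anything else falls back to the old byte loop. A C rendering of the control flow, with the exception fixups omitted (on a fault the fixup lands at label 3 and the routine returns the bytes left uncopied):

    static unsigned long copy_tofrom_sketch(void *to, const void *from,
                                            unsigned long n)
    {
            char *d = to;
            const char *s = from;

            if (n == 0)
                    return 0;
            if (!(n & 3) && !(((unsigned long)to | (unsigned long)from) & 3)) {
                    unsigned int *dw = (unsigned int *)d;
                    const unsigned int *sw = (const unsigned int *)s;

                    for (; n; n -= 4)       /* at least one 4-byte copy */
                            *dw++ = *sw++;
            } else {
                    for (; n; n--)          /* unaligned or odd-sized */
                            *d++ = *s++;
            }
            return n;       /* 0 == everything copied */
    }
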
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 6c8a924d9e26..09c49ed87235 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -2,6 +2,6 @@
2# Makefile 2# Makefile
3# 3#
4 4
5obj-y := init.o 5obj-y := consistent.o init.o
6 6
7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o 7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
new file mode 100644
index 000000000000..f956e24fe49c
--- /dev/null
+++ b/arch/microblaze/mm/consistent.c
@@ -0,0 +1,247 @@
1/*
2 * Microblaze support for cache consistent memory.
3 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
4 * Copyright (C) 2010 PetaLogix
5 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
6 *
7 * Based on PowerPC version derived from arch/arm/mm/consistent.c
8 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
9 * Copyright (C) 2000 Russell King
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/module.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/ptrace.h>
24#include <linux/mman.h>
25#include <linux/mm.h>
26#include <linux/swap.h>
27#include <linux/stddef.h>
28#include <linux/vmalloc.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/gfp.h>
36
37#include <asm/pgalloc.h>
38#include <linux/io.h>
39#include <linux/hardirq.h>
40#include <asm/mmu_context.h>
41#include <asm/mmu.h>
42#include <linux/uaccess.h>
43#include <asm/pgtable.h>
44#include <asm/cpuinfo.h>
45
46#ifndef CONFIG_MMU
47
 48/* I have to use dcache values because I can't rely on the RAM size */
49#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
50
51/*
52 * Consistent memory allocators. Used for DMA devices that want to
53 * share uncached memory with the processor core.
54 * My crufty no-MMU approach is simple. In the HW platform we can optionally
55 * mirror the DDR up above the processor cacheable region. So, memory accessed
 56 * in this mirror region will not be cached. It's allocated from the same
57 * pool as normal memory, but the handle we return is shifted up into the
58 * uncached region. This will no doubt cause big problems if memory allocated
59 * here is not also freed properly. -- JW
60 */
61void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
62{
63 struct page *page, *end, *free;
64 unsigned long order;
65 void *ret, *virt;
66
67 if (in_interrupt())
68 BUG();
69
70 size = PAGE_ALIGN(size);
71 order = get_order(size);
72
73 page = alloc_pages(gfp, order);
74 if (!page)
75 goto no_page;
76
77 /* We could do with a page_to_phys and page_to_bus here. */
78 virt = page_address(page);
79 ret = ioremap(virt_to_phys(virt), size);
80 if (!ret)
81 goto no_remap;
82
83 /*
84 * Here's the magic! Note if the uncached shadow is not implemented,
85 * it's up to the calling code to also test that condition and make
 86 * other arrangements, such as manually flushing the cache and so on.
87 */
88#ifdef CONFIG_XILINX_UNCACHED_SHADOW
89 ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
90#endif
 91 /* dma_handle is the same as the physical (shadowed) address */
92 *dma_handle = (dma_addr_t)ret;
93
94 /*
95 * free wasted pages. We skip the first page since we know
96 * that it will have count = 1 and won't require freeing.
97 * We also mark the pages in use as reserved so that
98 * remap_page_range works.
99 */
100 page = virt_to_page(virt);
101 free = page + (size >> PAGE_SHIFT);
102 end = page + (1 << order);
103
104 for (; page < end; page++) {
105 init_page_count(page);
106 if (page >= free)
107 __free_page(page);
108 else
109 SetPageReserved(page);
110 }
111
112 return ret;
113no_remap:
114 __free_pages(page, order);
115no_page:
116 return NULL;
117}
118
119#else
120
121void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
122{
123 int order, err, i;
124 unsigned long page, va, flags;
125 phys_addr_t pa;
126 struct vm_struct *area;
127 void *ret;
128
129 if (in_interrupt())
130 BUG();
131
132 /* Only allocate page size areas. */
133 size = PAGE_ALIGN(size);
134 order = get_order(size);
135
136 page = __get_free_pages(gfp, order);
137 if (!page) {
138 BUG();
139 return NULL;
140 }
141
142 /*
143 * we need to ensure that there are no cachelines in use,
144 * or worse dirty in this area.
145 */
146 flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size);
147
148 /* Allocate some common virtual space to map the new pages. */
149 area = get_vm_area(size, VM_ALLOC);
150 if (area == NULL) {
151 free_pages(page, order);
152 return NULL;
153 }
154 va = (unsigned long) area->addr;
155 ret = (void *)va;
156
157 /* This gives us the real physical address of the first page. */
158 *dma_handle = pa = virt_to_bus((void *)page);
159
 160 /* MS: This is the whole magic - use cache-inhibited pages */
161 flags = _PAGE_KERNEL | _PAGE_NO_CACHE;
162
163 /*
164 * Set refcount=1 on all pages in an order>0
165 * allocation so that vfree() will actually
166 * free all pages that were allocated.
167 */
168 if (order > 0) {
169 struct page *rpage = virt_to_page(page);
170 for (i = 1; i < (1 << order); i++)
171 init_page_count(rpage+i);
172 }
173
174 err = 0;
175 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
176 err = map_page(va+i, pa+i, flags);
177
178 if (err) {
179 vfree((void *)va);
180 return NULL;
181 }
182
183 return ret;
184}
185#endif /* CONFIG_MMU */
186EXPORT_SYMBOL(consistent_alloc);
187
188/*
189 * free page(s) as defined by the above mapping.
190 */
191void consistent_free(void *vaddr)
192{
193 if (in_interrupt())
194 BUG();
195
196 /* Clear SHADOW_MASK bit in address, and free as per usual */
197#ifdef CONFIG_XILINX_UNCACHED_SHADOW
198 vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
199#endif
200 vfree(vaddr);
201}
202EXPORT_SYMBOL(consistent_free);
203
204/*
205 * make an area consistent.
206 */
207void consistent_sync(void *vaddr, size_t size, int direction)
208{
209 unsigned long start;
210 unsigned long end;
211
212 start = (unsigned long)vaddr;
213
214 /* Convert start address back down to unshadowed memory region */
215#ifdef CONFIG_XILINX_UNCACHED_SHADOW
216 start &= ~UNCACHED_SHADOW_MASK;
217#endif
218 end = start + size;
219
220 switch (direction) {
221 case PCI_DMA_NONE:
222 BUG();
223 case PCI_DMA_FROMDEVICE: /* invalidate only */
224 flush_dcache_range(start, end);
225 break;
226 case PCI_DMA_TODEVICE: /* writeback only */
227 flush_dcache_range(start, end);
228 break;
229 case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
230 flush_dcache_range(start, end);
231 break;
232 }
233}
234EXPORT_SYMBOL(consistent_sync);
235
236/*
 237 * consistent_sync_page makes memory consistent. Identical
238 * to consistent_sync, but takes a struct page instead of a
239 * virtual address
240 */
241void consistent_sync_page(struct page *page, unsigned long offset,
242 size_t size, int direction)
243{
244 unsigned long start = (unsigned long)page_address(page) + offset;
245 consistent_sync((void *)start, size, direction);
246}
247EXPORT_SYMBOL(consistent_sync_page);
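
[Reviewer note] A hypothetical driver fragment showing the intended use of the pair, say for a descriptor ring the CPU and device share without cache maintenance (all names are made up; on noMMU builds the returned pointer carries the uncached-shadow bit, which consistent_free() strips again):

    #include <linux/errno.h>
    #include <linux/gfp.h>

    struct mydev_ring {
            void *desc;             /* CPU view, uncached */
            dma_addr_t desc_dma;    /* device view */
    };

    static int mydev_ring_alloc(struct mydev_ring *ring, size_t bytes)
    {
            ring->desc = consistent_alloc(GFP_KERNEL, bytes, &ring->desc_dma);
            if (!ring->desc)
                    return -ENOMEM;
            return 0;
    }

    static void mydev_ring_free(struct mydev_ring *ring)
    {
            consistent_free(ring->desc);
    }
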
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a66ff2..7af87f4b2c2c 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
106 regs->esr = error_code; 106 regs->esr = error_code;
107 107
108 /* On a kernel SLB miss we can only check for a valid exception entry */ 108 /* On a kernel SLB miss we can only check for a valid exception entry */
109 if (kernel_mode(regs) && (address >= TASK_SIZE)) { 109 if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
110 printk(KERN_WARNING "kernel task_size exceed"); 110 printk(KERN_WARNING "kernel task_size exceed");
111 _exception(SIGSEGV, regs, code, address); 111 _exception(SIGSEGV, regs, code, address);
112 } 112 }
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
122 } 122 }
123#endif /* CONFIG_KGDB */ 123#endif /* CONFIG_KGDB */
124 124
125 if (in_atomic() || !mm) { 125 if (unlikely(in_atomic() || !mm)) {
126 if (kernel_mode(regs)) 126 if (kernel_mode(regs))
127 goto bad_area_nosemaphore; 127 goto bad_area_nosemaphore;
128 128
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
150 * source. If this is invalid we can skip the address space check, 150 * source. If this is invalid we can skip the address space check,
151 * thus avoiding the deadlock. 151 * thus avoiding the deadlock.
152 */ 152 */
153 if (!down_read_trylock(&mm->mmap_sem)) { 153 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
154 if (kernel_mode(regs) && !search_exception_tables(regs->pc)) 154 if (kernel_mode(regs) && !search_exception_tables(regs->pc))
155 goto bad_area_nosemaphore; 155 goto bad_area_nosemaphore;
156 156
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
158 } 158 }
159 159
160 vma = find_vma(mm, address); 160 vma = find_vma(mm, address);
161 if (!vma) 161 if (unlikely(!vma))
162 goto bad_area; 162 goto bad_area;
163 163
164 if (vma->vm_start <= address) 164 if (vma->vm_start <= address)
165 goto good_area; 165 goto good_area;
166 166
167 if (!(vma->vm_flags & VM_GROWSDOWN)) 167 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
168 goto bad_area; 168 goto bad_area;
169 169
170 if (!is_write) 170 if (unlikely(!is_write))
171 goto bad_area; 171 goto bad_area;
172 172
173 /* 173 /*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
179 * before setting the user r1. Thus we allow the stack to 179 * before setting the user r1. Thus we allow the stack to
180 * expand to 1MB without further checks. 180 * expand to 1MB without further checks.
181 */ 181 */
182 if (address + 0x100000 < vma->vm_end) { 182 if (unlikely(address + 0x100000 < vma->vm_end)) {
183 183
184 /* get user regs even if this fault is in kernel mode */ 184 /* get user regs even if this fault is in kernel mode */
185 struct pt_regs *uregs = current->thread.regs; 185 struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
209 code = SEGV_ACCERR; 209 code = SEGV_ACCERR;
210 210
211 /* a write */ 211 /* a write */
212 if (is_write) { 212 if (unlikely(is_write)) {
213 if (!(vma->vm_flags & VM_WRITE)) 213 if (unlikely(!(vma->vm_flags & VM_WRITE)))
214 goto bad_area; 214 goto bad_area;
215 /* a read */ 215 /* a read */
216 } else { 216 } else {
217 /* protection fault */ 217 /* protection fault */
218 if (error_code & 0x08000000) 218 if (unlikely(error_code & 0x08000000))
219 goto bad_area; 219 goto bad_area;
220 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) 220 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
221 goto bad_area; 221 goto bad_area;
222 } 222 }
223 223
@@ -235,7 +235,7 @@ survive:
235 goto do_sigbus; 235 goto do_sigbus;
236 BUG(); 236 BUG();
237 } 237 }
238 if (fault & VM_FAULT_MAJOR) 238 if (unlikely(fault & VM_FAULT_MAJOR))
239 current->maj_flt++; 239 current->maj_flt++;
240 else 240 else
241 current->min_flt++; 241 current->min_flt++;
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a57cedf36715..f42c2dde8b1c 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -15,6 +15,7 @@
15#include <linux/initrd.h> 15#include <linux/initrd.h>
16#include <linux/pagemap.h> 16#include <linux/pagemap.h>
17#include <linux/pfn.h> 17#include <linux/pfn.h>
18#include <linux/slab.h>
18#include <linux/swap.h> 19#include <linux/swap.h>
19 20
20#include <asm/page.h> 21#include <asm/page.h>
@@ -23,6 +24,9 @@
23#include <asm/sections.h> 24#include <asm/sections.h>
24#include <asm/tlb.h> 25#include <asm/tlb.h>
25 26
 27/* Used for both MMU and noMMU because of the generic PCI code */
28int mem_init_done;
29
26#ifndef CONFIG_MMU 30#ifndef CONFIG_MMU
27unsigned int __page_offset; 31unsigned int __page_offset;
28EXPORT_SYMBOL(__page_offset); 32EXPORT_SYMBOL(__page_offset);
@@ -30,7 +34,6 @@ EXPORT_SYMBOL(__page_offset);
30#else 34#else
31DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 35DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32 36
33int mem_init_done;
34static int init_bootmem_done; 37static int init_bootmem_done;
35#endif /* CONFIG_MMU */ 38#endif /* CONFIG_MMU */
36 39
@@ -163,7 +166,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
163 for (addr = begin; addr < end; addr += PAGE_SIZE) { 166 for (addr = begin; addr < end; addr += PAGE_SIZE) {
164 ClearPageReserved(virt_to_page(addr)); 167 ClearPageReserved(virt_to_page(addr));
165 init_page_count(virt_to_page(addr)); 168 init_page_count(virt_to_page(addr));
166 memset((void *)addr, 0xcc, PAGE_SIZE);
167 free_page(addr); 169 free_page(addr);
168 totalram_pages++; 170 totalram_pages++;
169 } 171 }
@@ -193,12 +195,6 @@ void free_initmem(void)
193 (unsigned long)(&__init_end)); 195 (unsigned long)(&__init_end));
194} 196}
195 197
196/* FIXME from arch/powerpc/mm/mem.c*/
197void show_mem(void)
198{
199 printk(KERN_NOTICE "%s\n", __func__);
200}
201
202void __init mem_init(void) 198void __init mem_init(void)
203{ 199{
204 high_memory = (void *)__va(memory_end); 200 high_memory = (void *)__va(memory_end);
@@ -208,20 +204,14 @@ void __init mem_init(void)
208 printk(KERN_INFO "Memory: %luk/%luk available\n", 204 printk(KERN_INFO "Memory: %luk/%luk available\n",
209 nr_free_pages() << (PAGE_SHIFT-10), 205 nr_free_pages() << (PAGE_SHIFT-10),
210 num_physpages << (PAGE_SHIFT-10)); 206 num_physpages << (PAGE_SHIFT-10));
211#ifdef CONFIG_MMU
212 mem_init_done = 1; 207 mem_init_done = 1;
213#endif
214} 208}
215 209
216#ifndef CONFIG_MMU 210#ifndef CONFIG_MMU
217/* Check against bounds of physical memory */ 211int page_is_ram(unsigned long pfn)
218int ___range_ok(unsigned long addr, unsigned long size)
219{ 212{
220 return ((addr < memory_start) || 213 return __range_ok(pfn, 0);
221 ((addr + size) > memory_end));
222} 214}
223EXPORT_SYMBOL(___range_ok);
224
225#else 215#else
226int page_is_ram(unsigned long pfn) 216int page_is_ram(unsigned long pfn)
227{ 217{
@@ -349,4 +339,27 @@ void __init *early_get_page(void)
349 } 339 }
350 return p; 340 return p;
351} 341}
342
352#endif /* CONFIG_MMU */ 343#endif /* CONFIG_MMU */
344
345void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
346{
347 if (mem_init_done)
348 return kmalloc(size, mask);
349 else
350 return alloc_bootmem(size);
351}
352
353void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
354{
355 void *p;
356
357 if (mem_init_done)
358 p = kzalloc(size, mask);
359 else {
360 p = alloc_bootmem(size);
361 if (p)
362 memset(p, 0, size);
363 }
364 return p;
365}
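
[Reviewer note] Moving mem_init_done out of the CONFIG_MMU block and adding these two helpers lets callers allocate the same way on either side of mem_init(): bootmem before the page allocator is up, kmalloc/kzalloc after. pcibios_alloc_controller() in pci-common.c below is the first user; the call-site pattern is simply:

    /* safe at any point during boot: picks bootmem or kzalloc itself */
    struct pci_controller *phb =
            zalloc_maybe_bootmem(sizeof(*phb), GFP_KERNEL);

(The if (p) check before the memset is belt and braces; alloc_bootmem() of this era panics rather than returning NULL on failure.)
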
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 2820081b21ab..d31312cde6ea 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -103,7 +103,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
103 area = get_vm_area(size, VM_IOREMAP); 103 area = get_vm_area(size, VM_IOREMAP);
104 if (area == NULL) 104 if (area == NULL)
105 return NULL; 105 return NULL;
106 v = VMALLOC_VMADDR(area->addr); 106 v = (unsigned long) area->addr;
107 } else { 107 } else {
108 v = (ioremap_bot -= size); 108 v = (ioremap_bot -= size);
109 } 109 }
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
154 err = 0; 154 err = 0;
155 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, 155 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
156 __pgprot(flags))); 156 __pgprot(flags)));
157 if (mem_init_done) 157 if (unlikely(mem_init_done))
158 flush_HPTE(0, va, pmd_val(*pd)); 158 flush_HPTE(0, va, pmd_val(*pd));
159 /* flush_HPTE(0, va, pg); */ 159 /* flush_HPTE(0, va, pg); */
160 } 160 }
diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile
new file mode 100644
index 000000000000..9889cc2e1294
--- /dev/null
+++ b/arch/microblaze/pci/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile
3#
4
5obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o
6obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o
diff --git a/arch/microblaze/pci/indirect_pci.c b/arch/microblaze/pci/indirect_pci.c
new file mode 100644
index 000000000000..25f18f017f21
--- /dev/null
+++ b/arch/microblaze/pci/indirect_pci.c
@@ -0,0 +1,163 @@
1/*
2 * Support for indirect PCI bridges.
3 *
4 * Copyright (C) 1998 Gabriel Paubert.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/init.h>
17
18#include <asm/io.h>
19#include <asm/prom.h>
20#include <asm/pci-bridge.h>
21
22static int
23indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
24 int len, u32 *val)
25{
26 struct pci_controller *hose = pci_bus_to_host(bus);
27 volatile void __iomem *cfg_data;
28 u8 cfg_type = 0;
29 u32 bus_no, reg;
30
31 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
32 if (bus->number != hose->first_busno)
33 return PCIBIOS_DEVICE_NOT_FOUND;
34 if (devfn != 0)
35 return PCIBIOS_DEVICE_NOT_FOUND;
36 }
37
38 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
39 if (bus->number != hose->first_busno)
40 cfg_type = 1;
41
42 bus_no = (bus->number == hose->first_busno) ?
43 hose->self_busno : bus->number;
44
45 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
46 reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
47 else
48 reg = offset & 0xfc; /* Only 3 bits for function */
49
50 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
51 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
52 (devfn << 8) | reg | cfg_type));
53 else
54 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
55 (devfn << 8) | reg | cfg_type));
56
57 /*
58 * Note: the caller has already checked that offset is
59 * suitably aligned and that len is 1, 2 or 4.
60 */
61 cfg_data = hose->cfg_data + (offset & 3); /* Only 3 bits for function */
62 switch (len) {
63 case 1:
64 *val = in_8(cfg_data);
65 break;
66 case 2:
67 *val = in_le16(cfg_data);
68 break;
69 default:
70 *val = in_le32(cfg_data);
71 break;
72 }
73 return PCIBIOS_SUCCESSFUL;
74}
75
76static int
77indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
78 int len, u32 val)
79{
80 struct pci_controller *hose = pci_bus_to_host(bus);
81 volatile void __iomem *cfg_data;
82 u8 cfg_type = 0;
83 u32 bus_no, reg;
84
85 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
86 if (bus->number != hose->first_busno)
87 return PCIBIOS_DEVICE_NOT_FOUND;
88 if (devfn != 0)
89 return PCIBIOS_DEVICE_NOT_FOUND;
90 }
91
92 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
93 if (bus->number != hose->first_busno)
94 cfg_type = 1;
95
96 bus_no = (bus->number == hose->first_busno) ?
97 hose->self_busno : bus->number;
98
99 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
100 reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
101 else
102 reg = offset & 0xfc;
103
104 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
105 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
106 (devfn << 8) | reg | cfg_type));
107 else
108 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
109 (devfn << 8) | reg | cfg_type));
110
 111 /* suppress setting of PCI_PRIMARY_BUS */
112 if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
113 if ((offset == PCI_PRIMARY_BUS) &&
114 (bus->number == hose->first_busno))
115 val &= 0xffffff00;
116
117 /* Workaround for PCI_28 Errata in 440EPx/GRx */
118 if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) &&
119 offset == PCI_CACHE_LINE_SIZE) {
120 val = 0;
121 }
122
123 /*
124 * Note: the caller has already checked that offset is
125 * suitably aligned and that len is 1, 2 or 4.
126 */
127 cfg_data = hose->cfg_data + (offset & 3);
128 switch (len) {
129 case 1:
130 out_8(cfg_data, val);
131 break;
132 case 2:
133 out_le16(cfg_data, val);
134 break;
135 default:
136 out_le32(cfg_data, val);
137 break;
138 }
139
140 return PCIBIOS_SUCCESSFUL;
141}
142
143static struct pci_ops indirect_pci_ops = {
144 .read = indirect_read_config,
145 .write = indirect_write_config,
146};
147
148void __init
149setup_indirect_pci(struct pci_controller *hose,
150 resource_size_t cfg_addr,
151 resource_size_t cfg_data, u32 flags)
152{
153 resource_size_t base = cfg_addr & PAGE_MASK;
154 void __iomem *mbase;
155
156 mbase = ioremap(base, PAGE_SIZE);
157 hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
158 if ((cfg_data & PAGE_MASK) != base)
159 mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
160 hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
161 hose->ops = &indirect_pci_ops;
162 hose->indirect_type = flags;
163}
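
[Reviewer note] Both accessors build the same indirect configuration address before touching the data window; only the low two bits of the offset survive into cfg_data, selecting the byte lane. The encoding, lifted straight from the code above:

    /* Configuration address as written to hose->cfg_addr above. */
    static u32 cfg_address(u32 bus, u32 devfn, u32 reg, u32 cfg_type)
    {
            return 0x80000000       /* enable bit */
                 | (bus << 16)      /* bus number (self_busno for the root) */
                 | (devfn << 8)     /* device and function */
                 | reg              /* dword-aligned register offset */
                 | cfg_type;        /* 1 = access behind a bridge */
    }
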
diff --git a/arch/microblaze/pci/iomap.c b/arch/microblaze/pci/iomap.c
new file mode 100644
index 000000000000..3fbf16f4e16c
--- /dev/null
+++ b/arch/microblaze/pci/iomap.c
@@ -0,0 +1,39 @@
1/*
2 * ppc64 "iomap" interface implementation.
3 *
4 * (C) Copyright 2004 Linus Torvalds
5 */
6#include <linux/init.h>
7#include <linux/pci.h>
8#include <linux/mm.h>
9#include <asm/io.h>
10#include <asm/pci-bridge.h>
11
12void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
13{
14 resource_size_t start = pci_resource_start(dev, bar);
15 resource_size_t len = pci_resource_len(dev, bar);
16 unsigned long flags = pci_resource_flags(dev, bar);
17
18 if (!len)
19 return NULL;
20 if (max && len > max)
21 len = max;
22 if (flags & IORESOURCE_IO)
23 return ioport_map(start, len);
24 if (flags & IORESOURCE_MEM)
25 return ioremap(start, len);
26 /* What? */
27 return NULL;
28}
29EXPORT_SYMBOL(pci_iomap);
30
31void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
32{
33 if (isa_vaddr_is_ioport(addr))
34 return;
35 if (pcibios_vaddr_is_ioport(addr))
36 return;
37 iounmap(addr);
38}
39EXPORT_SYMBOL(pci_iounmap);
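
[Reviewer note] With pci_iomap() in place, a driver can map a BAR without caring whether it is I/O- or memory-backed, since ioport_map() and ioremap() both hand back the same kind of __iomem cookie. A hypothetical probe fragment (error paths and pci_enable_device() omitted for brevity):

    static int mydev_probe_sketch(struct pci_dev *pdev)
    {
            void __iomem *regs = pci_iomap(pdev, 0, 0); /* 0 = whole BAR 0 */

            if (!regs)
                    return -ENOMEM;
            iowrite32(1, regs + 0x04);      /* hypothetical control register */
            pci_iounmap(pdev, regs);
            return 0;
    }
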
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
new file mode 100644
index 000000000000..740bb32ec57e
--- /dev/null
+++ b/arch/microblaze/pci/pci-common.c
@@ -0,0 +1,1643 @@
1/*
 2 * Contains common pci routines for ALL ppc platforms
3 * (based on pci_32.c and pci_64.c)
4 *
5 * Port for PPC64 David Engebretsen, IBM Corp.
6 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
7 *
8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9 * Rework, based on alpha PCI code.
10 *
11 * Common pmac/prep/chrp pci routines. -- Cort
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/mm.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
29#include <linux/slab.h>
30
31#include <asm/processor.h>
32#include <asm/io.h>
33#include <asm/prom.h>
34#include <asm/pci-bridge.h>
35#include <asm/byteorder.h>
36
37static DEFINE_SPINLOCK(hose_spinlock);
38LIST_HEAD(hose_list);
39
40/* XXX kill that some day ... */
41static int global_phb_number; /* Global phb counter */
42
43/* ISA Memory physical address */
44resource_size_t isa_mem_base;
45
46/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
47unsigned int pci_flags;
48
49static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
50
51void set_pci_dma_ops(struct dma_map_ops *dma_ops)
52{
53 pci_dma_ops = dma_ops;
54}
55
56struct dma_map_ops *get_pci_dma_ops(void)
57{
58 return pci_dma_ops;
59}
60EXPORT_SYMBOL(get_pci_dma_ops);
61
62int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
63{
64 return dma_set_mask(&dev->dev, mask);
65}
66
67int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
68{
69 int rc;
70
71 rc = dma_set_mask(&dev->dev, mask);
72 dev->dev.coherent_dma_mask = dev->dma_mask;
73
74 return rc;
75}
76
77struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
78{
79 struct pci_controller *phb;
80
81 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
82 if (!phb)
83 return NULL;
84 spin_lock(&hose_spinlock);
85 phb->global_number = global_phb_number++;
86 list_add_tail(&phb->list_node, &hose_list);
87 spin_unlock(&hose_spinlock);
88 phb->dn = dev;
89 phb->is_dynamic = mem_init_done;
90 return phb;
91}
92
93void pcibios_free_controller(struct pci_controller *phb)
94{
95 spin_lock(&hose_spinlock);
96 list_del(&phb->list_node);
97 spin_unlock(&hose_spinlock);
98
99 if (phb->is_dynamic)
100 kfree(phb);
101}
102
103static resource_size_t pcibios_io_size(const struct pci_controller *hose)
104{
105 return hose->io_resource.end - hose->io_resource.start + 1;
106}
107
108int pcibios_vaddr_is_ioport(void __iomem *address)
109{
110 int ret = 0;
111 struct pci_controller *hose;
112 resource_size_t size;
113
114 spin_lock(&hose_spinlock);
115 list_for_each_entry(hose, &hose_list, list_node) {
116 size = pcibios_io_size(hose);
117 if (address >= hose->io_base_virt &&
118 address < (hose->io_base_virt + size)) {
119 ret = 1;
120 break;
121 }
122 }
123 spin_unlock(&hose_spinlock);
124 return ret;
125}
126
127unsigned long pci_address_to_pio(phys_addr_t address)
128{
129 struct pci_controller *hose;
130 resource_size_t size;
131 unsigned long ret = ~0;
132
133 spin_lock(&hose_spinlock);
134 list_for_each_entry(hose, &hose_list, list_node) {
135 size = pcibios_io_size(hose);
136 if (address >= hose->io_base_phys &&
137 address < (hose->io_base_phys + size)) {
138 unsigned long base =
139 (unsigned long)hose->io_base_virt - _IO_BASE;
140 ret = base + (address - hose->io_base_phys);
141 break;
142 }
143 }
144 spin_unlock(&hose_spinlock);
145
146 return ret;
147}
148EXPORT_SYMBOL_GPL(pci_address_to_pio);
149
150/*
151 * Return the domain number for this bus.
152 */
153int pci_domain_nr(struct pci_bus *bus)
154{
155 struct pci_controller *hose = pci_bus_to_host(bus);
156
157 return hose->global_number;
158}
159EXPORT_SYMBOL(pci_domain_nr);
160
161/* This routine is meant to be used early during boot, when the
162 * PCI bus numbers have not yet been assigned, and you need to
163 * issue PCI config cycles to an OF device.
164 * It could also be used to "fix" RTAS config cycles if you want
165 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
166 * config cycles.
167 */
168struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
169{
170 while (node) {
171 struct pci_controller *hose, *tmp;
172 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
173 if (hose->dn == node)
174 return hose;
175 node = node->parent;
176 }
177 return NULL;
178}
179
180static ssize_t pci_show_devspec(struct device *dev,
181 struct device_attribute *attr, char *buf)
182{
183 struct pci_dev *pdev;
184 struct device_node *np;
185
186 pdev = to_pci_dev(dev);
187 np = pci_device_to_OF_node(pdev);
188 if (np == NULL || np->full_name == NULL)
189 return 0;
190 return sprintf(buf, "%s", np->full_name);
191}
192static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
193
194/* Add sysfs properties */
195int pcibios_add_platform_entries(struct pci_dev *pdev)
196{
197 return device_create_file(&pdev->dev, &dev_attr_devspec);
198}
199
200char __devinit *pcibios_setup(char *str)
201{
202 return str;
203}
204
205/*
206 * Reads the interrupt pin to determine if the interrupt is used by the card.
207 * If the interrupt is used, then gets the interrupt line from
208 * Open Firmware and sets it in the pci_dev and the PCI config space.
209 */
210int pci_read_irq_line(struct pci_dev *pci_dev)
211{
212 struct of_irq oirq;
213 unsigned int virq;
214
 215	/* The current device-tree that iSeries generates from the HV
 216	 * PCI information doesn't contain proper interrupt routing,
 217	 * and all the fallback would do is print out noise, so we
 218	 * don't attempt to resolve the interrupts here at all; some
 219	 * iSeries-specific fixup does it.
220 *
221 * In the long run, we will hopefully fix the generated device-tree
222 * instead.
223 */
224 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
225
226#ifdef DEBUG
227 memset(&oirq, 0xff, sizeof(oirq));
228#endif
229 /* Try to get a mapping from the device-tree */
230 if (of_irq_map_pci(pci_dev, &oirq)) {
231 u8 line, pin;
232
 233		/* If that fails, let's fall back to what is in the config
234 * space and map that through the default controller. We
235 * also set the type to level low since that's what PCI
236 * interrupts are. If your platform does differently, then
237 * either provide a proper interrupt tree or don't use this
238 * function.
239 */
240 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
241 return -1;
242 if (pin == 0)
243 return -1;
244 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
245 line == 0xff || line == 0) {
246 return -1;
247 }
248 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
249 line, pin);
250
251 virq = irq_create_mapping(NULL, line);
252 if (virq != NO_IRQ)
253 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
254 } else {
255 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
256 oirq.size, oirq.specifier[0], oirq.specifier[1],
257 oirq.controller ? oirq.controller->full_name :
258 "<default>");
259
260 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
261 oirq.size);
262 }
263 if (virq == NO_IRQ) {
264 pr_debug(" Failed to map !\n");
265 return -1;
266 }
267
268 pr_debug(" Mapped to linux irq %d\n", virq);
269
270 pci_dev->irq = virq;
271
272 return 0;
273}
274EXPORT_SYMBOL(pci_read_irq_line);
275
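Because pcibios_setup_bus_devices() further down calls pci_read_irq_line()
for every device, drivers simply consume dev->irq. A hedged sketch (handler
and names hypothetical):

	#include <linux/interrupt.h>

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		/* A real driver would acknowledge its device here. */
		return IRQ_HANDLED;
	}

	static int example_request_irq(struct pci_dev *pdev)
	{
		/* pdev->irq already holds the Linux virq mapped above. */
		return request_irq(pdev->irq, example_isr, IRQF_SHARED,
				   "example", pdev);
	}
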
276/*
277 * Platform support for /proc/bus/pci/X/Y mmap()s,
278 * modelled on the sparc64 implementation by Dave Miller.
279 * -- paulus.
280 */
281
282/*
283 * Adjust vm_pgoff of VMA such that it is the physical page offset
284 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
285 *
 286 * Basically, the user finds the base address for their device which they
 287 * wish to mmap. They read the 32-bit value from the config space base
 288 * register, add whatever PAGE_SIZE multiple offset they wish, and feed this
 289 * into the offset parameter of mmap on /proc/bus/pci/XXX for that device.
 290 *
 291 * Returns the matching resource on success, NULL on failure.
292 */
293static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
294 resource_size_t *offset,
295 enum pci_mmap_state mmap_state)
296{
297 struct pci_controller *hose = pci_bus_to_host(dev->bus);
298 unsigned long io_offset = 0;
299 int i, res_bit;
300
 301	if (hose == NULL)
302 return NULL; /* should never happen */
303
304 /* If memory, add on the PCI bridge address offset */
305 if (mmap_state == pci_mmap_mem) {
306#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
307 *offset += hose->pci_mem_offset;
308#endif
309 res_bit = IORESOURCE_MEM;
310 } else {
311 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
312 *offset += io_offset;
313 res_bit = IORESOURCE_IO;
314 }
315
316 /*
317 * Check that the offset requested corresponds to one of the
318 * resources of the device.
319 */
320 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
321 struct resource *rp = &dev->resource[i];
322 int flags = rp->flags;
323
324 /* treat ROM as memory (should be already) */
325 if (i == PCI_ROM_RESOURCE)
326 flags |= IORESOURCE_MEM;
327
328 /* Active and same type? */
329 if ((flags & res_bit) == 0)
330 continue;
331
332 /* In the range of this resource? */
333 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
334 continue;
335
336 /* found it! construct the final physical address */
337 if (mmap_state == pci_mmap_io)
338 *offset += hose->io_base_phys - io_offset;
339 return rp;
340 }
341
342 return NULL;
343}
344
345/*
346 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
347 * device mapping.
348 */
349static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
350 pgprot_t protection,
351 enum pci_mmap_state mmap_state,
352 int write_combine)
353{
354 pgprot_t prot = protection;
355
 356	/* Write combine is always 0 on non-memory space mappings. On
 357	 * memory space, if the user didn't pass 1, we check for a
 358	 * "prefetchable" resource. This is a bit hackish, but we use
 359	 * this to work around the inability of /sysfs to provide a
 360	 * write combine bit. Note that this port maps everything
 361	 * non-cached anyway, so write_combine ends up unused below.
 362	 */
362 if (mmap_state != pci_mmap_mem)
363 write_combine = 0;
364 else if (write_combine == 0) {
365 if (rp->flags & IORESOURCE_PREFETCH)
366 write_combine = 1;
367 }
368
369 return pgprot_noncached(prot);
370}
371
372/*
373 * This one is used by /dev/mem and fbdev, which have no clue about the
374 * PCI device; it tries to find the PCI device first and calls the
375 * above routine.
376 */
377pgprot_t pci_phys_mem_access_prot(struct file *file,
378 unsigned long pfn,
379 unsigned long size,
380 pgprot_t prot)
381{
382 struct pci_dev *pdev = NULL;
383 struct resource *found = NULL;
384 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
385 int i;
386
387 if (page_is_ram(pfn))
388 return prot;
389
390 prot = pgprot_noncached(prot);
391 for_each_pci_dev(pdev) {
392 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
393 struct resource *rp = &pdev->resource[i];
394 int flags = rp->flags;
395
396 /* Active and same type? */
397 if ((flags & IORESOURCE_MEM) == 0)
398 continue;
399 /* In the range of this resource? */
400 if (offset < (rp->start & PAGE_MASK) ||
401 offset > rp->end)
402 continue;
403 found = rp;
404 break;
405 }
406 if (found)
407 break;
408 }
409 if (found) {
410 if (found->flags & IORESOURCE_PREFETCH)
411 prot = pgprot_noncached_wc(prot);
412 pci_dev_put(pdev);
413 }
414
415 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
416 (unsigned long long)offset, pgprot_val(prot));
417
418 return prot;
419}
420
421/*
422 * Perform the actual remap of the pages for a PCI device mapping, as
423 * appropriate for this architecture. The region in the process to map
424 * is described by vm_start and vm_end members of VMA, the base physical
425 * address is found in vm_pgoff.
426 * The pci device structure is provided so that architectures may make mapping
427 * decisions on a per-device or per-bus basis.
428 *
429 * Returns a negative error code on failure, zero on success.
430 */
431int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
432 enum pci_mmap_state mmap_state, int write_combine)
433{
434 resource_size_t offset =
435 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
436 struct resource *rp;
437 int ret;
438
439 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
440 if (rp == NULL)
441 return -EINVAL;
442
443 vma->vm_pgoff = offset >> PAGE_SHIFT;
444 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
445 vma->vm_page_prot,
446 mmap_state, write_combine);
447
448 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
449 vma->vm_end - vma->vm_start, vma->vm_page_prot);
450
451 return ret;
452}
453
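From userspace this path is reached by mmap() on the procfs config entry.
A rough sketch, with the path and length invented:

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Map 4KB of a device's memory BAR through procfs. The offset is
	 * the BAR value read from config space, page aligned. A real
	 * caller may first need ioctl(fd, PCIIOC_MMAP_IS_MEM) to select
	 * memory-space mapping. */
	static void *map_bar(const char *path, off_t bar_phys)
	{
		void *p;
		int fd = open(path, O_RDWR);

		if (fd < 0)
			return NULL;
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			 fd, bar_phys);
		close(fd);
		return p == MAP_FAILED ? NULL : p;
	}
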
454/* This provides legacy IO read access on a bus */
455int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
456{
457 unsigned long offset;
458 struct pci_controller *hose = pci_bus_to_host(bus);
459 struct resource *rp = &hose->io_resource;
460 void __iomem *addr;
461
462 /* Check if port can be supported by that bus. We only check
463 * the ranges of the PHB though, not the bus itself as the rules
464 * for forwarding legacy cycles down bridges are not our problem
465 * here. So if the host bridge supports it, we do it.
466 */
467 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
468 offset += port;
469
470 if (!(rp->flags & IORESOURCE_IO))
471 return -ENXIO;
472 if (offset < rp->start || (offset + size) > rp->end)
473 return -ENXIO;
474 addr = hose->io_base_virt + port;
475
476 switch (size) {
477 case 1:
478 *((u8 *)val) = in_8(addr);
479 return 1;
480 case 2:
481 if (port & 1)
482 return -EINVAL;
483 *((u16 *)val) = in_le16(addr);
484 return 2;
485 case 4:
486 if (port & 3)
487 return -EINVAL;
488 *((u32 *)val) = in_le32(addr);
489 return 4;
490 }
491 return -EINVAL;
492}
493
494/* This provides legacy IO write access on a bus */
495int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
496{
497 unsigned long offset;
498 struct pci_controller *hose = pci_bus_to_host(bus);
499 struct resource *rp = &hose->io_resource;
500 void __iomem *addr;
501
502 /* Check if port can be supported by that bus. We only check
503 * the ranges of the PHB though, not the bus itself as the rules
504 * for forwarding legacy cycles down bridges are not our problem
505 * here. So if the host bridge supports it, we do it.
506 */
507 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
508 offset += port;
509
510 if (!(rp->flags & IORESOURCE_IO))
511 return -ENXIO;
512 if (offset < rp->start || (offset + size) > rp->end)
513 return -ENXIO;
514 addr = hose->io_base_virt + port;
515
516 /* WARNING: The generic code is idiotic. It gets passed a pointer
517 * to what can be a 1, 2 or 4 byte quantity and always reads that
518 * as a u32, which means that we have to correct the location of
519 * the data read within those 32 bits for size 1 and 2
520 */
521 switch (size) {
522 case 1:
523 out_8(addr, val >> 24);
524 return 1;
525 case 2:
526 if (port & 1)
527 return -EINVAL;
528 out_le16(addr, val >> 16);
529 return 2;
530 case 4:
531 if (port & 3)
532 return -EINVAL;
533 out_le32(addr, val);
534 return 4;
535 }
536 return -EINVAL;
537}
538
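To make the byte-lane shifts in pci_legacy_write() concrete:

	/* A 1-byte write of 0x5a arrives from the generic code as
	 * val = 0x5a000000 (the byte sits in the top lane of the u32
	 * on this big-endian port), so:
	 *
	 *   size 1: out_8(addr, val >> 24)     writes 0x5a
	 *   size 2: out_le16(addr, val >> 16)  writes e.g. 0xbeef
	 */
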
539/* This provides legacy IO or memory mmap access on a bus */
540int pci_mmap_legacy_page_range(struct pci_bus *bus,
541 struct vm_area_struct *vma,
542 enum pci_mmap_state mmap_state)
543{
544 struct pci_controller *hose = pci_bus_to_host(bus);
545 resource_size_t offset =
546 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
547 resource_size_t size = vma->vm_end - vma->vm_start;
548 struct resource *rp;
549
550 pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
551 pci_domain_nr(bus), bus->number,
552 mmap_state == pci_mmap_mem ? "MEM" : "IO",
553 (unsigned long long)offset,
554 (unsigned long long)(offset + size - 1));
555
556 if (mmap_state == pci_mmap_mem) {
557 /* Hack alert !
558 *
 559		 * Because X is lame and can fail to start if it gets an error
 560		 * trying to mmap legacy_mem (instead of just moving on without
 561		 * legacy memory access) we fake it here by giving it anonymous
 562		 * memory, effectively behaving just like /dev/zero.
563 */
564 if ((offset + size) > hose->isa_mem_size) {
565#ifdef CONFIG_MMU
566 printk(KERN_DEBUG
 567			       "Process %s (pid:%d) mapped non-existing PCI "
 568			       "legacy memory for 0%04x:%02x\n",
569 current->comm, current->pid, pci_domain_nr(bus),
570 bus->number);
571#endif
572 if (vma->vm_flags & VM_SHARED)
573 return shmem_zero_setup(vma);
574 return 0;
575 }
576 offset += hose->isa_mem_phys;
577 } else {
 578		unsigned long io_offset =
 579			(unsigned long)hose->io_base_virt - _IO_BASE;
580 unsigned long roffset = offset + io_offset;
581 rp = &hose->io_resource;
582 if (!(rp->flags & IORESOURCE_IO))
583 return -ENXIO;
584 if (roffset < rp->start || (roffset + size) > rp->end)
585 return -ENXIO;
586 offset += hose->io_base_phys;
587 }
588 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
589
590 vma->vm_pgoff = offset >> PAGE_SHIFT;
591 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
592 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
593 vma->vm_end - vma->vm_start,
594 vma->vm_page_prot);
595}
596
597void pci_resource_to_user(const struct pci_dev *dev, int bar,
598 const struct resource *rsrc,
599 resource_size_t *start, resource_size_t *end)
600{
601 struct pci_controller *hose = pci_bus_to_host(dev->bus);
602 resource_size_t offset = 0;
603
604 if (hose == NULL)
605 return;
606
607 if (rsrc->flags & IORESOURCE_IO)
608 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
609
610 /* We pass a fully fixed up address to userland for MMIO instead of
611 * a BAR value because X is lame and expects to be able to use that
612 * to pass to /dev/mem !
613 *
614 * That means that we'll have potentially 64 bits values where some
615 * userland apps only expect 32 (like X itself since it thinks only
616 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
617 * 32 bits CHRPs :-(
618 *
 619	 * Hopefully, the sysfs interface is immune to that gunk. Once X
620 * has been fixed (and the fix spread enough), we can re-enable the
621 * 2 lines below and pass down a BAR value to userland. In that case
622 * we'll also have to re-enable the matching code in
623 * __pci_mmap_make_offset().
624 *
625 * BenH.
626 */
627#if 0
628 else if (rsrc->flags & IORESOURCE_MEM)
629 offset = hose->pci_mem_offset;
630#endif
631
632 *start = rsrc->start - offset;
633 *end = rsrc->end - offset;
634}
635
636/**
637 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
638 * @hose: newly allocated pci_controller to be setup
639 * @dev: device node of the host bridge
640 * @primary: set if primary bus (32 bits only, soon to be deprecated)
641 *
642 * This function will parse the "ranges" property of a PCI host bridge device
643 * node and setup the resource mapping of a pci controller based on its
644 * content.
645 *
646 * Life would be boring if it wasn't for a few issues that we have to deal
647 * with here:
648 *
649 * - We can only cope with one IO space range and up to 3 Memory space
650 * ranges. However, some machines (thanks Apple !) tend to split their
651 * space into lots of small contiguous ranges. So we have to coalesce.
652 *
653 * - We can only cope with all memory ranges having the same offset
654 * between CPU addresses and PCI addresses. Unfortunately, some bridges
655 * are setup for a large 1:1 mapping along with a small "window" which
656 * maps PCI address 0 to some arbitrary high address of the CPU space in
657 * order to give access to the ISA memory hole.
658 * The way out of here that I've chosen for now is to always set the
659 * offset based on the first resource found, then override it if we
660 * have a different offset and the previous was set by an ISA hole.
661 *
662 * - Some busses have IO space not starting at 0, which causes trouble with
663 * the way we do our IO resource renumbering. The code somewhat deals with
664 * it for 64 bits but I would expect problems on 32 bits.
665 *
666 * - Some 32 bits platforms such as 4xx can have physical space larger than
667 * 32 bits so we need to use 64 bits values for the parsing
668 */
669void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
670 struct device_node *dev,
671 int primary)
672{
673 const u32 *ranges;
674 int rlen;
675 int pna = of_n_addr_cells(dev);
676 int np = pna + 5;
677 int memno = 0, isa_hole = -1;
678 u32 pci_space;
679 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
680 unsigned long long isa_mb = 0;
681 struct resource *res;
682
683 printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
684 dev->full_name, primary ? "(primary)" : "");
685
686 /* Get ranges property */
687 ranges = of_get_property(dev, "ranges", &rlen);
688 if (ranges == NULL)
689 return;
690
691 /* Parse it */
692 pr_debug("Parsing ranges property...\n");
693 while ((rlen -= np * 4) >= 0) {
694 /* Read next ranges element */
695 pci_space = ranges[0];
696 pci_addr = of_read_number(ranges + 1, 2);
697 cpu_addr = of_translate_address(dev, ranges + 3);
698 size = of_read_number(ranges + pna + 3, 2);
699
700 pr_debug("pci_space: 0x%08x pci_addr:0x%016llx "
701 "cpu_addr:0x%016llx size:0x%016llx\n",
702 pci_space, pci_addr, cpu_addr, size);
703
704 ranges += np;
705
 706		/* If we failed translation or got a zero-sized region,
 707		 * skip it (some firmware tries to feed us nonsensical
 708		 * zero-sized regions, such as power3, which look like
 709		 * some kind of attempt at exposing the VGA memory hole)
 710		 */
711 if (cpu_addr == OF_BAD_ADDR || size == 0)
712 continue;
713
714 /* Now consume following elements while they are contiguous */
715 for (; rlen >= np * sizeof(u32);
716 ranges += np, rlen -= np * 4) {
717 if (ranges[0] != pci_space)
718 break;
719 pci_next = of_read_number(ranges + 1, 2);
720 cpu_next = of_translate_address(dev, ranges + 3);
721 if (pci_next != pci_addr + size ||
722 cpu_next != cpu_addr + size)
723 break;
724 size += of_read_number(ranges + pna + 3, 2);
725 }
726
727 /* Act based on address space type */
728 res = NULL;
729 switch ((pci_space >> 24) & 0x3) {
730 case 1: /* PCI IO space */
731 printk(KERN_INFO
732 " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
733 cpu_addr, cpu_addr + size - 1, pci_addr);
734
735 /* We support only one IO range */
736 if (hose->pci_io_size) {
737 printk(KERN_INFO
738 " \\--> Skipped (too many) !\n");
739 continue;
740 }
741 /* On 32 bits, limit I/O space to 16MB */
742 if (size > 0x01000000)
743 size = 0x01000000;
744
745 /* 32 bits needs to map IOs here */
746 hose->io_base_virt = ioremap(cpu_addr, size);
747
748 /* Expect trouble if pci_addr is not 0 */
749 if (primary)
750 isa_io_base =
751 (unsigned long)hose->io_base_virt;
752 /* pci_io_size and io_base_phys always represent IO
753 * space starting at 0 so we factor in pci_addr
754 */
755 hose->pci_io_size = pci_addr + size;
756 hose->io_base_phys = cpu_addr - pci_addr;
757
758 /* Build resource */
759 res = &hose->io_resource;
760 res->flags = IORESOURCE_IO;
761 res->start = pci_addr;
762 break;
763 case 2: /* PCI Memory space */
764 case 3: /* PCI 64 bits Memory space */
765 printk(KERN_INFO
766 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
767 cpu_addr, cpu_addr + size - 1, pci_addr,
768 (pci_space & 0x40000000) ? "Prefetch" : "");
769
770 /* We support only 3 memory ranges */
771 if (memno >= 3) {
772 printk(KERN_INFO
773 " \\--> Skipped (too many) !\n");
774 continue;
775 }
776 /* Handles ISA memory hole space here */
777 if (pci_addr == 0) {
778 isa_mb = cpu_addr;
779 isa_hole = memno;
780 if (primary || isa_mem_base == 0)
781 isa_mem_base = cpu_addr;
782 hose->isa_mem_phys = cpu_addr;
783 hose->isa_mem_size = size;
784 }
785
 786			/* We get the PCI/Mem offset from the first range, or
 787			 * the current one if the offset came from an ISA
 788			 * hole. If they don't match, bugger.
 789			 */
790 if (memno == 0 ||
791 (isa_hole >= 0 && pci_addr != 0 &&
792 hose->pci_mem_offset == isa_mb))
793 hose->pci_mem_offset = cpu_addr - pci_addr;
794 else if (pci_addr != 0 &&
795 hose->pci_mem_offset != cpu_addr - pci_addr) {
796 printk(KERN_INFO
797 " \\--> Skipped (offset mismatch) !\n");
798 continue;
799 }
800
801 /* Build resource */
802 res = &hose->mem_resources[memno++];
803 res->flags = IORESOURCE_MEM;
804 if (pci_space & 0x40000000)
805 res->flags |= IORESOURCE_PREFETCH;
806 res->start = cpu_addr;
807 break;
808 }
809 if (res != NULL) {
810 res->name = dev->full_name;
811 res->end = res->start + size - 1;
812 res->parent = NULL;
813 res->sibling = NULL;
814 res->child = NULL;
815 }
816 }
817
 818	/* If there's an ISA hole and the pci_mem_offset does -not- match
 819	 * the ISA hole offset, then we need to remove the ISA hole from
 820	 * the resource list for that bridge.
 821	 */
822 if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
823 unsigned int next = isa_hole + 1;
824 printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
825 if (next < memno)
826 memmove(&hose->mem_resources[isa_hole],
827 &hose->mem_resources[next],
828 sizeof(struct resource) * (memno - next));
829 hose->mem_resources[--memno].flags = 0;
830 }
831}
832
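As a concrete, made-up illustration of one entry in the layout parsed above,
assuming #address-cells = 1 on the bridge node so each entry is np = 6 cells:

	/* One 32-bit, non-prefetchable memory range:
	 *
	 *   ranges = <0x02000000 0x0 0x80000000  0x80000000  0x0 0x10000000>;
	 *             pci_space  pci_addr (2)    cpu_addr(1) size (2)
	 *
	 * ((pci_space >> 24) & 0x3) == 2, so this is PCI memory space;
	 * pci_addr = 0x80000000, cpu_addr = 0x80000000, size = 256MB,
	 * giving hose->pci_mem_offset = cpu_addr - pci_addr = 0.
	 */
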
833/* Decide whether to display the domain number in /proc */
834int pci_proc_domain(struct pci_bus *bus)
835{
836 struct pci_controller *hose = pci_bus_to_host(bus);
837
838 if (!(pci_flags & PCI_ENABLE_PROC_DOMAINS))
839 return 0;
840 if (pci_flags & PCI_COMPAT_DOMAIN_0)
841 return hose->global_number != 0;
842 return 1;
843}
844
845void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
846 struct resource *res)
847{
848 resource_size_t offset = 0, mask = (resource_size_t)-1;
849 struct pci_controller *hose = pci_bus_to_host(dev->bus);
850
851 if (!hose)
852 return;
853 if (res->flags & IORESOURCE_IO) {
854 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
855 mask = 0xffffffffu;
856 } else if (res->flags & IORESOURCE_MEM)
857 offset = hose->pci_mem_offset;
858
859 region->start = (res->start - offset) & mask;
860 region->end = (res->end - offset) & mask;
861}
862EXPORT_SYMBOL(pcibios_resource_to_bus);
863
864void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
865 struct pci_bus_region *region)
866{
867 resource_size_t offset = 0, mask = (resource_size_t)-1;
868 struct pci_controller *hose = pci_bus_to_host(dev->bus);
869
870 if (!hose)
871 return;
872 if (res->flags & IORESOURCE_IO) {
873 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
874 mask = 0xffffffffu;
875 } else if (res->flags & IORESOURCE_MEM)
876 offset = hose->pci_mem_offset;
877 res->start = (region->start + offset) & mask;
878 res->end = (region->end + offset) & mask;
879}
880EXPORT_SYMBOL(pcibios_bus_to_resource);
881
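A quick numeric check of the two helpers, with an invented hose:

	/* With hose->pci_mem_offset = 0x40000000, a MEM resource at CPU
	 * 0xc0000000-0xc0000fff maps to bus region 0x80000000-0x80000fff,
	 * and pcibios_bus_to_resource() applies the inverse (+offset). */
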
882/* Fixup a bus resource into a linux resource */
883static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
884{
885 struct pci_controller *hose = pci_bus_to_host(dev->bus);
886 resource_size_t offset = 0, mask = (resource_size_t)-1;
887
888 if (res->flags & IORESOURCE_IO) {
889 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
890 mask = 0xffffffffu;
891 } else if (res->flags & IORESOURCE_MEM)
892 offset = hose->pci_mem_offset;
893
894 res->start = (res->start + offset) & mask;
895 res->end = (res->end + offset) & mask;
896}
897
898/* This header fixup will do the resource fixup for all devices as they are
899 * probed, but not for bridge ranges
900 */
901static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
902{
903 struct pci_controller *hose = pci_bus_to_host(dev->bus);
904 int i;
905
906 if (!hose) {
907 printk(KERN_ERR "No host bridge for PCI dev %s !\n",
908 pci_name(dev));
909 return;
910 }
911 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
912 struct resource *res = dev->resource + i;
913 if (!res->flags)
914 continue;
915 /* On platforms that have PCI_PROBE_ONLY set, we don't
916 * consider 0 as an unassigned BAR value. It's technically
917 * a valid value, but linux doesn't like it... so when we can
918 * re-assign things, we do so, but if we can't, we keep it
919 * around and hope for the best...
920 */
921 if (res->start == 0 && !(pci_flags & PCI_PROBE_ONLY)) {
 922			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
 923				 "is unassigned\n",
924 pci_name(dev), i,
925 (unsigned long long)res->start,
926 (unsigned long long)res->end,
927 (unsigned int)res->flags);
928 res->end -= res->start;
929 res->start = 0;
930 res->flags |= IORESOURCE_UNSET;
931 continue;
932 }
933
934 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
935 pci_name(dev), i,
 936			 (unsigned long long)res->start,
937 (unsigned long long)res->end,
938 (unsigned int)res->flags);
939
940 fixup_resource(res, dev);
941
942 pr_debug("PCI:%s %016llx-%016llx\n",
943 pci_name(dev),
944 (unsigned long long)res->start,
945 (unsigned long long)res->end);
946 }
947}
948DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
949
950/* This function tries to figure out if a bridge resource has been initialized
951 * by the firmware or not. It doesn't have to be absolutely bulletproof, but
952 * things go more smoothly when it gets it right. It should cover cases such
953 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges.
954 */
955static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
956 struct resource *res)
957{
958 struct pci_controller *hose = pci_bus_to_host(bus);
959 struct pci_dev *dev = bus->self;
960 resource_size_t offset;
961 u16 command;
962 int i;
963
964 /* We don't do anything if PCI_PROBE_ONLY is set */
965 if (pci_flags & PCI_PROBE_ONLY)
966 return 0;
967
968 /* Job is a bit different between memory and IO */
969 if (res->flags & IORESOURCE_MEM) {
970 /* If the BAR is non-0 (res != pci_mem_offset) then it's
971 * probably been initialized by somebody
972 */
973 if (res->start != hose->pci_mem_offset)
974 return 0;
975
976 /* The BAR is 0, let's check if memory decoding is enabled on
977 * the bridge. If not, we consider it unassigned
978 */
979 pci_read_config_word(dev, PCI_COMMAND, &command);
980 if ((command & PCI_COMMAND_MEMORY) == 0)
981 return 1;
982
 983		/* Memory decoding is enabled and the BAR is 0. If any of
 984		 * the bridge resources covers that starting address (0),
 985		 * then it's good enough for us for memory.
 986		 */
987 for (i = 0; i < 3; i++) {
988 if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
989 hose->mem_resources[i].start == hose->pci_mem_offset)
990 return 0;
991 }
992
993 /* Well, it starts at 0 and we know it will collide so we may as
994 * well consider it as unassigned. That covers the Apple case.
995 */
996 return 1;
997 } else {
998 /* If the BAR is non-0, then we consider it assigned */
999 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1000 if (((res->start - offset) & 0xfffffffful) != 0)
1001 return 0;
1002
 1003		/* Here, we are a bit different from memory, as typically IO
 1004		 * space starting at low addresses -is- valid. What we do
 1005		 * instead is that we consider as unassigned anything that
 1006		 * doesn't have IO enabled in the PCI command register,
 1007		 * and that's it.
 1008		 */
1009 pci_read_config_word(dev, PCI_COMMAND, &command);
1010 if (command & PCI_COMMAND_IO)
1011 return 0;
1012
1013 /* It's starting at 0 and IO is disabled in the bridge, consider
1014 * it unassigned
1015 */
1016 return 1;
1017 }
1018}
1019
1020/* Fixup resources of a PCI<->PCI bridge */
1021static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
1022{
1023 struct resource *res;
1024 int i;
1025
1026 struct pci_dev *dev = bus->self;
1027
1028 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
1029 res = bus->resource[i];
1030 if (!res)
1031 continue;
1032 if (!res->flags)
1033 continue;
1034 if (i >= 3 && bus->self->transparent)
1035 continue;
1036
1037 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
1038 pci_name(dev), i,
 1039			 (unsigned long long)res->start,
1040 (unsigned long long)res->end,
1041 (unsigned int)res->flags);
1042
1043 /* Perform fixup */
1044 fixup_resource(res, dev);
1045
1046 /* Try to detect uninitialized P2P bridge resources,
1047 * and clear them out so they get re-assigned later
1048 */
1049 if (pcibios_uninitialized_bridge_resource(bus, res)) {
1050 res->flags = 0;
1051 pr_debug("PCI:%s (unassigned)\n",
1052 pci_name(dev));
1053 } else {
1054 pr_debug("PCI:%s %016llx-%016llx\n",
1055 pci_name(dev),
1056 (unsigned long long)res->start,
1057 (unsigned long long)res->end);
1058 }
1059 }
1060}
1061
1062void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
1063{
1064 /* Fix up the bus resources for P2P bridges */
1065 if (bus->self != NULL)
1066 pcibios_fixup_bridge(bus);
1067}
1068
1069void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1070{
1071 struct pci_dev *dev;
1072
1073 pr_debug("PCI: Fixup bus devices %d (%s)\n",
1074 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1075
1076 list_for_each_entry(dev, &bus->devices, bus_list) {
1077 struct dev_archdata *sd = &dev->dev.archdata;
1078
1079 /* Setup OF node pointer in archdata */
1080 sd->of_node = pci_device_to_OF_node(dev);
1081
1082 /* Fixup NUMA node as it may not be setup yet by the generic
1083 * code and is needed by the DMA init
1084 */
1085 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1086
1087 /* Hook up default DMA ops */
1088 sd->dma_ops = pci_dma_ops;
1089 sd->dma_data = (void *)PCI_DRAM_OFFSET;
1090
1091 /* Read default IRQs and fixup if necessary */
1092 pci_read_irq_line(dev);
1093 }
1094}
1095
1096void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1097{
1098 /* When called from the generic PCI probe, read PCI<->PCI bridge
1099 * bases. This is -not- called when generating the PCI tree from
1100 * the OF device-tree.
1101 */
1102 if (bus->self != NULL)
1103 pci_read_bridge_bases(bus);
1104
 1105	/* Now fixup the bus itself */
1106 pcibios_setup_bus_self(bus);
1107
1108 /* Now fixup devices on that bus */
1109 pcibios_setup_bus_devices(bus);
1110}
1111EXPORT_SYMBOL(pcibios_fixup_bus);
1112
1113static int skip_isa_ioresource_align(struct pci_dev *dev)
1114{
1115 if ((pci_flags & PCI_CAN_SKIP_ISA_ALIGN) &&
1116 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1117 return 1;
1118 return 0;
1119}
1120
1121/*
1122 * We need to avoid collisions with `mirrored' VGA ports
1123 * and other strange ISA hardware, so we always want the
1124 * addresses to be allocated in the 0x000-0x0ff region
1125 * modulo 0x400.
1126 *
1127 * Why? Because some silly external IO cards only decode
1128 * the low 10 bits of the IO address. The 0x00-0xff region
1129 * is reserved for motherboard devices that decode all 16
1130 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1131 * but we want to try to avoid allocating at 0x2900-0x2bff
 1132 * which might be mirrored at 0x0100-0x03ff.
1133 */
1134void pcibios_align_resource(void *data, struct resource *res,
1135 resource_size_t size, resource_size_t align)
1136{
1137 struct pci_dev *dev = data;
1138
1139 if (res->flags & IORESOURCE_IO) {
1140 resource_size_t start = res->start;
1141
1142 if (skip_isa_ioresource_align(dev))
1143 return;
1144 if (start & 0x300) {
1145 start = (start + 0x3ff) & ~0x3ff;
1146 res->start = start;
1147 }
1148 }
1149}
1150EXPORT_SYMBOL(pcibios_align_resource);
1151
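For example, under the rule above:

	/* An IO resource proposed at 0x2900 has (0x2900 & 0x300) != 0, so
	 * it is bumped to the next 1KB boundary:
	 *
	 *   start = (0x2900 + 0x3ff) & ~0x3ff = 0x2c00
	 *
	 * while 0x2800 (low bits already in the 0x000-0x0ff window) is
	 * left alone. */
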
1152/*
1153 * Reparent the children of parent that conflict with res
1154 * under res, and make res replace those children.
1155 */
1156static int __init reparent_resources(struct resource *parent,
1157 struct resource *res)
1158{
1159 struct resource *p, **pp;
1160 struct resource **firstpp = NULL;
1161
1162 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1163 if (p->end < res->start)
1164 continue;
1165 if (res->end < p->start)
1166 break;
1167 if (p->start < res->start || p->end > res->end)
1168 return -1; /* not completely contained */
1169 if (firstpp == NULL)
1170 firstpp = pp;
1171 }
1172 if (firstpp == NULL)
1173 return -1; /* didn't find any conflicting entries? */
1174 res->parent = parent;
1175 res->child = *firstpp;
1176 res->sibling = *pp;
1177 *firstpp = res;
1178 *pp = NULL;
1179 for (p = res->child; p != NULL; p = p->sibling) {
1180 p->parent = res;
1181 pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
1182 p->name,
1183 (unsigned long long)p->start,
1184 (unsigned long long)p->end, res->name);
1185 }
1186 return 0;
1187}
1188
1189/*
1190 * Handle resources of PCI devices. If the world were perfect, we could
1191 * just allocate all the resource regions and do nothing more. It isn't.
1192 * On the other hand, we cannot just re-allocate all devices, as it would
1193 * require us to know lots of host bridge internals. So we attempt to
1194 * keep as much of the original configuration as possible, but tweak it
1195 * when it's found to be wrong.
1196 *
1197 * Known BIOS problems we have to work around:
1198 * - I/O or memory regions not configured
1199 * - regions configured, but not enabled in the command register
1200 * - bogus I/O addresses above 64K used
1201 * - expansion ROMs left enabled (this may sound harmless, but given
1202 * the fact the PCI specs explicitly allow address decoders to be
1203 * shared between expansion ROMs and other resource regions, it's
1204 * at least dangerous)
1205 *
1206 * Our solution:
1207 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1208 * This gives us fixed barriers on where we can allocate.
1209 * (2) Allocate resources for all enabled devices. If there is
1210 * a collision, just mark the resource as unallocated. Also
1211 * disable expansion ROMs during this step.
1212 * (3) Try to allocate resources for disabled devices. If the
1213 * resources were assigned correctly, everything goes well,
1214 * if they weren't, they won't disturb allocation of other
1215 * resources.
1216 * (4) Assign new addresses to resources which were either
1217 * not configured at all or misconfigured. If explicitly
1218 * requested by the user, configure expansion ROM address
1219 * as well.
1220 */
1221
1222void pcibios_allocate_bus_resources(struct pci_bus *bus)
1223{
1224 struct pci_bus *b;
1225 int i;
1226 struct resource *res, *pr;
1227
1228 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1229 pci_domain_nr(bus), bus->number);
1230
1231 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
1232 res = bus->resource[i];
1233 if (!res || !res->flags
1234 || res->start > res->end || res->parent)
1235 continue;
1236 if (bus->parent == NULL)
1237 pr = (res->flags & IORESOURCE_IO) ?
1238 &ioport_resource : &iomem_resource;
1239 else {
1240 /* Don't bother with non-root busses when
1241 * re-assigning all resources. We clear the
1242 * resource flags as if they were colliding
1243 * and as such ensure proper re-allocation
1244 * later.
1245 */
1246 if (pci_flags & PCI_REASSIGN_ALL_RSRC)
1247 goto clear_resource;
1248 pr = pci_find_parent_resource(bus->self, res);
1249 if (pr == res) {
1250 /* this happens when the generic PCI
1251 * code (wrongly) decides that this
1252 * bridge is transparent -- paulus
1253 */
1254 continue;
1255 }
1256 }
1257
1258 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
1259 "[0x%x], parent %p (%s)\n",
1260 bus->self ? pci_name(bus->self) : "PHB",
1261 bus->number, i,
1262 (unsigned long long)res->start,
1263 (unsigned long long)res->end,
1264 (unsigned int)res->flags,
1265 pr, (pr && pr->name) ? pr->name : "nil");
1266
1267 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1268 if (request_resource(pr, res) == 0)
1269 continue;
1270 /*
1271 * Must be a conflict with an existing entry.
1272 * Move that entry (or entries) under the
1273 * bridge resource and try again.
1274 */
1275 if (reparent_resources(pr, res) == 0)
1276 continue;
1277 }
1278 printk(KERN_WARNING "PCI: Cannot allocate resource region "
1279 "%d of PCI bridge %d, will remap\n", i, bus->number);
1280clear_resource:
1281 res->flags = 0;
1282 }
1283
1284 list_for_each_entry(b, &bus->children, node)
1285 pcibios_allocate_bus_resources(b);
1286}
1287
1288static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1289{
1290 struct resource *pr, *r = &dev->resource[idx];
1291
1292 pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1293 pci_name(dev), idx,
1294 (unsigned long long)r->start,
1295 (unsigned long long)r->end,
1296 (unsigned int)r->flags);
1297
1298 pr = pci_find_parent_resource(dev, r);
1299 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1300 request_resource(pr, r) < 0) {
1301 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1302 " of device %s, will remap\n", idx, pci_name(dev));
1303 if (pr)
1304 pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
1305 pr,
1306 (unsigned long long)pr->start,
1307 (unsigned long long)pr->end,
1308 (unsigned int)pr->flags);
1309 /* We'll assign a new address later */
1310 r->flags |= IORESOURCE_UNSET;
1311 r->end -= r->start;
1312 r->start = 0;
1313 }
1314}
1315
1316static void __init pcibios_allocate_resources(int pass)
1317{
1318 struct pci_dev *dev = NULL;
1319 int idx, disabled;
1320 u16 command;
1321 struct resource *r;
1322
1323 for_each_pci_dev(dev) {
1324 pci_read_config_word(dev, PCI_COMMAND, &command);
1325 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1326 r = &dev->resource[idx];
1327 if (r->parent) /* Already allocated */
1328 continue;
1329 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1330 continue; /* Not assigned at all */
1331 /* We only allocate ROMs on pass 1 just in case they
1332 * have been screwed up by firmware
1333 */
 1334			if (idx == PCI_ROM_RESOURCE)
 1335				disabled = 1;
 1336			else if (r->flags & IORESOURCE_IO)
1337 disabled = !(command & PCI_COMMAND_IO);
1338 else
1339 disabled = !(command & PCI_COMMAND_MEMORY);
1340 if (pass == disabled)
1341 alloc_resource(dev, idx);
1342 }
1343 if (pass)
1344 continue;
1345 r = &dev->resource[PCI_ROM_RESOURCE];
1346 if (r->flags) {
1347 /* Turn the ROM off, leave the resource region,
1348 * but keep it unregistered.
1349 */
1350 u32 reg;
1351 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1352 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1353 pr_debug("PCI: Switching off ROM of %s\n",
1354 pci_name(dev));
1355 r->flags &= ~IORESOURCE_ROM_ENABLE;
1356 pci_write_config_dword(dev, dev->rom_base_reg,
1357 reg & ~PCI_ROM_ADDRESS_ENABLE);
1358 }
1359 }
1360 }
1361}
1362
1363static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1364{
1365 struct pci_controller *hose = pci_bus_to_host(bus);
1366 resource_size_t offset;
1367 struct resource *res, *pres;
1368 int i;
1369
1370 pr_debug("Reserving legacy ranges for domain %04x\n",
1371 pci_domain_nr(bus));
1372
1373 /* Check for IO */
1374 if (!(hose->io_resource.flags & IORESOURCE_IO))
1375 goto no_io;
1376 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1377 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1378 BUG_ON(res == NULL);
1379 res->name = "Legacy IO";
1380 res->flags = IORESOURCE_IO;
1381 res->start = offset;
1382 res->end = (offset + 0xfff) & 0xfffffffful;
1383 pr_debug("Candidate legacy IO: %pR\n", res);
1384 if (request_resource(&hose->io_resource, res)) {
1385 printk(KERN_DEBUG
1386 "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1387 pci_domain_nr(bus), bus->number, res);
1388 kfree(res);
1389 }
1390
1391 no_io:
1392 /* Check for memory */
1393 offset = hose->pci_mem_offset;
1394 pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
1395 for (i = 0; i < 3; i++) {
1396 pres = &hose->mem_resources[i];
1397 if (!(pres->flags & IORESOURCE_MEM))
1398 continue;
1399 pr_debug("hose mem res: %pR\n", pres);
1400 if ((pres->start - offset) <= 0xa0000 &&
1401 (pres->end - offset) >= 0xbffff)
1402 break;
1403 }
1404 if (i >= 3)
1405 return;
1406 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1407 BUG_ON(res == NULL);
1408 res->name = "Legacy VGA memory";
1409 res->flags = IORESOURCE_MEM;
1410 res->start = 0xa0000 + offset;
1411 res->end = 0xbffff + offset;
1412 pr_debug("Candidate VGA memory: %pR\n", res);
1413 if (request_resource(pres, res)) {
1414 printk(KERN_DEBUG
1415 "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1416 pci_domain_nr(bus), bus->number, res);
1417 kfree(res);
1418 }
1419}
1420
1421void __init pcibios_resource_survey(void)
1422{
1423 struct pci_bus *b;
1424
1425 /* Allocate and assign resources. If we re-assign everything, then
1426 * we skip the allocate phase
1427 */
1428 list_for_each_entry(b, &pci_root_buses, node)
1429 pcibios_allocate_bus_resources(b);
1430
1431 if (!(pci_flags & PCI_REASSIGN_ALL_RSRC)) {
1432 pcibios_allocate_resources(0);
1433 pcibios_allocate_resources(1);
1434 }
1435
1436 /* Before we start assigning unassigned resource, we try to reserve
1437 * the low IO area and the VGA memory area if they intersect the
1438 * bus available resources to avoid allocating things on top of them
1439 */
1440 if (!(pci_flags & PCI_PROBE_ONLY)) {
1441 list_for_each_entry(b, &pci_root_buses, node)
1442 pcibios_reserve_legacy_regions(b);
1443 }
1444
1445 /* Now, if the platform didn't decide to blindly trust the firmware,
1446 * we proceed to assigning things that were left unassigned
1447 */
1448 if (!(pci_flags & PCI_PROBE_ONLY)) {
1449 pr_debug("PCI: Assigning unassigned resources...\n");
1450 pci_assign_unassigned_resources();
1451 }
1452}
1453
1454#ifdef CONFIG_HOTPLUG
1455
1456/* This is used by the PCI hotplug driver to allocate resources
1457 * of newly plugged busses. We can try to consolidate with the
1458 * rest of the code later; for now, keep it as-is, as our main
1459 * resource allocation function doesn't deal with sub-trees yet.
1460 */
1461void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
1462{
1463 struct pci_dev *dev;
1464 struct pci_bus *child_bus;
1465
1466 list_for_each_entry(dev, &bus->devices, bus_list) {
1467 int i;
1468
1469 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1470 struct resource *r = &dev->resource[i];
1471
1472 if (r->parent || !r->start || !r->flags)
1473 continue;
1474
1475 pr_debug("PCI: Claiming %s: "
1476 "Resource %d: %016llx..%016llx [%x]\n",
1477 pci_name(dev), i,
1478 (unsigned long long)r->start,
1479 (unsigned long long)r->end,
1480 (unsigned int)r->flags);
1481
1482 pci_claim_resource(dev, i);
1483 }
1484 }
1485
1486 list_for_each_entry(child_bus, &bus->children, node)
1487 pcibios_claim_one_bus(child_bus);
1488}
1489EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1490
1491
1492/* pcibios_finish_adding_to_bus
1493 *
1494 * This is to be called by the hotplug code after devices have been
1495 * added to a bus; this includes calling it for a PHB that is just
1496 * being added.
1497 */
1498void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1499{
1500 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1501 pci_domain_nr(bus), bus->number);
1502
1503 /* Allocate bus and devices resources */
1504 pcibios_allocate_bus_resources(bus);
1505 pcibios_claim_one_bus(bus);
1506
1507 /* Add new devices to global lists. Register in proc, sysfs. */
1508 pci_bus_add_devices(bus);
1509
1510 /* Fixup EEH */
1511 eeh_add_device_tree_late(bus);
1512}
1513EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1514
1515#endif /* CONFIG_HOTPLUG */
1516
1517int pcibios_enable_device(struct pci_dev *dev, int mask)
1518{
1519 return pci_enable_resources(dev, mask);
1520}
1521
1522void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
1523{
1524 struct pci_bus *bus = hose->bus;
1525 struct resource *res;
1526 int i;
1527
1528 /* Hookup PHB IO resource */
1529 bus->resource[0] = res = &hose->io_resource;
1530
1531 if (!res->flags) {
1532 printk(KERN_WARNING "PCI: I/O resource not set for host"
1533 " bridge %s (domain %d)\n",
1534 hose->dn->full_name, hose->global_number);
1535 /* Workaround for lack of IO resource only on 32-bit */
1536 res->start = (unsigned long)hose->io_base_virt - isa_io_base;
1537 res->end = res->start + IO_SPACE_LIMIT;
1538 res->flags = IORESOURCE_IO;
1539 }
1540
1541 pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
1542 (unsigned long long)res->start,
1543 (unsigned long long)res->end,
1544 (unsigned long)res->flags);
1545
1546 /* Hookup PHB Memory resources */
1547 for (i = 0; i < 3; ++i) {
1548 res = &hose->mem_resources[i];
1549 if (!res->flags) {
1550 if (i > 0)
1551 continue;
1552 printk(KERN_ERR "PCI: Memory resource 0 not set for "
1553 "host bridge %s (domain %d)\n",
1554 hose->dn->full_name, hose->global_number);
1555
1556 /* Workaround for lack of MEM resource only on 32-bit */
1557 res->start = hose->pci_mem_offset;
1558 res->end = (resource_size_t)-1LL;
1559 res->flags = IORESOURCE_MEM;
1560
1561 }
1562 bus->resource[i+1] = res;
1563
1564 pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
1565 i, (unsigned long long)res->start,
1566 (unsigned long long)res->end,
1567 (unsigned long)res->flags);
1568 }
1569
1570 pr_debug("PCI: PHB MEM offset = %016llx\n",
1571 (unsigned long long)hose->pci_mem_offset);
1572 pr_debug("PCI: PHB IO offset = %08lx\n",
1573 (unsigned long)hose->io_base_virt - _IO_BASE);
1574}
1575
1576/*
1577 * Null PCI config access functions, for the case when we can't
1578 * find a hose.
1579 */
1580#define NULL_PCI_OP(rw, size, type) \
1581static int \
1582null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1583{ \
1584 return PCIBIOS_DEVICE_NOT_FOUND; \
1585}
1586
1587static int
1588null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1589 int len, u32 *val)
1590{
1591 return PCIBIOS_DEVICE_NOT_FOUND;
1592}
1593
1594static int
1595null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1596 int len, u32 val)
1597{
1598 return PCIBIOS_DEVICE_NOT_FOUND;
1599}
1600
1601static struct pci_ops null_pci_ops = {
1602 .read = null_read_config,
1603 .write = null_write_config,
1604};
1605
1606/*
1607 * These functions are used early on before PCI scanning is done
1608 * and all of the pci_dev and pci_bus structures have been created.
1609 */
1610static struct pci_bus *
1611fake_pci_bus(struct pci_controller *hose, int busnr)
1612{
1613 static struct pci_bus bus;
1614
1615 if (!hose)
1616 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1617
1618 bus.number = busnr;
1619 bus.sysdata = hose;
1620 bus.ops = hose ? hose->ops : &null_pci_ops;
1621 return &bus;
1622}
1623
1624#define EARLY_PCI_OP(rw, size, type) \
1625int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1626 int devfn, int offset, type value) \
1627{ \
1628 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1629 devfn, offset, value); \
1630}
1631
1632EARLY_PCI_OP(read, byte, u8 *)
1633EARLY_PCI_OP(read, word, u16 *)
1634EARLY_PCI_OP(read, dword, u32 *)
1635EARLY_PCI_OP(write, byte, u8)
1636EARLY_PCI_OP(write, word, u16)
1637EARLY_PCI_OP(write, dword, u32)
1638
1639int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1640 int cap)
1641{
1642 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1643}
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
new file mode 100644
index 000000000000..3c3d808d7ce0
--- /dev/null
+++ b/arch/microblaze/pci/pci_32.c
@@ -0,0 +1,431 @@
1/*
2 * Common pmac/prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/kernel.h>
6#include <linux/pci.h>
7#include <linux/delay.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/capability.h>
11#include <linux/sched.h>
12#include <linux/errno.h>
13#include <linux/bootmem.h>
14#include <linux/irq.h>
15#include <linux/list.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18
19#include <asm/processor.h>
20#include <asm/io.h>
21#include <asm/prom.h>
22#include <asm/sections.h>
23#include <asm/pci-bridge.h>
24#include <asm/byteorder.h>
25#include <asm/uaccess.h>
26
27#undef DEBUG
28
29unsigned long isa_io_base;
30unsigned long pci_dram_offset;
31int pcibios_assign_bus_offset = 1;
32
33static u8 *pci_to_OF_bus_map;
34
35/* By default, we don't re-assign bus numbers. We do this only on
36 * some pmacs
37 */
38static int pci_assign_all_buses;
39
40static int pci_bus_count;
41
42/*
43 * Functions below are used on OpenFirmware machines.
44 */
45static void
46make_one_node_map(struct device_node *node, u8 pci_bus)
47{
48 const int *bus_range;
 49	int len;
 50	struct device_node *child;
51 if (pci_bus >= pci_bus_count)
52 return;
53 bus_range = of_get_property(node, "bus-range", &len);
54 if (bus_range == NULL || len < 2 * sizeof(int)) {
55 printk(KERN_WARNING "Can't get bus-range for %s, "
56 "assuming it starts at 0\n", node->full_name);
57 pci_to_OF_bus_map[pci_bus] = 0;
58 } else
59 pci_to_OF_bus_map[pci_bus] = bus_range[0];
60
 61	for_each_child_of_node(node, child) {
 62		struct pci_dev *dev;
 63		const unsigned int *class_code, *reg;
 64
 65		class_code = of_get_property(child, "class-code", NULL);
 66		if (!class_code ||
 67			((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
 68			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
 69			continue;
 70		reg = of_get_property(child, "reg", NULL);
 71		if (!reg)
 72			continue;
 73		dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
 74		if (!dev || !dev->subordinate) {
 75			pci_dev_put(dev);
 76			continue;
 77		}
 78		make_one_node_map(child, dev->subordinate->number);
 79		pci_dev_put(dev);
 80	}
81}
82
83void
84pcibios_make_OF_bus_map(void)
85{
86 int i;
87 struct pci_controller *hose, *tmp;
88 struct property *map_prop;
89 struct device_node *dn;
90
91 pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
92 if (!pci_to_OF_bus_map) {
93 printk(KERN_ERR "Can't allocate OF bus map !\n");
94 return;
95 }
96
 97	/* We fill the bus map with invalid values, which helps
 98	 * debugging.
 99	 */
100 for (i = 0; i < pci_bus_count; i++)
101 pci_to_OF_bus_map[i] = 0xff;
102
103 /* For each hose, we begin searching bridges */
104 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
105 struct device_node *node = hose->dn;
106
107 if (!node)
108 continue;
109 make_one_node_map(node, hose->first_busno);
110 }
111 dn = of_find_node_by_path("/");
112 map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
113 if (map_prop) {
114 BUG_ON(pci_bus_count > map_prop->length);
115 memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
116 }
117 of_node_put(dn);
118#ifdef DEBUG
119 printk(KERN_INFO "PCI->OF bus map:\n");
120 for (i = 0; i < pci_bus_count; i++) {
121 if (pci_to_OF_bus_map[i] == 0xff)
122 continue;
123 printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]);
124 }
125#endif
126}
127
128typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data);
129
130static struct device_node *scan_OF_pci_childs(struct device_node *parent,
131 pci_OF_scan_iterator filter, void *data)
132{
133 struct device_node *node;
134 struct device_node *sub_node;
135
136 for_each_child_of_node(parent, node) {
137 const unsigned int *class_code;
138
139 if (filter(node, data)) {
140 of_node_put(node);
141 return node;
142 }
143
 144		/* For PCI<->PCI bridges or CardBus bridges, we go down.
 145		 * Note: some OFs create a parent node "multifunc-device" as
 146		 * a fake root for all functions of a multi-function device;
 147		 * we go down them as well.
 148		 */
149 class_code = of_get_property(node, "class-code", NULL);
150 if ((!class_code ||
151 ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
152 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
153 strcmp(node->name, "multifunc-device"))
154 continue;
155 sub_node = scan_OF_pci_childs(node, filter, data);
156 if (sub_node) {
157 of_node_put(node);
158 return sub_node;
159 }
160 }
161 return NULL;
162}
163
164static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
165 unsigned int devfn)
166{
167 struct device_node *np, *cnp;
168 const u32 *reg;
 169	int psize;
170
171 for_each_child_of_node(parent, np) {
172 reg = of_get_property(np, "reg", &psize);
173 if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
174 return np;
175
176 /* Note: some OFs create a parent node "multifunc-device" as
177 * a fake root for all functions of a multi-function device,
178 * we go down them as well. */
179 if (!strcmp(np->name, "multifunc-device")) {
180 cnp = scan_OF_for_pci_dev(np, devfn);
181 if (cnp)
182 return cnp;
183 }
184 }
185 return NULL;
186}
187
188
189static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
190{
191 struct device_node *parent, *np;
192
193 /* Are we a root bus ? */
194 if (bus->self == NULL || bus->parent == NULL) {
195 struct pci_controller *hose = pci_bus_to_host(bus);
196 if (hose == NULL)
197 return NULL;
198 return of_node_get(hose->dn);
199 }
200
201 /* not a root bus, we need to get our parent */
202 parent = scan_OF_for_pci_bus(bus->parent);
203 if (parent == NULL)
204 return NULL;
205
206 /* now iterate for children for a match */
207 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
208 of_node_put(parent);
209
210 return np;
211}
212
213/*
214 * Scans the OF tree for a device node matching a PCI device
215 */
216struct device_node *
217pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
218{
219 struct device_node *parent, *np;
220
221 pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
222 parent = scan_OF_for_pci_bus(bus);
223 if (parent == NULL)
224 return NULL;
225 pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
226 np = scan_OF_for_pci_dev(parent, devfn);
227 of_node_put(parent);
228 pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
229
 230	/* XXX most callers don't release the returned node,
 231	 * mostly because ppc64 doesn't increase the refcount;
 232	 * we need to fix that.
 233	 */
234 return np;
235}
236EXPORT_SYMBOL(pci_busdev_to_OF_node);
237
238struct device_node*
239pci_device_to_OF_node(struct pci_dev *dev)
240{
241 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
242}
243EXPORT_SYMBOL(pci_device_to_OF_node);
244
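A driver can use this to read board-specific properties from its firmware
node. A sketch, with the property name invented:

	static const u32 *example_get_prop(struct pci_dev *pdev, int *lenp)
	{
		struct device_node *np = pci_device_to_OF_node(pdev);

		if (!np)
			return NULL;
		return of_get_property(np, "example,custom-prop", lenp);
	}
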
245static int
246find_OF_pci_device_filter(struct device_node *node, void *data)
247{
248 return ((void *)node == data);
249}
250
251/*
252 * Returns the PCI device matching a given OF node
253 */
254int
255pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
256{
257 const unsigned int *reg;
258 struct pci_controller *hose;
259 struct pci_dev *dev = NULL;
260
261 /* Make sure it's really a PCI device */
262 hose = pci_find_hose_for_OF_device(node);
263 if (!hose || !hose->dn)
264 return -ENODEV;
265 if (!scan_OF_pci_childs(hose->dn,
266 find_OF_pci_device_filter, (void *)node))
267 return -ENODEV;
268 reg = of_get_property(node, "reg", NULL);
269 if (!reg)
270 return -ENODEV;
271 *bus = (reg[0] >> 16) & 0xff;
272 *devfn = ((reg[0] >> 8) & 0xff);
273
 274	/* Ok, here we need some tweak. If we have already renumbered
 275	 * all busses, we can't rely on the OF bus number any more.
 276	 * The pci_to_OF_bus_map is not enough, as several PCI busses
 277	 * may match the same OF bus number.
 278	 */
279 if (!pci_to_OF_bus_map)
280 return 0;
281
282 for_each_pci_dev(dev)
283 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
284 dev->devfn == *devfn) {
285 *bus = dev->bus->number;
286 pci_dev_put(dev);
287 return 0;
288 }
289
290 return -ENODEV;
291}
292EXPORT_SYMBOL(pci_device_from_OF_node);
293
294/* We create the "pci-OF-bus-map" property now so it appears in the
295 * /proc device tree
296 */
297void __init
298pci_create_OF_bus_map(void)
299{
300 struct property *of_prop;
301 struct device_node *dn;
302
 303	of_prop = (struct property *) alloc_bootmem(sizeof(struct property) +
 304						    256);
305 if (!of_prop)
306 return;
307 dn = of_find_node_by_path("/");
308 if (dn) {
309 memset(of_prop, -1, sizeof(struct property) + 256);
310 of_prop->name = "pci-OF-bus-map";
311 of_prop->length = 256;
312 of_prop->value = &of_prop[1];
313 prom_add_property(dn, of_prop);
314 of_node_put(dn);
315 }
316}
317
318static void __devinit pcibios_scan_phb(struct pci_controller *hose)
319{
320 struct pci_bus *bus;
321 struct device_node *node = hose->dn;
322 unsigned long io_offset;
323 struct resource *res = &hose->io_resource;
324
325 pr_debug("PCI: Scanning PHB %s\n",
326 node ? node->full_name : "<NO NAME>");
327
328 /* Create an empty bus for the toplevel */
329 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
330 if (bus == NULL) {
331 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
332 hose->global_number);
333 return;
334 }
335 bus->secondary = hose->first_busno;
336 hose->bus = bus;
337
338 /* Fixup IO space offset */
339 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
340 res->start = (res->start + io_offset) & 0xffffffffu;
341 res->end = (res->end + io_offset) & 0xffffffffu;
342
343 /* Wire up PHB bus resources */
344 pcibios_setup_phb_resources(hose);
345
346 /* Scan children */
347 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
348}
349
350static int __init pcibios_init(void)
351{
352 struct pci_controller *hose, *tmp;
353 int next_busno = 0;
354
355 printk(KERN_INFO "PCI: Probing PCI hardware\n");
356
357 if (pci_flags & PCI_REASSIGN_ALL_BUS) {
 358		printk(KERN_INFO "setting pci_assign_all_buses\n");
359 pci_assign_all_buses = 1;
360 }
361
362 /* Scan all of the recorded PCI controllers. */
363 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
364 if (pci_assign_all_buses)
365 hose->first_busno = next_busno;
366 hose->last_busno = 0xff;
367 pcibios_scan_phb(hose);
368 printk(KERN_INFO "calling pci_bus_add_devices()\n");
369 pci_bus_add_devices(hose->bus);
370 if (pci_assign_all_buses || next_busno <= hose->last_busno)
371			next_busno = hose->last_busno +
372						pcibios_assign_bus_offset;
373 }
374 pci_bus_count = next_busno;
375
376	/* OpenFirmware-based machines need a map of OF bus
377 * numbers vs. kernel bus numbers since we may have to
378 * remap them.
379 */
380 if (pci_assign_all_buses)
381 pcibios_make_OF_bus_map();
382
383 /* Call common code to handle resource allocation */
384 pcibios_resource_survey();
385
386 return 0;
387}
388
389subsys_initcall(pcibios_init);
390
391static struct pci_controller*
392pci_bus_to_hose(int bus)
393{
394 struct pci_controller *hose, *tmp;
395
396 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
397 if (bus >= hose->first_busno && bus <= hose->last_busno)
398 return hose;
399 return NULL;
400}
401
402/* Provide information on locations of various I/O regions in physical
403 * memory. Do this on a per-card basis so that we choose the right
404 * root bridge.
405 * Note that the returned IO or memory base is a physical address.
406 */
407
408long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
409{
410 struct pci_controller *hose;
411 long result = -EOPNOTSUPP;
412
413 hose = pci_bus_to_hose(bus);
414 if (!hose)
415 return -ENODEV;
416
417 switch (which) {
418 case IOBASE_BRIDGE_NUMBER:
419 return (long)hose->first_busno;
420 case IOBASE_MEMORY:
421 return (long)hose->pci_mem_offset;
422 case IOBASE_IO:
423 return (long)hose->io_base_phys;
424 case IOBASE_ISA_IO:
425 return (long)isa_io_base;
426 case IOBASE_ISA_MEM:
427 return (long)isa_mem_base;
428 }
429
430 return result;
431}
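
A hedged userspace sketch of exercising this syscall, assuming __NR_pciconfig_iobase is wired into the architecture's syscall table and that IOBASE_IO has the value shown (both are assumptions, not confirmed by this patch):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define IOBASE_IO 2  /* assumed to match the kernel's enumeration */

    int main(void)
    {
    #ifdef __NR_pciconfig_iobase
        long base = syscall(__NR_pciconfig_iobase, IOBASE_IO, 0, 0);

        if (base < 0)
            perror("pciconfig_iobase");
        else
            printf("bus 0 physical I/O base: 0x%lx\n", base);
    #else
        printf("pciconfig_iobase is not wired up on this libc/arch\n");
    #endif
        return 0;
    }
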
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c
new file mode 100644
index 000000000000..7869a41b0f94
--- /dev/null
+++ b/arch/microblaze/pci/xilinx_pci.c
@@ -0,0 +1,168 @@
1/*
2 * PCI support for Xilinx plbv46_pci soft-core which can be used on
3 * Xilinx Virtex ML410 / ML510 boards.
4 *
5 * Copyright 2009 Roderick Colenbrander
6 * Copyright 2009 Secret Lab Technologies Ltd.
7 *
8 * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
9 * by Benjamin Herrenschmidt.
10 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
11 *
12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program is licensed "as is" without any warranty of any
14 * kind, whether express or implied.
15 */
16
17#include <linux/ioport.h>
18#include <linux/of.h>
19#include <linux/pci.h>
20#include <asm/io.h>
21
22#define XPLB_PCI_ADDR 0x10c
23#define XPLB_PCI_DATA 0x110
24#define XPLB_PCI_BUS 0x114
25
26#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
27 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
28
29static struct of_device_id xilinx_pci_match[] = {
30 { .compatible = "xlnx,plbv46-pci-1.03.a", },
31 {}
32};
33
34/**
35 * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
36 */
37static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
38{
39 struct pci_controller *hose;
40 int i;
41
42 if (dev->devfn || dev->bus->self)
43 return;
44
45 hose = pci_bus_to_host(dev->bus);
46 if (!hose)
47 return;
48
49 if (!of_match_node(xilinx_pci_match, hose->dn))
50 return;
51
52	/* Hide the PCI host BARs from the kernel, as their content doesn't
53	 * fit well into the generic resource management.
54	 */
55 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
56 dev->resource[i].start = 0;
57 dev->resource[i].end = 0;
58 dev->resource[i].flags = 0;
59 }
60
61 dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
62 pci_name(dev));
63}
64DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
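
The fixup above registers for every vendor/device pair and filters inside the hook, since the host bridge has no fixed device ID to match on. For comparison, a hedged sketch of binding a header fixup to Xilinx's vendor ID at declaration time instead; the hook body is illustrative only and not part of this patch:

    #include <linux/pci.h>

    /* Illustrative only: runs at header-probe time for any device
     * whose vendor ID is Xilinx (0x10ee). */
    static void example_xilinx_fixup(struct pci_dev *dev)
    {
        dev_info(&dev->dev, "example header fixup ran\n");
    }
    DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_XILINX, PCI_ANY_ID,
                             example_xilinx_fixup);
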
65
66#ifdef DEBUG
67/**
68 * xilinx_pci_exclude_device - Don't do config access for non-root bus
69 *
70 * This is a hack. Config access to any bus other than bus 0 does not
71 * currently work on the ML510, so we prevent it here.
72 */
73static int
74xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
75{
76 return (bus != 0);
77}
78
79/**
80 * xilinx_early_pci_scan - List pci config space for available devices
81 *
82 * List pci devices in very early phase.
83 */
84void __init xilinx_early_pci_scan(struct pci_controller *hose)
85{
86 u32 bus = 0;
87 u32 val, dev, func, offset;
88
89	/* Currently only 2 devices are connected - up to 32 are possible */
90 for (dev = 0; dev < 2; dev++) {
92		/* List only the first function number - up to 8 are possible */
92 for (func = 0; func < 1; func++) {
93 printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
94			/* Read the first 64 standardized bytes */
95			/* Up to 192 more bytes can hold the capability list */
96 for (offset = 0; offset < 64; offset += 4) {
97 early_read_config_dword(hose, bus,
98 PCI_DEVFN(dev, func), offset, &val);
99 if (offset == 0 && val == 0xFFFFFFFF) {
100 printk(KERN_CONT "\nABSENT");
101 break;
102 }
103 if (!(offset % 0x10))
104 printk(KERN_CONT "\n%04x: ", offset);
105
106 printk(KERN_CONT "%08x ", val);
107 }
108 printk(KERN_INFO "\n");
109 }
110 }
111}
112#else
113void __init xilinx_early_pci_scan(struct pci_controller *hose)
114{
115}
116#endif
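
The early scan prints raw config dwords; the dword at offset 0 packs the vendor ID in the low half and the device ID in the high half, which is also why 0xFFFFFFFF marks an absent device. A standalone decode sketch (the device ID in the sample is made up; 0x10ee is Xilinx's vendor ID):

    #include <stdint.h>
    #include <stdio.h>

    static void decode_id_dword(uint32_t val)
    {
        uint16_t vendor = val & 0xffff;
        uint16_t device = val >> 16;

        printf("vendor %04x device %04x\n", vendor, device);
    }

    int main(void)
    {
        decode_id_dword(0x023010ee);  /* hypothetical device on bus 0 */
        return 0;
    }
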
117
118/**
119 * xilinx_pci_init - Find and register a Xilinx PCI host bridge
120 */
121void __init xilinx_pci_init(void)
122{
123 struct pci_controller *hose;
124 struct resource r;
125 void __iomem *pci_reg;
126 struct device_node *pci_node;
127
128 pci_node = of_find_matching_node(NULL, xilinx_pci_match);
129 if (!pci_node)
130 return;
131
132 if (of_address_to_resource(pci_node, 0, &r)) {
133 pr_err("xilinx-pci: cannot resolve base address\n");
134 return;
135 }
136
137 hose = pcibios_alloc_controller(pci_node);
138 if (!hose) {
139 pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
140 return;
141 }
142
143 /* Setup config space */
144 setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
145 r.start + XPLB_PCI_DATA,
146 INDIRECT_TYPE_SET_CFG_TYPE);
147
148	/* According to the Xilinx plbv46_pci documentation, the soft-core
149	 * starts a self-init when the bus master enable bit is set. Without
150	 * this bit set, the PCI bus can't be scanned.
151	 */
152 early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
153
154 /* Set the max latency timer to 255 */
155 early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
156
157	/* Set the max bus number to 255, and the bus/subbus numbers to 0 */
158 pci_reg = of_iomap(pci_node, 0);
159 out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
160 iounmap(pci_reg);
161
162	/* Register the host bridge with the Linux kernel! */
163 pci_process_bridge_OF_ranges(hose, pci_node,
164 INDIRECT_TYPE_SET_CFG_TYPE);
165
166 pr_info("xilinx-pci: Registered PCI host bridge\n");
167 xilinx_early_pci_scan(hose);
168}
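
xilinx_pci_init() is intended to run once during early architecture setup, before the generic pcibios_init() initcall walks the hose list. A hedged sketch of a call site; the hook name below is hypothetical and the actual wiring may live elsewhere in this series:

    #include <linux/init.h>

    void xilinx_pci_init(void);  /* declared elsewhere in this patch */

    /* Hypothetical board-setup hook; the real call site may differ. */
    void __init board_pci_setup(void)
    {
        xilinx_pci_init();
    }
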