author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-18 19:57:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-18 19:57:24 -0400
commit		722874465e3cd3268387b96e02220f84b35a8d98 (patch)
tree		52f453acb7274ce8e939cf2d5f6ee748b826ef15
parent		31cc1dd344d941358345bd02f24c629dada9b08c (diff)
parent		841d6e8c4e969b2cdd80f7216af34d932c41b1a6 (diff)
Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (27 commits)
  microblaze: entry.S use delay slot for return handlers
  microblaze: Save current task directly
  microblaze: Simplify entry.S - save/restore r3/r4 - ret_from_trap
  microblaze: PCI early support for noMMU system
  microblaze: Fix dma alloc and free coherent dma functions
  microblaze: Add consistent code
  microblaze: pgtable.h: move consistent functions
  microblaze: Remove ancient Kconfig option for consistent mapping
  microblaze: Remove VMALLOC_VMADDR
  microblaze: Add define for ASM_LOOP
  microblaze: Preliminary support for dma drivers
  microblaze: remove trailing space in messages
  microblaze: Use generic show_mem()
  microblaze: Change temp register for cmdline
  microblaze: Preliminary support for dma drivers
  microblaze: Move cache function to cache.c
  microblaze: Add support from PREEMPT
  microblaze: Add support for Xilinx PCI host bridge
  microblaze: Enable PCI, missing files
  microblaze: Add core PCI files
  ...
-rw-r--r--	arch/microblaze/Kconfig				|   64
-rw-r--r--	arch/microblaze/Makefile			|    1
-rw-r--r--	arch/microblaze/include/asm/device.h		|    4
-rw-r--r--	arch/microblaze/include/asm/dma-mapping.h	|  154
-rw-r--r--	arch/microblaze/include/asm/io.h		|   31
-rw-r--r--	arch/microblaze/include/asm/irq.h		|   37
-rw-r--r--	arch/microblaze/include/asm/page.h		|   12
-rw-r--r--	arch/microblaze/include/asm/pci-bridge.h	|  195
-rw-r--r--	arch/microblaze/include/asm/pci.h		|  178
-rw-r--r--	arch/microblaze/include/asm/pgalloc.h		|    2
-rw-r--r--	arch/microblaze/include/asm/pgtable.h		|   40
-rw-r--r--	arch/microblaze/include/asm/prom.h		|   15
-rw-r--r--	arch/microblaze/include/asm/system.h		|    3
-rw-r--r--	arch/microblaze/include/asm/tlbflush.h		|    2
-rw-r--r--	arch/microblaze/kernel/Makefile			|    2
-rw-r--r--	arch/microblaze/kernel/asm-offsets.c		|    1
-rw-r--r--	arch/microblaze/kernel/cpu/cache.c		|  211
-rw-r--r--	arch/microblaze/kernel/dma.c			|  156
-rw-r--r--	arch/microblaze/kernel/entry.S			|  116
-rw-r--r--	arch/microblaze/kernel/head.S			|   13
-rw-r--r--	arch/microblaze/kernel/irq.c			|   15
-rw-r--r--	arch/microblaze/kernel/setup.c			|   45
-rw-r--r--	arch/microblaze/mm/Makefile			|    2
-rw-r--r--	arch/microblaze/mm/consistent.c			|  246
-rw-r--r--	arch/microblaze/mm/init.c			|   39
-rw-r--r--	arch/microblaze/mm/pgtable.c			|    2
-rw-r--r--	arch/microblaze/pci/Makefile			|    6
-rw-r--r--	arch/microblaze/pci/indirect_pci.c		|  163
-rw-r--r--	arch/microblaze/pci/iomap.c			|   39
-rw-r--r--	arch/microblaze/pci/pci-common.c		| 1642
-rw-r--r--	arch/microblaze/pci/pci_32.c			|  430
-rw-r--r--	arch/microblaze/pci/xilinx_pci.c		|  168
-rw-r--r--	drivers/pci/Makefile				|    1
33 files changed, 3840 insertions(+), 195 deletions(-)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index b008168ae946..203ec61c6d4c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -14,6 +14,8 @@ config MICROBLAZE
 	select USB_ARCH_HAS_EHCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_OPROFILE
+	select HAVE_DMA_ATTRS
+	select HAVE_DMA_API_DEBUG
 	select TRACING_SUPPORT
 
 config SWAP
@@ -76,9 +78,6 @@ config HAVE_LATENCYTOP_SUPPORT
 config PCI
 	def_bool n
 
-config NO_DMA
-	def_bool y
-
 config DTC
 	def_bool y
 
@@ -146,7 +145,6 @@ menu "Advanced setup"
 
 config ADVANCED_OPTIONS
 	bool "Prompt for advanced kernel configuration options"
-	depends on MMU
 	help
 	  This option will enable prompting for a variety of advanced kernel
 	  configuration options. These options can cause the kernel to not
@@ -158,6 +156,15 @@ config ADVANCED_OPTIONS
 comment "Default settings for advanced configuration options are used"
 	depends on !ADVANCED_OPTIONS
 
+config XILINX_UNCACHED_SHADOW
+	bool "Are you using uncached shadow for RAM ?"
+	depends on ADVANCED_OPTIONS && !MMU
+	default n
+	help
+	  This is needed to be able to allocate uncachable memory regions.
+	  The feature requires the design to define the RAM memory controller
+	  window to be twice as large as the actual physical memory.
+
 config HIGHMEM_START_BOOL
 	bool "Set high memory pool address"
 	depends on ADVANCED_OPTIONS && HIGHMEM
@@ -175,7 +182,7 @@ config HIGHMEM_START
 
 config LOWMEM_SIZE_BOOL
 	bool "Set maximum low memory"
-	depends on ADVANCED_OPTIONS
+	depends on ADVANCED_OPTIONS && MMU
 	help
 	  This option allows you to set the maximum amount of memory which
 	  will be used as "low memory", that is, memory which the kernel can
@@ -187,7 +194,6 @@ config LOWMEM_SIZE_BOOL
 
 config LOWMEM_SIZE
 	hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
-	depends on MMU
 	default "0x30000000"
 
 config KERNEL_START_BOOL
@@ -208,7 +214,7 @@ config KERNEL_START
 
 config TASK_SIZE_BOOL
 	bool "Set custom user task size"
-	depends on ADVANCED_OPTIONS
+	depends on ADVANCED_OPTIONS && MMU
 	help
 	  This option allows you to set the amount of virtual address space
 	  allocated to user tasks. This can be useful in optimizing the
@@ -218,42 +224,34 @@ config TASK_SIZE_BOOL
 
 config TASK_SIZE
 	hex "Size of user task space" if TASK_SIZE_BOOL
-	depends on MMU
 	default "0x80000000"
 
-config CONSISTENT_START_BOOL
-	bool "Set custom consistent memory pool address"
-	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-	help
-	  This option allows you to set the base virtual address
-	  of the the consistent memory pool. This pool of virtual
-	  memory is used to make consistent memory allocations.
+endmenu
 
-config CONSISTENT_START
-	hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
-	depends on MMU
-	default "0xff100000" if NOT_COHERENT_CACHE
+source "mm/Kconfig"
 
-config CONSISTENT_SIZE_BOOL
-	bool "Set custom consistent memory pool size"
-	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-	help
-	  This option allows you to set the size of the the
-	  consistent memory pool. This pool of virtual memory
-	  is used to make consistent memory allocations.
+menu "Exectuable file formats"
 
-config CONSISTENT_SIZE
-	hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
-	depends on MMU
-	default "0x00200000" if NOT_COHERENT_CACHE
+source "fs/Kconfig.binfmt"
 
-endmenu
+endmenu
 
-source "mm/Kconfig"
+menu "Bus Options"
 
-menu "Exectuable file formats"
+config PCI
+	bool "PCI support"
 
-source "fs/Kconfig.binfmt"
+config PCI_DOMAINS
+	def_bool PCI
+
+config PCI_SYSCALL
+	def_bool PCI
+
+config PCI_XILINX
+	bool "Xilinx PCI host bridge support"
+	depends on PCI
+
+source "drivers/pci/Kconfig"
 
 endmenu
 
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d2d6cfcb1a30..836832dd9b26 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -50,6 +50,7 @@ libs-y += $(LIBGCC)
 core-y += arch/microblaze/kernel/
 core-y += arch/microblaze/mm/
 core-y += arch/microblaze/platform/
+core-$(CONFIG_PCI) += arch/microblaze/pci/
 
 drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
 
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index 78a038452c0f..402b46e630f6 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -14,6 +14,10 @@ struct device_node;
 struct dev_archdata {
 	/* Optional pointer to an OF device node */
 	struct device_node	*of_node;
+
+	/* DMA operations on that device */
+	struct dma_map_ops	*dma_ops;
+	void			*dma_data;
 };
 
 struct pdev_archdata {
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index d00e40099165..18b3731c8509 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -1 +1,153 @@
-#include <asm-generic/dma-mapping-broken.h>
+/*
+ * Implements the generic device dma API for microblaze and the pci
+ *
+ * Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009-2010 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * This file is base on powerpc and x86 dma-mapping.h versions
+ * Copyright (C) 2004 IBM
+ */
+
+#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
+#define _ASM_MICROBLAZE_DMA_MAPPING_H
+
+/*
+ * See Documentation/PCI/PCI-DMA-mapping.txt and
+ * Documentation/DMA-API.txt for documentation.
+ */
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+#include <asm/io.h>
+#include <asm-generic/dma-coherent.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
+
+#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
+#define __dma_free_coherent(size, addr)		((void)0)
+#define __dma_sync(addr, size, rw)		((void)0)
+
+static inline unsigned long device_to_mask(struct device *dev)
+{
+	if (dev->dma_mask && *dev->dma_mask)
+		return *dev->dma_mask;
+	/* Assume devices without mask can take 32 bit addresses */
+	return 0xfffffffful;
+}
+
+extern struct dma_map_ops *dma_ops;
+
+/*
+ * Available generic sets of operations
+ */
+extern struct dma_map_ops dma_direct_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	/* We don't handle the NULL dev case for ISA for now. We could
+	 * do it via an out of line call but it is not needed for now. The
+	 * only ISA DMA device we support is the floppy and we have a hack
+	 * in the floppy driver directly to get a device for us.
+	 */
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return NULL;
+
+	return dev->archdata.dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+	dev->archdata.dma_ops = ops;
+}
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (unlikely(!ops))
+		return 0;
+	if (!ops->dma_supported)
+		return 1;
+	return ops->dma_supported(dev, mask);
+}
+
+#ifdef CONFIG_PCI
+/* We have our own implementation of pci_set_dma_mask() */
+#define HAVE_ARCH_PCI_SET_DMA_MASK
+
+#endif
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (unlikely(ops == NULL))
+		return -EIO;
+	if (ops->set_dma_mask)
+		return ops->set_dma_mask(dev, dma_mask);
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+	*dev->dma_mask = dma_mask;
+	return 0;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
+
+	return (dma_addr == DMA_ERROR_CODE);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h)	(1)
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	BUG_ON(!ops);
+
+	memory = ops->alloc_coherent(dev, size, dma_handle, flag);
+
+	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+	return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!ops);
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	return L1_CACHE_BYTES;
+}
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	__dma_sync(vaddr, size, (int)direction);
+}
+
+#endif	/* _ASM_MICROBLAZE_DMA_MAPPING_H */
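The header above replaces the stub dma-mapping-broken.h include with a real implementation: every DMA call is dispatched through the per-device struct dma_map_ops stored in dev->archdata. As a rough sketch of how a driver of this era consumes that API (the device and buffer below are hypothetical, not part of this patch):

    /* Hedged sketch: coherent allocation through the ops table above.
     * 'pdev' is a hypothetical, already-enabled PCI device.
     */
    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int example_setup_ring(struct pci_dev *pdev)
    {
    	dma_addr_t ring_dma;
    	/* dispatches to get_dma_ops(&pdev->dev)->alloc_coherent() */
    	void *ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
    					&ring_dma, GFP_KERNEL);

    	if (!ring)
    		return -ENOMEM;
    	/* the device would be programmed with ring_dma; the CPU uses ring */
    	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
    	return 0;
    }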
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 267c7c779e53..32d621a56aee 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -15,7 +15,23 @@
 #include <asm/page.h>
 #include <linux/types.h>
 #include <linux/mm.h>          /* Get struct page {...} */
+#include <asm-generic/iomap.h>
 
+#ifndef CONFIG_PCI
+#define _IO_BASE	0
+#define _ISA_MEM_BASE	0
+#define PCI_DRAM_OFFSET	0
+#else
+#define _IO_BASE	isa_io_base
+#define _ISA_MEM_BASE	isa_mem_base
+#define PCI_DRAM_OFFSET	pci_dram_offset
+#endif
+
+extern unsigned long isa_io_base;
+extern unsigned long pci_io_base;
+extern unsigned long pci_dram_offset;
+
+extern resource_size_t isa_mem_base;
 
 #define IO_SPACE_LIMIT (0xFFFFFFFF)
 
@@ -124,9 +140,6 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define virt_to_phys(addr)	((unsigned long)__virt_to_phys(addr))
 #define virt_to_bus(addr)	((unsigned long)__virt_to_phys(addr))
 
-#define __page_address(page) \
-		(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define page_to_phys(page)	virt_to_phys((void *)__page_address(page))
 #define page_to_bus(page)	(page_to_phys(page))
 #define bus_to_virt(addr)	(phys_to_virt(addr))
 
@@ -227,15 +240,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
 #define out_8(a, v) __raw_writeb((v), (a))
 #define in_8(a) __raw_readb(a)
 
-/* FIXME */
-static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
-	return (void __iomem *) (port);
-}
-
-static inline void ioport_unmap(void __iomem *addr)
-{
-	/* Nothing to do */
-}
+#define ioport_map(port, nr)	((void __iomem *)(port))
+#define ioport_unmap(addr)
 
 #endif /* _ASM_MICROBLAZE_IO_H */
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index 90f050535ebe..31a35c33df63 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -14,6 +14,12 @@
 
 #include <linux/interrupt.h>
 
+/* This type is the placeholder for a hardware interrupt number. It has to
+ * be big enough to enclose whatever representation is used by a given
+ * platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
 extern unsigned int nr_irq;
 
 #define NO_IRQ (-1)
@@ -21,7 +27,8 @@ extern unsigned int nr_irq;
 struct pt_regs;
 extern void do_IRQ(struct pt_regs *regs);
 
-/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
+/**
+ * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
  * @device: Device node of the device whose interrupt is to be mapped
  * @index: Index of the interrupt to map
  *
@@ -40,4 +47,32 @@ static inline void irq_dispose_mapping(unsigned int virq)
 	return;
 }
 
+struct irq_host;
+
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux virq space
+ * @host: host owning this hardware interrupt or NULL for default host
+ * @hwirq: hardware irq number in that host space
+ *
+ * Only one mapping per hardware interrupt is permitted. Returns a linux
+ * virq number.
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ */
+extern unsigned int irq_create_mapping(struct irq_host *host,
+					irq_hw_number_t hwirq);
+
+/**
+ * irq_create_of_mapping - Map a hardware interrupt into linux virq space
+ * @controller: Device node of the interrupt controller
+ * @inspec: Interrupt specifier from the device-tree
+ * @intsize: Size of the interrupt specifier from the device-tree
+ *
+ * This function is identical to irq_create_mapping except that it takes
+ * as input informations straight from the device-tree (typically the results
+ * of the of_irq_map_*() functions.
+ */
+extern unsigned int irq_create_of_mapping(struct device_node *controller,
+					u32 *intspec, unsigned int intsize);
+
 #endif /* _ASM_MICROBLAZE_IRQ_H */
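For illustration only (none of this is in the patch): the irq_create_mapping() prototype added above is the controller-side entry point for turning a hardware line number into a Linux virq, which a driver then requests normally. The host pointer and hwirq value here are made up, and error handling is simplified:

    /* Hedged sketch of the mapping flow declared above. */
    #include <linux/interrupt.h>
    #include <asm/irq.h>

    static irqreturn_t example_handler(int irq, void *dev_id)
    {
    	return IRQ_HANDLED;
    }

    static int example_map_and_request(struct irq_host *host)
    {
    	/* hwirq 5 is a made-up line on this hypothetical host */
    	unsigned int virq = irq_create_mapping(host, 5);

    	return request_irq(virq, example_handler, 0, "example", NULL);
    }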
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 9b66c0fa9a32..2dd1d04129e0 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -62,12 +62,6 @@ extern unsigned int __page_offset;
 #define PAGE_OFFSET	CONFIG_KERNEL_START
 
 /*
- * MAP_NR -- given an address, calculate the index of the page struct which
- * points to the address's page.
- */
-#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
-
-/*
  * The basic type of a PTE - 32 bit physical addressing.
  */
 typedef unsigned long pte_basic_t;
@@ -154,7 +148,11 @@ extern int page_is_ram(unsigned long pfn);
 #  define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))
 
 #  ifdef CONFIG_MMU
-#  define virt_to_page(kaddr)	(mem_map + MAP_NR(kaddr))
+
+#  define virt_to_page(kaddr)	(pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
+#  define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
+#  define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+
 #  else /* CONFIG_MMU */
 #  define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
 #  define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h
index 7ad28f6f5f1a..0c77cda9f5d8 100644
--- a/arch/microblaze/include/asm/pci-bridge.h
+++ b/arch/microblaze/include/asm/pci-bridge.h
@@ -1 +1,196 @@
+#ifndef _ASM_MICROBLAZE_PCI_BRIDGE_H
+#define _ASM_MICROBLAZE_PCI_BRIDGE_H
+#ifdef __KERNEL__
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
 #include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+
+struct device_node;
+
+enum {
+	/* Force re-assigning all resources (ignore firmware
+	 * setup completely)
+	 */
+	PCI_REASSIGN_ALL_RSRC	= 0x00000001,
+
+	/* Re-assign all bus numbers */
+	PCI_REASSIGN_ALL_BUS	= 0x00000002,
+
+	/* Do not try to assign, just use existing setup */
+	PCI_PROBE_ONLY		= 0x00000004,
+
+	/* Don't bother with ISA alignment unless the bridge has
+	 * ISA forwarding enabled
+	 */
+	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,
+
+	/* Enable domain numbers in /proc */
+	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,
+	/* ... except for domain 0 */
+	PCI_COMPAT_DOMAIN_0	= 0x00000020,
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+struct pci_controller {
+	struct pci_bus *bus;
+	char is_dynamic;
+	struct device_node *dn;
+	struct list_head list_node;
+	struct device *parent;
+
+	int first_busno;
+	int last_busno;
+
+	int self_busno;
+
+	void __iomem *io_base_virt;
+	resource_size_t io_base_phys;
+
+	resource_size_t pci_io_size;
+
+	/* Some machines (PReP) have a non 1:1 mapping of
+	 * the PCI memory space in the CPU bus space
+	 */
+	resource_size_t pci_mem_offset;
+
+	/* Some machines have a special region to forward the ISA
+	 * "memory" cycles such as VGA memory regions. Left to 0
+	 * if unsupported
+	 */
+	resource_size_t isa_mem_phys;
+	resource_size_t isa_mem_size;
+
+	struct pci_ops *ops;
+	unsigned int __iomem *cfg_addr;
+	void __iomem *cfg_data;
+
+	/*
+	 * Used for variants of PCI indirect handling and possible quirks:
+	 *  SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
+	 *  EXT_REG - provides access to PCI-e extended registers
+	 *  SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS
+	 *   on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
+	 *   to determine which bus number to match on when generating type0
+	 *   config cycles
+	 *  NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
+	 *   hanging if we don't have link and try to do config cycles to
+	 *   anything but the PHB. Only allow talking to the PHB if this is
+	 *   set.
+	 *  BIG_ENDIAN - cfg_addr is a big endian register
+	 *  BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs
+	 *   on the PLB4. Effectively disable MRM commands by setting this.
+	 */
+#define INDIRECT_TYPE_SET_CFG_TYPE		0x00000001
+#define INDIRECT_TYPE_EXT_REG			0x00000002
+#define INDIRECT_TYPE_SURPRESS_PRIMARY_BUS	0x00000004
+#define INDIRECT_TYPE_NO_PCIE_LINK		0x00000008
+#define INDIRECT_TYPE_BIG_ENDIAN		0x00000010
+#define INDIRECT_TYPE_BROKEN_MRM		0x00000020
+	u32 indirect_type;
+
+	/* Currently, we limit ourselves to 1 IO range and 3 mem
+	 * ranges since the common pci_bus structure can't handle more
+	 */
+	struct resource io_resource;
+	struct resource mem_resources[3];
+	int global_number;	/* PCI domain number */
+};
+
+static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
+{
+	return bus->sysdata;
+}
+
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+	/* No specific ISA handling on ppc32 at this stage, it
+	 * all goes through PCI
+	 */
+	return 0;
+}
+
+/* These are used for config access before all the PCI probing
+   has been done. */
+extern int early_read_config_byte(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u8 *val);
+extern int early_read_config_word(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u16 *val);
+extern int early_read_config_dword(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u32 *val);
+extern int early_write_config_byte(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u8 val);
+extern int early_write_config_word(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u16 val);
+extern int early_write_config_dword(struct pci_controller *hose, int bus,
+			int dev_fn, int where, u32 val);
+
+extern int early_find_capability(struct pci_controller *hose, int bus,
+				 int dev_fn, int cap);
+
+extern void setup_indirect_pci(struct pci_controller *hose,
+			       resource_size_t cfg_addr,
+			       resource_size_t cfg_data, u32 flags);
+
+/* Get the PCI host controller for an OF device */
+extern struct pci_controller *pci_find_hose_for_OF_device(
+			struct device_node *node);
+
+/* Fill up host controller resources from the OF node */
+extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
+			struct device_node *dev, int primary);
+
+/* Allocate & free a PCI host bridge structure */
+extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
+extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_setup_phb_resources(struct pci_controller *hose);
+
+#ifdef CONFIG_PCI
+extern unsigned int pci_flags;
+
+static inline void pci_set_flags(int flags)
+{
+	pci_flags = flags;
+}
+
+static inline void pci_add_flags(int flags)
+{
+	pci_flags |= flags;
+}
+
+static inline int pci_has_flag(int flag)
+{
+	return pci_flags & flag;
+}
+
+extern struct list_head hose_list;
+
+extern unsigned long pci_address_to_pio(phys_addr_t address);
+extern int pcibios_vaddr_is_ioport(void __iomem *address);
+#else
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+	return (unsigned long)-1;
+}
+static inline int pcibios_vaddr_is_ioport(void __iomem *address)
+{
+	return 0;
+}
+
+static inline void pci_set_flags(int flags) { }
+static inline void pci_add_flags(int flags) { }
+static inline int pci_has_flag(int flag)
+{
+	return 0;
+}
+#endif	/* CONFIG_PCI */
+
+#endif	/* __KERNEL__ */
+#endif	/* _ASM_MICROBLAZE_PCI_BRIDGE_H */
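Not part of the patch, but to show the intent of the structure above: the host bridge code stores the struct pci_controller in bus->sysdata, so pci_bus_to_host() can recover it from any device's bus. A hypothetical fixup might read it like this:

    /* Hedged sketch: reading controller state for a device's bus. */
    #include <linux/pci.h>
    #include <asm/pci-bridge.h>

    static void example_report(struct pci_dev *dev)
    {
    	struct pci_controller *hose = pci_bus_to_host(dev->bus);

    	/* global_number is the PCI domain this controller serves */
    	dev_info(&dev->dev, "domain %d, buses %d-%d\n",
    		 hose->global_number, hose->first_busno, hose->last_busno);
    }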
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 9f0df5faf2c8..bdd65aaee30d 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -1 +1,177 @@
1#include <asm-generic/pci.h> 1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Based on powerpc version
8 */
9
10#ifndef __ASM_MICROBLAZE_PCI_H
11#define __ASM_MICROBLAZE_PCI_H
12#ifdef __KERNEL__
13
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/string.h>
17#include <linux/dma-mapping.h>
18#include <linux/pci.h>
19
20#include <asm/scatterlist.h>
21#include <asm/io.h>
22#include <asm/prom.h>
23#include <asm/pci-bridge.h>
24
25#define PCIBIOS_MIN_IO 0x1000
26#define PCIBIOS_MIN_MEM 0x10000000
27
28struct pci_dev;
29
30/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
31#define IOBASE_BRIDGE_NUMBER 0
32#define IOBASE_MEMORY 1
33#define IOBASE_IO 2
34#define IOBASE_ISA_IO 3
35#define IOBASE_ISA_MEM 4
36
37#define pcibios_scan_all_fns(a, b) 0
38
39/*
40 * Set this to 1 if you want the kernel to re-assign all PCI
41 * bus numbers (don't do that on ppc64 yet !)
42 */
43#define pcibios_assign_all_busses() \
44 (pci_has_flag(PCI_REASSIGN_ALL_BUS))
45
46static inline void pcibios_set_master(struct pci_dev *dev)
47{
48 /* No special bus mastering setup handling */
49}
50
51static inline void pcibios_penalize_isa_irq(int irq, int active)
52{
53 /* We don't do dynamic PCI IRQ allocation */
54}
55
56#ifdef CONFIG_PCI
57extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
58extern struct dma_map_ops *get_pci_dma_ops(void);
59#else /* CONFIG_PCI */
60#define set_pci_dma_ops(d)
61#define get_pci_dma_ops() NULL
62#endif
63
64#ifdef CONFIG_PCI
65static inline void pci_dma_burst_advice(struct pci_dev *pdev,
66 enum pci_dma_burst_strategy *strat,
67 unsigned long *strategy_parameter)
68{
69 *strat = PCI_DMA_BURST_INFINITY;
70 *strategy_parameter = ~0UL;
71}
72#endif
73
74extern int pci_domain_nr(struct pci_bus *bus);
75
76/* Decide whether to display the domain number in /proc */
77extern int pci_proc_domain(struct pci_bus *bus);
78
79struct vm_area_struct;
80/* Map a range of PCI memory or I/O space for a device into user space */
81int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
82 enum pci_mmap_state mmap_state, int write_combine);
83
84/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
85#define HAVE_PCI_MMAP 1
86
87extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
88 size_t count);
89extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
90 size_t count);
91extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
92 struct vm_area_struct *vma,
93 enum pci_mmap_state mmap_state);
94
95#define HAVE_PCI_LEGACY 1
96
97/* pci_unmap_{page,single} is a nop so... */
98#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
99#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
100#define pci_unmap_addr(PTR, ADDR_NAME) (0)
101#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
102#define pci_unmap_len(PTR, LEN_NAME) (0)
103#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
104
105/* The PCI address space does equal the physical memory
106 * address space (no IOMMU). The IDE and SCSI device layers use
107 * this boolean for bounce buffer decisions.
108 */
109#define PCI_DMA_BUS_IS_PHYS (1)
110
111extern void pcibios_resource_to_bus(struct pci_dev *dev,
112 struct pci_bus_region *region,
113 struct resource *res);
114
115extern void pcibios_bus_to_resource(struct pci_dev *dev,
116 struct resource *res,
117 struct pci_bus_region *region);
118
119static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
120 struct resource *res)
121{
122 struct resource *root = NULL;
123
124 if (res->flags & IORESOURCE_IO)
125 root = &ioport_resource;
126 if (res->flags & IORESOURCE_MEM)
127 root = &iomem_resource;
128
129 return root;
130}
131
132extern void pcibios_claim_one_bus(struct pci_bus *b);
133
134extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
135
136extern void pcibios_resource_survey(void);
137
138extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
139extern int remove_phb_dynamic(struct pci_controller *phb);
140
141extern struct pci_dev *of_create_pci_dev(struct device_node *node,
142 struct pci_bus *bus, int devfn);
143
144extern void of_scan_pci_bridge(struct device_node *node,
145 struct pci_dev *dev);
146
147extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
148extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
149
150extern int pci_read_irq_line(struct pci_dev *dev);
151
152extern int pci_bus_find_capability(struct pci_bus *bus,
153 unsigned int devfn, int cap);
154
155struct file;
156extern pgprot_t pci_phys_mem_access_prot(struct file *file,
157 unsigned long pfn,
158 unsigned long size,
159 pgprot_t prot);
160
161#define HAVE_ARCH_PCI_RESOURCE_TO_USER
162extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
163 const struct resource *rsrc,
164 resource_size_t *start, resource_size_t *end);
165
166extern void pcibios_setup_bus_devices(struct pci_bus *bus);
167extern void pcibios_setup_bus_self(struct pci_bus *bus);
168
169/* This part of code was originaly in xilinx-pci.h */
170#ifdef CONFIG_PCI_XILINX
171extern void __init xilinx_pci_init(void);
172#else
173static inline void __init xilinx_pci_init(void) { return; }
174#endif
175
176#endif /* __KERNEL__ */
177#endif /* __ASM_MICROBLAZE_PCI_H */
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 7547f5064560..f44b0d696fe2 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -19,6 +19,7 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/cache.h>
+#include <asm/pgtable.h>
 
 #define PGDIR_ORDER	0
 
@@ -111,7 +112,6 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 		unsigned long address)
 {
 	pte_t *pte;
-	extern int mem_init_done;
 	extern void *early_get_page(void);
 	if (mem_init_done) {
 		pte = (pte_t *)__get_free_page(GFP_KERNEL |
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index cc3a4dfc3eaa..dd2bb60651c7 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -16,6 +16,10 @@
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
+#ifndef __ASSEMBLY__
+extern int mem_init_done;
+#endif
+
 #ifndef CONFIG_MMU
 
 #define pgd_present(pgd)	(1) /* pages are always present on non MMU */
@@ -51,6 +55,8 @@ static inline int pte_file(pte_t pte) { return 0; }
 
 #define arch_enter_lazy_cpu_mode()	do {} while (0)
 
+#define pgprot_noncached_wc(prot)	prot
+
 #else /* CONFIG_MMU */
 
 #include <asm-generic/4level-fixup.h>
@@ -68,7 +74,6 @@ static inline int pte_file(pte_t pte) { return 0; }
 
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
-extern unsigned long ioremap_bot, ioremap_base;
 
 /*
  * The following only work if pte_present() is true.
@@ -85,11 +90,25 @@ static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 #define VMALLOC_START	(CONFIG_KERNEL_START + \
 				max(32 * 1024 * 1024UL, memory_size))
 #define VMALLOC_END	ioremap_bot
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 
 #endif /* __ASSEMBLY__ */
 
 /*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
+							_PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot) \
+			(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+					_PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) \
+			 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+					_PAGE_NO_CACHE))
+
+/*
  * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
  * table containing PTEs, together with a set of 16 segment registers, to
  * define the virtual to physical address mapping.
@@ -397,7 +416,7 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 	mts     rmsr, %2\n\
 	nop"
 	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
-	: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
+	: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
 	: "cc");
 
 	return old;
@@ -566,18 +585,11 @@ void mapin_ram(void);
 int map_page(unsigned long va, phys_addr_t pa, int flags);
 
 extern int mem_init_done;
-extern unsigned long ioremap_base;
-extern unsigned long ioremap_bot;
 
 asmlinkage void __init mmu_init(void);
 
 void __init *early_get_page(void);
 
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
-void consistent_free(void *vaddr);
-void consistent_sync(void *vaddr, size_t size, int direction);
-void consistent_sync_page(struct page *page, unsigned long offset,
-	size_t size, int direction);
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 
@@ -586,6 +598,14 @@ void consistent_sync_page(struct page *page, unsigned long offset,
 #ifndef __ASSEMBLY__
 #include <asm-generic/pgtable.h>
 
+extern unsigned long ioremap_bot, ioremap_base;
+
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
+void consistent_free(void *vaddr);
+void consistent_sync(void *vaddr, size_t size, int direction);
+void consistent_sync_page(struct page *page, unsigned long offset,
+	size_t size, int direction);
+
 void setup_memory(void);
 #endif /* __ASSEMBLY__ */
 
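As a sketch of how the new pgprot_noncached() macro is meant to be consumed (this caller is illustrative, not from the patch): a driver that maps device memory into user space strips the cache-control bits before remapping:

    /* Hedged sketch: an mmap handler using pgprot_noncached() from above. */
    #include <linux/fs.h>
    #include <linux/mm.h>

    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	/* clears _PAGE_CACHE_CTL, sets _PAGE_NO_CACHE | _PAGE_GUARDED */
    	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

    	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
    				  vma->vm_end - vma->vm_start,
    				  vma->vm_page_prot);
    }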
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 03f45a963204..e7d67a329bd7 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -31,6 +31,21 @@
 /* Other Prototypes */
 extern int early_uartlite_console(void);
 
+#ifdef CONFIG_PCI
+/*
+ * PCI <-> OF matching functions
+ * (XXX should these be here?)
+ */
+struct pci_bus;
+struct pci_dev;
+extern int pci_device_from_OF_node(struct device_node *node,
+					u8 *bus, u8 *devfn);
+extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus,
+					int devfn);
+extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
+extern void pci_create_OF_bus_map(void);
+#endif
+
 /*
  * OF address retreival & translation
  */
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 157970688b2a..59efb3fef957 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -87,6 +87,9 @@ void free_initmem(void);
 extern char *klimit;
 extern void ret_from_fork(void);
 
+extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
 #ifdef CONFIG_DEBUG_FS
 extern struct dentry *of_debugfs_root;
 #endif
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index 10ec70cd8735..bcb8b41d55af 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -23,7 +23,7 @@
 extern void _tlbie(unsigned long address);
 extern void _tlbia(void);
 
-#define __tlbia()	_tlbia()
+#define __tlbia()	{ preempt_disable(); _tlbia(); preempt_enable(); }
 
 static inline void local_flush_tlb_all(void)
 	{ __tlbia(); }
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index b07594eccf9b..e51bc1520825 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -14,7 +14,7 @@ endif
 
 extra-y := head.o vmlinux.lds
 
-obj-y += exceptions.o \
+obj-y += dma.o exceptions.o \
 	hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
 	of_platform.o process.o prom.o prom_parse.o ptrace.o \
 	setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 7bc7b68f97db..0071260a672c 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -90,6 +90,7 @@ int main(int argc, char *argv[])
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
+	DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
 	BLANK();
 
 	/* struct cpu_context */
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 2a56bccce4e0..f04d8a86dead 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -15,25 +15,6 @@
15#include <asm/cpuinfo.h> 15#include <asm/cpuinfo.h>
16#include <asm/pvr.h> 16#include <asm/pvr.h>
17 17
18static inline void __invalidate_flush_icache(unsigned int addr)
19{
20 __asm__ __volatile__ ("wic %0, r0;" \
21 : : "r" (addr));
22}
23
24static inline void __flush_dcache(unsigned int addr)
25{
26 __asm__ __volatile__ ("wdc.flush %0, r0;" \
27 : : "r" (addr));
28}
29
30static inline void __invalidate_dcache(unsigned int baseaddr,
31 unsigned int offset)
32{
33 __asm__ __volatile__ ("wdc.clear %0, %1;" \
34 : : "r" (baseaddr), "r" (offset));
35}
36
37static inline void __enable_icache_msr(void) 18static inline void __enable_icache_msr(void)
38{ 19{
39 __asm__ __volatile__ (" msrset r0, %0; \ 20 __asm__ __volatile__ (" msrset r0, %0; \
@@ -148,9 +129,9 @@ do { \
148 int step = -line_length; \ 129 int step = -line_length; \
149 BUG_ON(step >= 0); \ 130 BUG_ON(step >= 0); \
150 \ 131 \
151 __asm__ __volatile__ (" 1: " #op " r0, %0; \ 132 __asm__ __volatile__ (" 1: " #op " r0, %0; \
152 bgtid %0, 1b; \ 133 bgtid %0, 1b; \
153 addk %0, %0, %1; \ 134 addk %0, %0, %1; \
154 " : : "r" (len), "r" (step) \ 135 " : : "r" (len), "r" (step) \
155 : "memory"); \ 136 : "memory"); \
156} while (0); 137} while (0);
@@ -162,9 +143,9 @@ do { \
162 int count = end - start; \ 143 int count = end - start; \
163 BUG_ON(count <= 0); \ 144 BUG_ON(count <= 0); \
164 \ 145 \
165 __asm__ __volatile__ (" 1: " #op " %0, %1; \ 146 __asm__ __volatile__ (" 1: " #op " %0, %1; \
166 bgtid %1, 1b; \ 147 bgtid %1, 1b; \
167 addk %1, %1, %2; \ 148 addk %1, %1, %2; \
168 " : : "r" (start), "r" (count), \ 149 " : : "r" (start), "r" (count), \
169 "r" (step) : "memory"); \ 150 "r" (step) : "memory"); \
170} while (0); 151} while (0);
@@ -175,7 +156,7 @@ do { \
175 int volatile temp; \ 156 int volatile temp; \
176 BUG_ON(end - start <= 0); \ 157 BUG_ON(end - start <= 0); \
177 \ 158 \
178 __asm__ __volatile__ (" 1: " #op " %1, r0; \ 159 __asm__ __volatile__ (" 1: " #op " %1, r0; \
179 cmpu %0, %1, %2; \ 160 cmpu %0, %1, %2; \
180 bgtid %0, 1b; \ 161 bgtid %0, 1b; \
181 addk %1, %1, %3; \ 162 addk %1, %1, %3; \
@@ -183,10 +164,14 @@ do { \
183 "r" (line_length) : "memory"); \ 164 "r" (line_length) : "memory"); \
184} while (0); 165} while (0);
185 166
167#define ASM_LOOP
168
186static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 169static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
187{ 170{
188 unsigned long flags; 171 unsigned long flags;
189 172#ifndef ASM_LOOP
173 int i;
174#endif
190 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 175 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
191 (unsigned int)start, (unsigned int) end); 176 (unsigned int)start, (unsigned int) end);
192 177
@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
196 local_irq_save(flags); 181 local_irq_save(flags);
197 __disable_icache_msr(); 182 __disable_icache_msr();
198 183
184#ifdef ASM_LOOP
199 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 185 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
200 186#else
187 for (i = start; i < end; i += cpuinfo.icache_line_length)
188 __asm__ __volatile__ ("wic %0, r0;" \
189 : : "r" (i));
190#endif
201 __enable_icache_msr(); 191 __enable_icache_msr();
202 local_irq_restore(flags); 192 local_irq_restore(flags);
203} 193}
@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
206 unsigned long end) 196 unsigned long end)
207{ 197{
208 unsigned long flags; 198 unsigned long flags;
209 199#ifndef ASM_LOOP
200 int i;
201#endif
210 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 202 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
211 (unsigned int)start, (unsigned int) end); 203 (unsigned int)start, (unsigned int) end);
212 204
@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
216 local_irq_save(flags); 208 local_irq_save(flags);
217 __disable_icache_nomsr(); 209 __disable_icache_nomsr();
218 210
211#ifdef ASM_LOOP
219 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 212 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
213#else
214 for (i = start; i < end; i += cpuinfo.icache_line_length)
215 __asm__ __volatile__ ("wic %0, r0;" \
216 : : "r" (i));
217#endif
220 218
221 __enable_icache_nomsr(); 219 __enable_icache_nomsr();
222 local_irq_restore(flags); 220 local_irq_restore(flags);
@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
225static void __flush_icache_range_noirq(unsigned long start, 223static void __flush_icache_range_noirq(unsigned long start,
226 unsigned long end) 224 unsigned long end)
227{ 225{
226#ifndef ASM_LOOP
227 int i;
228#endif
228 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 229 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
229 (unsigned int)start, (unsigned int) end); 230 (unsigned int)start, (unsigned int) end);
230 231
231 CACHE_LOOP_LIMITS(start, end, 232 CACHE_LOOP_LIMITS(start, end,
232 cpuinfo.icache_line_length, cpuinfo.icache_size); 233 cpuinfo.icache_line_length, cpuinfo.icache_size);
234#ifdef ASM_LOOP
233 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 235 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
236#else
237 for (i = start; i < end; i += cpuinfo.icache_line_length)
238 __asm__ __volatile__ ("wic %0, r0;" \
239 : : "r" (i));
240#endif
234} 241}
235 242
236static void __flush_icache_all_msr_irq(void) 243static void __flush_icache_all_msr_irq(void)
237{ 244{
238 unsigned long flags; 245 unsigned long flags;
239 246#ifndef ASM_LOOP
247 int i;
248#endif
240 pr_debug("%s\n", __func__); 249 pr_debug("%s\n", __func__);
241 250
242 local_irq_save(flags); 251 local_irq_save(flags);
243 __disable_icache_msr(); 252 __disable_icache_msr();
244 253#ifdef ASM_LOOP
245 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 254 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
246 255#else
256 for (i = 0; i < cpuinfo.icache_size;
257 i += cpuinfo.icache_line_length)
258 __asm__ __volatile__ ("wic %0, r0;" \
259 : : "r" (i));
260#endif
247 __enable_icache_msr(); 261 __enable_icache_msr();
248 local_irq_restore(flags); 262 local_irq_restore(flags);
249} 263}
@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
251static void __flush_icache_all_nomsr_irq(void) 265static void __flush_icache_all_nomsr_irq(void)
252{ 266{
253 unsigned long flags; 267 unsigned long flags;
254 268#ifndef ASM_LOOP
269 int i;
270#endif
255 pr_debug("%s\n", __func__); 271 pr_debug("%s\n", __func__);
256 272
257 local_irq_save(flags); 273 local_irq_save(flags);
258 __disable_icache_nomsr(); 274 __disable_icache_nomsr();
259 275#ifdef ASM_LOOP
260 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 276 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
261 277#else
278 for (i = 0; i < cpuinfo.icache_size;
279 i += cpuinfo.icache_line_length)
280 __asm__ __volatile__ ("wic %0, r0;" \
281 : : "r" (i));
282#endif
262 __enable_icache_nomsr(); 283 __enable_icache_nomsr();
263 local_irq_restore(flags); 284 local_irq_restore(flags);
264} 285}
265 286
266static void __flush_icache_all_noirq(void) 287static void __flush_icache_all_noirq(void)
267{ 288{
289#ifndef ASM_LOOP
290 int i;
291#endif
268 pr_debug("%s\n", __func__); 292 pr_debug("%s\n", __func__);
293#ifdef ASM_LOOP
269 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 294 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
295#else
296 for (i = 0; i < cpuinfo.icache_size;
297 i += cpuinfo.icache_line_length)
298 __asm__ __volatile__ ("wic %0, r0;" \
299 : : "r" (i));
300#endif
270} 301}
271 302
272static void __invalidate_dcache_all_msr_irq(void) 303static void __invalidate_dcache_all_msr_irq(void)
273{ 304{
274 unsigned long flags; 305 unsigned long flags;
275 306#ifndef ASM_LOOP
307 int i;
308#endif
276 pr_debug("%s\n", __func__); 309 pr_debug("%s\n", __func__);
277 310
278 local_irq_save(flags); 311 local_irq_save(flags);
279 __disable_dcache_msr(); 312 __disable_dcache_msr();
280 313#ifdef ASM_LOOP
281 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 314 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
282 315#else
316 for (i = 0; i < cpuinfo.dcache_size;
317 i += cpuinfo.dcache_line_length)
318 __asm__ __volatile__ ("wdc %0, r0;" \
319 : : "r" (i));
320#endif
283 __enable_dcache_msr(); 321 __enable_dcache_msr();
284 local_irq_restore(flags); 322 local_irq_restore(flags);
285} 323}
@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
287static void __invalidate_dcache_all_nomsr_irq(void) 325static void __invalidate_dcache_all_nomsr_irq(void)
288{ 326{
289 unsigned long flags; 327 unsigned long flags;
290 328#ifndef ASM_LOOP
329 int i;
330#endif
291 pr_debug("%s\n", __func__); 331 pr_debug("%s\n", __func__);
292 332
293 local_irq_save(flags); 333 local_irq_save(flags);
294 __disable_dcache_nomsr(); 334 __disable_dcache_nomsr();
295 335#ifdef ASM_LOOP
296 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 336 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
297 337#else
338 for (i = 0; i < cpuinfo.dcache_size;
339 i += cpuinfo.dcache_line_length)
340 __asm__ __volatile__ ("wdc %0, r0;" \
341 : : "r" (i));
342#endif
298 __enable_dcache_nomsr(); 343 __enable_dcache_nomsr();
299 local_irq_restore(flags); 344 local_irq_restore(flags);
300} 345}
301 346
302static void __invalidate_dcache_all_noirq_wt(void) 347static void __invalidate_dcache_all_noirq_wt(void)
303{ 348{
349#ifndef ASM_LOOP
350 int i;
351#endif
304 pr_debug("%s\n", __func__); 352 pr_debug("%s\n", __func__);
353#ifdef ASM_LOOP
305 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) 354 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
355#else
356 for (i = 0; i < cpuinfo.dcache_size;
357 i += cpuinfo.dcache_line_length)
358 __asm__ __volatile__ ("wdc %0, r0;" \
359 : : "r" (i));
360#endif
306} 361}
307 362
308/* FIXME this is weird - should be only wdc but not work 363/* FIXME this is weird - should be only wdc but not work
309 * MS: I am getting bus errors and other weird things */ 364 * MS: I am getting bus errors and other weird things */
310static void __invalidate_dcache_all_wb(void) 365static void __invalidate_dcache_all_wb(void)
311{ 366{
367#ifndef ASM_LOOP
368 int i;
369#endif
312 pr_debug("%s\n", __func__); 370 pr_debug("%s\n", __func__);
371#ifdef ASM_LOOP
313 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 372 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
314 wdc.clear) 373 wdc.clear)
374#else
375 for (i = 0; i < cpuinfo.dcache_size;
376 i += cpuinfo.dcache_line_length)
377 __asm__ __volatile__ ("wdc.clear %0, r0;" \
378 : : "r" (i));
379#endif
315} 380}
316 381
317static void __invalidate_dcache_range_wb(unsigned long start, 382static void __invalidate_dcache_range_wb(unsigned long start,
318 unsigned long end) 383 unsigned long end)
319{ 384{
385#ifndef ASM_LOOP
386 int i;
387#endif
320 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 388 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
321 (unsigned int)start, (unsigned int) end); 389 (unsigned int)start, (unsigned int) end);
322 390
323 CACHE_LOOP_LIMITS(start, end, 391 CACHE_LOOP_LIMITS(start, end,
324 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 392 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
393#ifdef ASM_LOOP
325 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); 394 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
395#else
396 for (i = start; i < end; i += cpuinfo.icache_line_length)
397 __asm__ __volatile__ ("wdc.clear %0, r0;" \
398 : : "r" (i));
399#endif
326} 400}
327 401
328static void __invalidate_dcache_range_nomsr_wt(unsigned long start, 402static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
329 unsigned long end) 403 unsigned long end)
330{ 404{
405#ifndef ASM_LOOP
406 int i;
407#endif
331 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 408 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
332 (unsigned int)start, (unsigned int) end); 409 (unsigned int)start, (unsigned int) end);
333 CACHE_LOOP_LIMITS(start, end, 410 CACHE_LOOP_LIMITS(start, end,
334 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 411 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
335 412
413#ifdef ASM_LOOP
336 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 414 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
415#else
416 for (i = start; i < end; i += cpuinfo.icache_line_length)
417 __asm__ __volatile__ ("wdc %0, r0;" \
418 : : "r" (i));
419#endif
337} 420}
338 421
339static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, 422static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
340 unsigned long end) 423 unsigned long end)
341{ 424{
342 unsigned long flags; 425 unsigned long flags;
343 426#ifndef ASM_LOOP
427 int i;
428#endif
344 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 429 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
345 (unsigned int)start, (unsigned int) end); 430 (unsigned int)start, (unsigned int) end);
346 CACHE_LOOP_LIMITS(start, end, 431 CACHE_LOOP_LIMITS(start, end,
@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
349 local_irq_save(flags); 434 local_irq_save(flags);
350 __disable_dcache_msr(); 435 __disable_dcache_msr();
351 436
437#ifdef ASM_LOOP
352 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 438 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
439#else
440 for (i = start; i < end; i += cpuinfo.icache_line_length)
441 __asm__ __volatile__ ("wdc %0, r0;" \
442 : : "r" (i));
443#endif
353 444
354 __enable_dcache_msr(); 445 __enable_dcache_msr();
355 local_irq_restore(flags); 446 local_irq_restore(flags);
@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
359 unsigned long end) 450 unsigned long end)
360{ 451{
361 unsigned long flags; 452 unsigned long flags;
362 453#ifndef ASM_LOOP
454 int i;
455#endif
363 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 456 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
364 (unsigned int)start, (unsigned int) end); 457 (unsigned int)start, (unsigned int) end);
365 458
@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
369 local_irq_save(flags); 462 local_irq_save(flags);
370 __disable_dcache_nomsr(); 463 __disable_dcache_nomsr();
371 464
465#ifdef ASM_LOOP
372 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 466 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
467#else
 468	for (i = start; i < end; i += cpuinfo.dcache_line_length)
469 __asm__ __volatile__ ("wdc %0, r0;" \
470 : : "r" (i));
471#endif
373 472
374 __enable_dcache_nomsr(); 473 __enable_dcache_nomsr();
375 local_irq_restore(flags); 474 local_irq_restore(flags);
@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
377 476
378static void __flush_dcache_all_wb(void) 477static void __flush_dcache_all_wb(void)
379{ 478{
479#ifndef ASM_LOOP
480 int i;
481#endif
380 pr_debug("%s\n", __func__); 482 pr_debug("%s\n", __func__);
483#ifdef ASM_LOOP
381 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 484 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
382 wdc.flush); 485 wdc.flush);
486#else
487 for (i = 0; i < cpuinfo.dcache_size;
488 i += cpuinfo.dcache_line_length)
489 __asm__ __volatile__ ("wdc.flush %0, r0;" \
490 : : "r" (i));
491#endif
383} 492}
384 493
385static void __flush_dcache_range_wb(unsigned long start, unsigned long end) 494static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
386{ 495{
496#ifndef ASM_LOOP
497 int i;
498#endif
387 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 499 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
388 (unsigned int)start, (unsigned int) end); 500 (unsigned int)start, (unsigned int) end);
389 501
390 CACHE_LOOP_LIMITS(start, end, 502 CACHE_LOOP_LIMITS(start, end,
391 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 503 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
504#ifdef ASM_LOOP
392 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); 505 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
506#else
 507	for (i = start; i < end; i += cpuinfo.dcache_line_length)
508 __asm__ __volatile__ ("wdc.flush %0, r0;" \
509 : : "r" (i));
510#endif
393} 511}
394 512
395/* struct for wb caches and for wt caches */ 513/* struct for wb caches and for wt caches */
@@ -493,7 +611,7 @@ const struct scache wt_nomsr_noirq = {
493#define CPUVER_7_20_A 0x0c 611#define CPUVER_7_20_A 0x0c
494#define CPUVER_7_20_D 0x0f 612#define CPUVER_7_20_D 0x0f
495 613
496#define INFO(s) printk(KERN_INFO "cache: " s " \n"); 614#define INFO(s) printk(KERN_INFO "cache: " s "\n");
497 615
498void microblaze_cache_init(void) 616void microblaze_cache_init(void)
499{ 617{
@@ -532,4 +650,9 @@ void microblaze_cache_init(void)
532 } 650 }
533 } 651 }
534 } 652 }
653 invalidate_dcache();
654 enable_dcache();
655
656 invalidate_icache();
657 enable_icache();
535} 658}
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 000000000000..b1084974fccd
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2009-2010 PetaLogix
3 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
4 *
5 * Provide default implementations of the DMA mapping callbacks for
6 * directly mapped busses.
7 */
8
9#include <linux/device.h>
10#include <linux/dma-mapping.h>
11#include <linux/dma-debug.h>
12#include <asm/bug.h>
13#include <asm/cacheflush.h>
14
15/*
16 * Generic direct DMA implementation
17 *
18 * This implementation supports a per-device offset that can be applied if
19 * the address at which memory is visible to devices is not 0. Platform code
20 * can set archdata.dma_data to an unsigned long holding the offset. By
21 * default the offset is PCI_DRAM_OFFSET.
22 */
23static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
24 size_t size, enum dma_data_direction direction)
25{
26 switch (direction) {
27 case DMA_TO_DEVICE:
28 flush_dcache_range(paddr + offset, paddr + offset + size);
29 break;
30 case DMA_FROM_DEVICE:
31 invalidate_dcache_range(paddr + offset, paddr + offset + size);
32 break;
33 default:
34 BUG();
35 }
36}
37
38static unsigned long get_dma_direct_offset(struct device *dev)
39{
40 if (dev)
41 return (unsigned long)dev->archdata.dma_data;
42
 43	return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
44}
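
How the per-device offset gets populated is left to platform code and is not shown in this patch. A minimal sketch, assuming the dma_data slot added to struct dev_archdata by this series is a pointer-sized field and using an invented 0x80000000 offset:

	#include <linux/device.h>

	/* hypothetical platform fixup, run before the device does DMA */
	static void example_set_dma_offset(struct device *dev)
	{
		/* get_dma_direct_offset() reads this back as an unsigned long */
		dev->archdata.dma_data = (void *)0x80000000UL;
	}
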
45
46#define NOT_COHERENT_CACHE
47
48static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
49 dma_addr_t *dma_handle, gfp_t flag)
50{
51#ifdef NOT_COHERENT_CACHE
52 return consistent_alloc(flag, size, dma_handle);
53#else
54 void *ret;
55 struct page *page;
56 int node = dev_to_node(dev);
57
58 /* ignore region specifiers */
59 flag &= ~(__GFP_HIGHMEM);
60
61 page = alloc_pages_node(node, flag, get_order(size));
62 if (page == NULL)
63 return NULL;
64 ret = page_address(page);
65 memset(ret, 0, size);
66 *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
67
68 return ret;
69#endif
70}
71
72static void dma_direct_free_coherent(struct device *dev, size_t size,
73 void *vaddr, dma_addr_t dma_handle)
74{
75#ifdef NOT_COHERENT_CACHE
76 consistent_free(vaddr);
77#else
78 free_pages((unsigned long)vaddr, get_order(size));
79#endif
80}
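
On top of these two callbacks the usual coherent DMA API works unchanged; a hedged driver-side usage sketch (device pointer and size are illustrative):

	#include <linux/dma-mapping.h>

	dma_addr_t bus;
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* hand 'bus' to the device; the CPU accesses 'buf' uncached */
	dma_free_coherent(dev, PAGE_SIZE, buf, bus);
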
81
82static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
83 int nents, enum dma_data_direction direction,
84 struct dma_attrs *attrs)
85{
86 struct scatterlist *sg;
87 int i;
88
 89	/* FIXME this part of the code is untested */
90 for_each_sg(sgl, sg, nents, i) {
91 sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
92 sg->dma_length = sg->length;
93 __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
94 sg->length, direction);
95 }
96
97 return nents;
98}
99
100static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
101 int nents, enum dma_data_direction direction,
102 struct dma_attrs *attrs)
103{
104}
105
106static int dma_direct_dma_supported(struct device *dev, u64 mask)
107{
108 return 1;
109}
110
111static inline dma_addr_t dma_direct_map_page(struct device *dev,
112 struct page *page,
113 unsigned long offset,
114 size_t size,
115 enum dma_data_direction direction,
116 struct dma_attrs *attrs)
117{
118 __dma_sync_page(page_to_phys(page), offset, size, direction);
119 return page_to_phys(page) + offset + get_dma_direct_offset(dev);
120}
121
122static inline void dma_direct_unmap_page(struct device *dev,
123 dma_addr_t dma_address,
124 size_t size,
125 enum dma_data_direction direction,
126 struct dma_attrs *attrs)
127{
128/* No extra cache cleanup is necessary here beyond the sync below.
129 *
130 * No phys_to_virt conversion is needed either: dma_address is already
131 * a physical address, which is what __dma_sync_page expects.
132 */
133	__dma_sync_page(dma_address, 0, size, direction);
134}
135
136struct dma_map_ops dma_direct_ops = {
137 .alloc_coherent = dma_direct_alloc_coherent,
138 .free_coherent = dma_direct_free_coherent,
139 .map_sg = dma_direct_map_sg,
140 .unmap_sg = dma_direct_unmap_sg,
141 .dma_supported = dma_direct_dma_supported,
142 .map_page = dma_direct_map_page,
143 .unmap_page = dma_direct_unmap_page,
144};
145EXPORT_SYMBOL(dma_direct_ops);
146
147/* Number of entries preallocated for DMA-API debugging */
148#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
149
150static int __init dma_init(void)
151{
152 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
153
154 return 0;
155}
156fs_initcall(dma_init);
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 3bad4ff49471..c0ede25c5b99 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -305,7 +305,7 @@ C_ENTRY(_user_exception):
305 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 305 swi r11, r1, PTO+PT_R1; /* Store user SP. */
306 addi r11, r0, 1; 306 addi r11, r0, 1;
307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
3082: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 3082: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
309 /* Save away the syscall number. */ 309 /* Save away the syscall number. */
310 swi r12, r1, PTO+PT_R0; 310 swi r12, r1, PTO+PT_R0;
311 tovirt(r1,r1) 311 tovirt(r1,r1)
@@ -322,8 +322,7 @@ C_ENTRY(_user_exception):
322 rtid r11, 0 322 rtid r11, 0
323 nop 323 nop
3243: 3243:
325 add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */ 325 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
326 lwi r11, r11, TS_THREAD_INFO /* get thread info */
327 lwi r11, r11, TI_FLAGS /* get flags in thread info */ 326 lwi r11, r11, TI_FLAGS /* get flags in thread info */
328 andi r11, r11, _TIF_WORK_SYSCALL_MASK 327 andi r11, r11, _TIF_WORK_SYSCALL_MASK
329 beqi r11, 4f 328 beqi r11, 4f
@@ -382,60 +381,50 @@ C_ENTRY(ret_from_trap):
382/* See if returning to kernel mode, if so, skip resched &c. */ 381/* See if returning to kernel mode, if so, skip resched &c. */
383 bnei r11, 2f; 382 bnei r11, 2f;
384 383
384 swi r3, r1, PTO + PT_R3
385 swi r4, r1, PTO + PT_R4
386
385 /* We're returning to user mode, so check for various conditions that 387 /* We're returning to user mode, so check for various conditions that
386 * trigger rescheduling. */ 388 * trigger rescheduling. */
387 # FIXME: Restructure all these flag checks. 389 /* FIXME: Restructure all these flag checks. */
388 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 390 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
389 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
390 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 391 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
391 andi r11, r11, _TIF_WORK_SYSCALL_MASK 392 andi r11, r11, _TIF_WORK_SYSCALL_MASK
392 beqi r11, 1f 393 beqi r11, 1f
393 394
394 swi r3, r1, PTO + PT_R3
395 swi r4, r1, PTO + PT_R4
396 brlid r15, do_syscall_trace_leave 395 brlid r15, do_syscall_trace_leave
397 addik r5, r1, PTO + PT_R0 396 addik r5, r1, PTO + PT_R0
398 lwi r3, r1, PTO + PT_R3
399 lwi r4, r1, PTO + PT_R4
4001: 3971:
401
402 /* We're returning to user mode, so check for various conditions that 398 /* We're returning to user mode, so check for various conditions that
403 * trigger rescheduling. */ 399 * trigger rescheduling. */
404 /* Get current task ptr into r11 */ 400 /* get thread info from current task */
405 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 401 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
406 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
407 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 402 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
408 andi r11, r11, _TIF_NEED_RESCHED; 403 andi r11, r11, _TIF_NEED_RESCHED;
409 beqi r11, 5f; 404 beqi r11, 5f;
410 405
411 swi r3, r1, PTO + PT_R3; /* store syscall result */
412 swi r4, r1, PTO + PT_R4;
413 bralid r15, schedule; /* Call scheduler */ 406 bralid r15, schedule; /* Call scheduler */
414 nop; /* delay slot */ 407 nop; /* delay slot */
415 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
416 lwi r4, r1, PTO + PT_R4;
417 408
418 /* Maybe handle a signal */ 409 /* Maybe handle a signal */
4195: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 4105: /* get thread info from current task*/
420 lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 411 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
421 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 412 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
422 andi r11, r11, _TIF_SIGPENDING; 413 andi r11, r11, _TIF_SIGPENDING;
423 beqi r11, 1f; /* Signals to handle, handle them */ 414 beqi r11, 1f; /* Signals to handle, handle them */
424 415
425 swi r3, r1, PTO + PT_R3; /* store syscall result */
426 swi r4, r1, PTO + PT_R4;
427 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 416 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
428 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
429 addi r7, r0, 1; /* Arg 3: int in_syscall */ 417 addi r7, r0, 1; /* Arg 3: int in_syscall */
430 bralid r15, do_signal; /* Handle any signals */ 418 bralid r15, do_signal; /* Handle any signals */
431 nop; 419 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
420
421/* Finally, return to user state. */
4221:
432 lwi r3, r1, PTO + PT_R3; /* restore syscall result */ 423 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
433 lwi r4, r1, PTO + PT_R4; 424 lwi r4, r1, PTO + PT_R4;
434 425
435/* Finally, return to user state. */ 426 swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
4361: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 427 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
437 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
438 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
439 VM_OFF; 428 VM_OFF;
440 tophys(r1,r1); 429 tophys(r1,r1);
441 RESTORE_REGS; 430 RESTORE_REGS;
@@ -565,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
565 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ 554 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
566 addi r11, r0, 1; \ 555 addi r11, r0, 1; \
567 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ 556 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
5682: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ 5572: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
569 /* Save away the syscall number. */ \ 558 /* Save away the syscall number. */ \
570 swi r0, r1, PTO+PT_R0; \ 559 swi r0, r1, PTO+PT_R0; \
571 tovirt(r1,r1) 560 tovirt(r1,r1)
@@ -673,9 +662,7 @@ C_ENTRY(ret_from_exc):
673 662
674 /* We're returning to user mode, so check for various conditions that 663 /* We're returning to user mode, so check for various conditions that
675 trigger rescheduling. */ 664 trigger rescheduling. */
676 /* Get current task ptr into r11 */ 665 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
677 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
678 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
679 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 666 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
680 andi r11, r11, _TIF_NEED_RESCHED; 667 andi r11, r11, _TIF_NEED_RESCHED;
681 beqi r11, 5f; 668 beqi r11, 5f;
@@ -685,8 +672,7 @@ C_ENTRY(ret_from_exc):
685 nop; /* delay slot */ 672 nop; /* delay slot */
686 673
687 /* Maybe handle a signal */ 674 /* Maybe handle a signal */
6885: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 6755: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
689 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
690 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 676 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
691 andi r11, r11, _TIF_SIGPENDING; 677 andi r11, r11, _TIF_SIGPENDING;
692 beqi r11, 1f; /* Signals to handle, handle them */ 678 beqi r11, 1f; /* Signals to handle, handle them */
@@ -705,15 +691,13 @@ C_ENTRY(ret_from_exc):
705	 * store return registers separately because this macro is used 691	 * store return registers separately because this macro is used
706	 * for other exceptions */ 692	 * for other exceptions */
707 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 693 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
708 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
709 addi r7, r0, 0; /* Arg 3: int in_syscall */ 694 addi r7, r0, 0; /* Arg 3: int in_syscall */
710 bralid r15, do_signal; /* Handle any signals */ 695 bralid r15, do_signal; /* Handle any signals */
711 nop; 696 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
712 697
713/* Finally, return to user state. */ 698/* Finally, return to user state. */
7141: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 6991: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
715 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 700 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
716 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
717 VM_OFF; 701 VM_OFF;
718 tophys(r1,r1); 702 tophys(r1,r1);
719 703
@@ -802,7 +786,7 @@ C_ENTRY(_interrupt):
802 swi r11, r0, TOPHYS(PER_CPU(KM)); 786 swi r11, r0, TOPHYS(PER_CPU(KM));
803 787
8042: 7882:
805 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 789 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
806 swi r0, r1, PTO + PT_R0; 790 swi r0, r1, PTO + PT_R0;
807 tovirt(r1,r1) 791 tovirt(r1,r1)
808 la r5, r1, PTO; 792 la r5, r1, PTO;
@@ -817,8 +801,7 @@ ret_from_irq:
817 lwi r11, r1, PTO + PT_MODE; 801 lwi r11, r1, PTO + PT_MODE;
818 bnei r11, 2f; 802 bnei r11, 2f;
819 803
820 add r11, r0, CURRENT_TASK; 804 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
821 lwi r11, r11, TS_THREAD_INFO;
822 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ 805 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
823 andi r11, r11, _TIF_NEED_RESCHED; 806 andi r11, r11, _TIF_NEED_RESCHED;
824 beqi r11, 5f 807 beqi r11, 5f
@@ -826,8 +809,7 @@ ret_from_irq:
826 nop; /* delay slot */ 809 nop; /* delay slot */
827 810
828 /* Maybe handle a signal */ 811 /* Maybe handle a signal */
8295: add r11, r0, CURRENT_TASK; 8125: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
830 lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
831 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 813 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
832 andi r11, r11, _TIF_SIGPENDING; 814 andi r11, r11, _TIF_SIGPENDING;
833 beqid r11, no_intr_resched 815 beqid r11, no_intr_resched
@@ -842,8 +824,7 @@ no_intr_resched:
842 /* Disable interrupts, we are now committed to the state restore */ 824 /* Disable interrupts, we are now committed to the state restore */
843 disable_irq 825 disable_irq
844 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ 826 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
845 add r11, r0, CURRENT_TASK; 827 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
846 swi r11, r0, PER_CPU(CURRENT_SAVE);
847 VM_OFF; 828 VM_OFF;
848 tophys(r1,r1); 829 tophys(r1,r1);
849 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 830 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
@@ -853,7 +834,28 @@ no_intr_resched:
853 lwi r1, r1, PT_R1 - PT_SIZE; 834 lwi r1, r1, PT_R1 - PT_SIZE;
854 bri 6f; 835 bri 6f;
855/* MS: Return to kernel state. */ 836/* MS: Return to kernel state. */
8562: VM_OFF /* MS: turn off MMU */ 8372:
838#ifdef CONFIG_PREEMPT
839 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
840 /* MS: get preempt_count from thread info */
841 lwi r5, r11, TI_PREEMPT_COUNT;
842 bgti r5, restore;
843
844 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
845 andi r5, r5, _TIF_NEED_RESCHED;
846 beqi r5, restore /* if zero jump over */
847
848preempt:
849	/* interrupts are off, that's why we call preempt_schedule_irq */
850 bralid r15, preempt_schedule_irq
851 nop
852 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
853 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
854 andi r5, r5, _TIF_NEED_RESCHED;
855	bnei	r5, preempt /* if non-zero jump to resched */
856restore:
857#endif
858 VM_OFF /* MS: turn off MMU */
857 tophys(r1,r1) 859 tophys(r1,r1)
858 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 860 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
859 lwi r4, r1, PTO + PT_R4; 861 lwi r4, r1, PTO + PT_R4;
@@ -915,7 +917,7 @@ C_ENTRY(_debug_exception):
915 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 917 swi r11, r1, PTO+PT_R1; /* Store user SP. */
916 addi r11, r0, 1; 918 addi r11, r0, 1;
917 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 919 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
9182: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 9202: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
919 /* Save away the syscall number. */ 921 /* Save away the syscall number. */
920 swi r0, r1, PTO+PT_R0; 922 swi r0, r1, PTO+PT_R0;
921 tovirt(r1,r1) 923 tovirt(r1,r1)
@@ -935,8 +937,7 @@ dbtrap_call: rtbd r11, 0;
935 bnei r11, 2f; 937 bnei r11, 2f;
936 938
937 /* Get current task ptr into r11 */ 939 /* Get current task ptr into r11 */
938 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 940 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
939 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
940 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 941 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
941 andi r11, r11, _TIF_NEED_RESCHED; 942 andi r11, r11, _TIF_NEED_RESCHED;
942 beqi r11, 5f; 943 beqi r11, 5f;
@@ -949,8 +950,7 @@ dbtrap_call: rtbd r11, 0;
949 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ 950 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
950 951
951 /* Maybe handle a signal */ 952 /* Maybe handle a signal */
9525: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 9535: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
953 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
954 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 954 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
955 andi r11, r11, _TIF_SIGPENDING; 955 andi r11, r11, _TIF_SIGPENDING;
956 beqi r11, 1f; /* Signals to handle, handle them */ 956 beqi r11, 1f; /* Signals to handle, handle them */
@@ -966,16 +966,14 @@ dbtrap_call: rtbd r11, 0;
966 (in a possibly modified form) after do_signal returns. */ 966 (in a possibly modified form) after do_signal returns. */
967 967
968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
969 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
970 addi r7, r0, 0; /* Arg 3: int in_syscall */ 969 addi r7, r0, 0; /* Arg 3: int in_syscall */
971 bralid r15, do_signal; /* Handle any signals */ 970 bralid r15, do_signal; /* Handle any signals */
972 nop; 971 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
973 972
974 973
975/* Finally, return to user state. */ 974/* Finally, return to user state. */
9761: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 9751: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
977 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 976 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
978 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
979 VM_OFF; 977 VM_OFF;
980 tophys(r1,r1); 978 tophys(r1,r1);
981 979
@@ -1007,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */
1007 1005
1008ENTRY(_switch_to) 1006ENTRY(_switch_to)
1009 /* prepare return value */ 1007 /* prepare return value */
1010 addk r3, r0, r31 1008 addk r3, r0, CURRENT_TASK
1011 1009
1012 /* save registers in cpu_context */ 1010 /* save registers in cpu_context */
1013 /* use r11 and r12, volatile registers, as temp register */ 1011 /* use r11 and r12, volatile registers, as temp register */
@@ -1051,10 +1049,10 @@ ENTRY(_switch_to)
1051 nop 1049 nop
1052 swi r12, r11, CC_FSR 1050 swi r12, r11, CC_FSR
1053 1051
1054	/* update r31, the current */ 1052	/* update CURRENT_TASK - pointer to the task which will run next */
1055 lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ 1053 lwi CURRENT_TASK, r6, TI_TASK
1056	/* store it to current_save too */ 1054	/* store it to current_save too */
1057 swi r31, r0, PER_CPU(CURRENT_SAVE) 1055 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
1058 1056
1059 /* get new process' cpu context and restore */ 1057 /* get new process' cpu context and restore */
1060 /* give me start where start context of next task */ 1058 /* give me start where start context of next task */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 30916193fcc7..cb7815cfe5ab 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -99,8 +99,8 @@ no_fdt_arg:
99 tophys(r4,r4) /* convert to phys address */ 99 tophys(r4,r4) /* convert to phys address */
100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
101_copy_command_line: 101_copy_command_line:
102	lbu	r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ 102	lbu	r2, r5, r6 /* r2=r5+r6 - r5 contains pointer to command line */
103	sb	r7, r4, r6 /* addr[r4+r6]= r7*/ 103	sb	r2, r4, r6 /* addr[r4+r6] = r2 */
104	addik	r6, r6, 1 /* increment counter */ 104	addik	r6, r6, 1 /* increment counter */
105 bgtid r3, _copy_command_line /* loop for all entries */ 105 bgtid r3, _copy_command_line /* loop for all entries */
106	addik	r3, r3, -1 /* decrement loop counter */ 106	addik	r3, r3, -1 /* decrement loop counter */
@@ -136,6 +136,11 @@ _invalidate:
136 addik r3, r3, -1 136 addik r3, r3, -1
137 /* sync */ 137 /* sync */
138 138
139 /* Setup the kernel PID */
140 mts rpid,r0 /* Load the kernel PID */
141 nop
142 bri 4
143
139 /* 144 /*
140 * We should still be executing code at physical address area 145 * We should still be executing code at physical address area
141 * RAM_BASEADDR at this point. However, kernel code is at 146 * RAM_BASEADDR at this point. However, kernel code is at
@@ -146,10 +151,6 @@ _invalidate:
146 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ 151 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
147 tophys(r4,r3) /* Load the kernel physical address */ 152 tophys(r4,r3) /* Load the kernel physical address */
148 153
149 mts rpid,r0 /* Load the kernel PID */
150 nop
151 bri 4
152
153 /* 154 /*
154 * Configure and load two entries into TLB slots 0 and 1. 155 * Configure and load two entries into TLB slots 0 and 1.
155 * In case we are pinning TLBs, these are reserved in by the 156 * In case we are pinning TLBs, these are reserved in by the
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 0f06034d1fe0..6f39e2c001f3 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -93,3 +93,18 @@ skip:
93 } 93 }
94 return 0; 94 return 0;
95} 95}
96
97/* MS: There is no advanced mapping mechanism. We are using a simple 32bit
98  intc without any cascades or other connections, so the mapping is 1:1 */
99unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
100{
101 return hwirq;
102}
103EXPORT_SYMBOL_GPL(irq_create_mapping);
104
105unsigned int irq_create_of_mapping(struct device_node *controller,
106 u32 *intspec, unsigned int intsize)
107{
108 return intspec[0];
109}
110EXPORT_SYMBOL_GPL(irq_create_of_mapping);
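
Because both helpers are 1:1, the hardware number from a device tree interrupt specifier can be handed straight to request_irq(). An illustrative consumer, not from this patch (my_handler and the "mydev" name are invented):

	#include <linux/interrupt.h>

	static irqreturn_t my_handler(int irq, void *data);

	static int example_attach_irq(struct device_node *controller,
				      u32 *intspec, void *priv)
	{
		unsigned int virq = irq_create_of_mapping(controller, intspec, 1);
		/* with the 1:1 scheme above, virq == intspec[0] */
		return request_irq(virq, my_handler, 0, "mydev", priv);
	}
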
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index bb8c4b9ccb80..f974ec7aa357 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -22,7 +22,10 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/bug.h> 23#include <linux/bug.h>
24#include <linux/param.h> 24#include <linux/param.h>
25#include <linux/pci.h>
25#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/of_platform.h>
28#include <linux/dma-mapping.h>
26#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
27#include <asm/entry.h> 30#include <asm/entry.h>
28#include <asm/cpuinfo.h> 31#include <asm/cpuinfo.h>
@@ -54,14 +57,10 @@ void __init setup_arch(char **cmdline_p)
54 57
55 microblaze_cache_init(); 58 microblaze_cache_init();
56 59
57 invalidate_dcache();
58 enable_dcache();
59
60 invalidate_icache();
61 enable_icache();
62
63 setup_memory(); 60 setup_memory();
64 61
62 xilinx_pci_init();
63
65#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) 64#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
 66	printk(KERN_NOTICE "Self-modified code enabled\n"); 65	printk(KERN_NOTICE "Self-modified code enabled\n");
67#endif 66#endif
@@ -188,3 +187,37 @@ static int microblaze_debugfs_init(void)
188} 187}
189arch_initcall(microblaze_debugfs_init); 188arch_initcall(microblaze_debugfs_init);
190#endif 189#endif
190
191static int dflt_bus_notify(struct notifier_block *nb,
192 unsigned long action, void *data)
193{
194 struct device *dev = data;
195
196	/* We are only interested in device addition */
197 if (action != BUS_NOTIFY_ADD_DEVICE)
198 return 0;
199
200 set_dma_ops(dev, &dma_direct_ops);
201
202 return NOTIFY_DONE;
203}
204
205static struct notifier_block dflt_plat_bus_notifier = {
206 .notifier_call = dflt_bus_notify,
207 .priority = INT_MAX,
208};
209
210static struct notifier_block dflt_of_bus_notifier = {
211 .notifier_call = dflt_bus_notify,
212 .priority = INT_MAX,
213};
214
215static int __init setup_bus_notifier(void)
216{
217 bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
218 bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier);
219
220 return 0;
221}
222
223arch_initcall(setup_bus_notifier);
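
With this notifier in place, every device added on the platform or OF bus dispatches the streaming DMA API into the dma_direct_ops registered above. A hedged driver-side sketch (buffer, length and device pointer are illustrative):

	#include <linux/dma-mapping.h>

	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -EIO;
	/* ... device reads 'len' bytes from 'addr' ... */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
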
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 6c8a924d9e26..09c49ed87235 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -2,6 +2,6 @@
2# Makefile 2# Makefile
3# 3#
4 4
5obj-y := init.o 5obj-y := consistent.o init.o
6 6
7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o 7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
new file mode 100644
index 000000000000..a9b443e3fb98
--- /dev/null
+++ b/arch/microblaze/mm/consistent.c
@@ -0,0 +1,246 @@
1/*
2 * Microblaze support for cache consistent memory.
3 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
4 * Copyright (C) 2010 PetaLogix
5 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
6 *
7 * Based on PowerPC version derived from arch/arm/mm/consistent.c
8 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
9 * Copyright (C) 2000 Russell King
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/module.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/ptrace.h>
24#include <linux/mman.h>
25#include <linux/mm.h>
26#include <linux/swap.h>
27#include <linux/stddef.h>
28#include <linux/vmalloc.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35
36#include <asm/pgalloc.h>
37#include <linux/io.h>
38#include <linux/hardirq.h>
39#include <asm/mmu_context.h>
40#include <asm/mmu.h>
41#include <linux/uaccess.h>
42#include <asm/pgtable.h>
43#include <asm/cpuinfo.h>
44
45#ifndef CONFIG_MMU
46
47/* I have to use dcache values because I can't rely on the RAM size */
48#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
49
50/*
51 * Consistent memory allocators. Used for DMA devices that want to
52 * share uncached memory with the processor core.
53 * My crufty no-MMU approach is simple. In the HW platform we can optionally
54 * mirror the DDR up above the processor cacheable region. So, memory accessed
55 * in this mirror region will not be cached. It's allocated from the same
56 * pool as normal memory, but the handle we return is shifted up into the
57 * uncached region. This will no doubt cause big problems if memory allocated
58 * here is not also freed properly. -- JW
59 */
60void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
61{
62 struct page *page, *end, *free;
63 unsigned long order;
64 void *ret, *virt;
65
66 if (in_interrupt())
67 BUG();
68
69 size = PAGE_ALIGN(size);
70 order = get_order(size);
71
72 page = alloc_pages(gfp, order);
73 if (!page)
74 goto no_page;
75
76 /* We could do with a page_to_phys and page_to_bus here. */
77 virt = page_address(page);
78 ret = ioremap(virt_to_phys(virt), size);
79 if (!ret)
80 goto no_remap;
81
82 /*
83 * Here's the magic! Note if the uncached shadow is not implemented,
84 * it's up to the calling code to also test that condition and make
85 * other arrangements, such as manually flushing the cache and so on.
86 */
87#ifdef CONFIG_XILINX_UNCACHED_SHADOW
88 ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
89#endif
90 /* dma_handle is same as physical (shadowed) address */
91 *dma_handle = (dma_addr_t)ret;
92
93 /*
94 * free wasted pages. We skip the first page since we know
95 * that it will have count = 1 and won't require freeing.
96 * We also mark the pages in use as reserved so that
97 * remap_page_range works.
98 */
99 page = virt_to_page(virt);
100 free = page + (size >> PAGE_SHIFT);
101 end = page + (1 << order);
102
103 for (; page < end; page++) {
104 init_page_count(page);
105 if (page >= free)
106 __free_page(page);
107 else
108 SetPageReserved(page);
109 }
110
111 return ret;
112no_remap:
113 __free_pages(page, order);
114no_page:
115 return NULL;
116}
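
As an invented numeric illustration of the shadow trick: if the dcacheable region spans 32 MB, UNCACHED_SHADOW_MASK evaluates to 0x02000000, and the handle returned above is just the cached address with that bit set:

	void *cached = page_address(page); /* say 0x90001000, cached */
	void *uncached = (void *)((unsigned long)cached | UNCACHED_SHADOW_MASK);
	/* 0x92001000: the same DDR cells, accessed around the dcache */
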
117
118#else
119
120void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
121{
122 int order, err, i;
123 unsigned long page, va, flags;
124 phys_addr_t pa;
125 struct vm_struct *area;
126 void *ret;
127
128 if (in_interrupt())
129 BUG();
130
131 /* Only allocate page size areas. */
132 size = PAGE_ALIGN(size);
133 order = get_order(size);
134
135 page = __get_free_pages(gfp, order);
136 if (!page) {
137 BUG();
138 return NULL;
139 }
140
141 /*
142	 * we need to ensure that there are no cachelines in use,
143	 * or worse, dirty, in this area.
144 */
145 flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size);
146
147 /* Allocate some common virtual space to map the new pages. */
148 area = get_vm_area(size, VM_ALLOC);
149 if (area == NULL) {
150 free_pages(page, order);
151 return NULL;
152 }
153 va = (unsigned long) area->addr;
154 ret = (void *)va;
155
156 /* This gives us the real physical address of the first page. */
157 *dma_handle = pa = virt_to_bus((void *)page);
158
159	/* MS: This is the whole magic - use cache-inhibited pages */
160 flags = _PAGE_KERNEL | _PAGE_NO_CACHE;
161
162 /*
163 * Set refcount=1 on all pages in an order>0
164 * allocation so that vfree() will actually
165 * free all pages that were allocated.
166 */
167 if (order > 0) {
168 struct page *rpage = virt_to_page(page);
169 for (i = 1; i < (1 << order); i++)
170 init_page_count(rpage+i);
171 }
172
173 err = 0;
174 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
175 err = map_page(va+i, pa+i, flags);
176
177 if (err) {
178 vfree((void *)va);
179 return NULL;
180 }
181
182 return ret;
183}
184#endif /* CONFIG_MMU */
185EXPORT_SYMBOL(consistent_alloc);
186
187/*
188 * free page(s) as defined by the above mapping.
189 */
190void consistent_free(void *vaddr)
191{
192 if (in_interrupt())
193 BUG();
194
195 /* Clear SHADOW_MASK bit in address, and free as per usual */
196#ifdef CONFIG_XILINX_UNCACHED_SHADOW
197 vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
198#endif
199 vfree(vaddr);
200}
201EXPORT_SYMBOL(consistent_free);
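
Driver-side, the pair behaves like a bare-bones dma_alloc_coherent; a minimal usage sketch (the size is illustrative):

	dma_addr_t handle;
	void *desc = consistent_alloc(GFP_KERNEL, 4096, &handle);
	if (!desc)
		return -ENOMEM;
	/* program the device with 'handle'; the CPU uses 'desc' */
	consistent_free(desc);
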
202
203/*
204 * make an area consistent.
205 */
206void consistent_sync(void *vaddr, size_t size, int direction)
207{
208 unsigned long start;
209 unsigned long end;
210
211 start = (unsigned long)vaddr;
212
213 /* Convert start address back down to unshadowed memory region */
214#ifdef CONFIG_XILINX_UNCACHED_SHADOW
215 start &= ~UNCACHED_SHADOW_MASK;
216#endif
217 end = start + size;
218
219 switch (direction) {
220 case PCI_DMA_NONE:
221 BUG();
222	case PCI_DMA_FROMDEVICE: /* invalidate only */
223		invalidate_dcache_range(start, end);
224		break;
225 case PCI_DMA_TODEVICE: /* writeback only */
226 flush_dcache_range(start, end);
227 break;
228 case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
229 flush_dcache_range(start, end);
230 break;
231 }
232}
233EXPORT_SYMBOL(consistent_sync);
234
235/*
236 * consistent_sync_page makes memory consistent. It is identical
237 * to consistent_sync, but takes a struct page instead of a
238 * virtual address.
239 */
240void consistent_sync_page(struct page *page, unsigned long offset,
241 size_t size, int direction)
242{
243 unsigned long start = (unsigned long)page_address(page) + offset;
244 consistent_sync((void *)start, size, direction);
245}
246EXPORT_SYMBOL(consistent_sync_page);
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a57cedf36715..1608e2e1a44a 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -23,6 +23,9 @@
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <asm/tlb.h> 24#include <asm/tlb.h>
25 25
26/* Used by both MMU and noMMU kernels because of the generic PCI code */
27int mem_init_done;
28
26#ifndef CONFIG_MMU 29#ifndef CONFIG_MMU
27unsigned int __page_offset; 30unsigned int __page_offset;
28EXPORT_SYMBOL(__page_offset); 31EXPORT_SYMBOL(__page_offset);
@@ -30,7 +33,6 @@ EXPORT_SYMBOL(__page_offset);
30#else 33#else
31DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 34DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32 35
33int mem_init_done;
34static int init_bootmem_done; 36static int init_bootmem_done;
35#endif /* CONFIG_MMU */ 37#endif /* CONFIG_MMU */
36 38
@@ -193,12 +195,6 @@ void free_initmem(void)
193 (unsigned long)(&__init_end)); 195 (unsigned long)(&__init_end));
194} 196}
195 197
196/* FIXME from arch/powerpc/mm/mem.c*/
197void show_mem(void)
198{
199 printk(KERN_NOTICE "%s\n", __func__);
200}
201
202void __init mem_init(void) 198void __init mem_init(void)
203{ 199{
204 high_memory = (void *)__va(memory_end); 200 high_memory = (void *)__va(memory_end);
@@ -208,9 +204,7 @@ void __init mem_init(void)
208 printk(KERN_INFO "Memory: %luk/%luk available\n", 204 printk(KERN_INFO "Memory: %luk/%luk available\n",
209 nr_free_pages() << (PAGE_SHIFT-10), 205 nr_free_pages() << (PAGE_SHIFT-10),
210 num_physpages << (PAGE_SHIFT-10)); 206 num_physpages << (PAGE_SHIFT-10));
211#ifdef CONFIG_MMU
212 mem_init_done = 1; 207 mem_init_done = 1;
213#endif
214} 208}
215 209
216#ifndef CONFIG_MMU 210#ifndef CONFIG_MMU
@@ -222,6 +216,10 @@ int ___range_ok(unsigned long addr, unsigned long size)
222} 216}
223EXPORT_SYMBOL(___range_ok); 217EXPORT_SYMBOL(___range_ok);
224 218
219int page_is_ram(unsigned long pfn)
220{
221 return __range_ok(pfn, 0);
222}
225#else 223#else
226int page_is_ram(unsigned long pfn) 224int page_is_ram(unsigned long pfn)
227{ 225{
@@ -349,4 +347,27 @@ void __init *early_get_page(void)
349 } 347 }
350 return p; 348 return p;
351} 349}
350
352#endif /* CONFIG_MMU */ 351#endif /* CONFIG_MMU */
352
353void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
354{
355 if (mem_init_done)
356 return kmalloc(size, mask);
357 else
358 return alloc_bootmem(size);
359}
360
361void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
362{
363 void *p;
364
365 if (mem_init_done)
366 p = kzalloc(size, mask);
367 else {
368 p = alloc_bootmem(size);
369 if (p)
370 memset(p, 0, size);
371 }
372 return p;
373}
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 2820081b21ab..63a6fd07c48f 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -103,7 +103,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
103 area = get_vm_area(size, VM_IOREMAP); 103 area = get_vm_area(size, VM_IOREMAP);
104 if (area == NULL) 104 if (area == NULL)
105 return NULL; 105 return NULL;
106 v = VMALLOC_VMADDR(area->addr); 106 v = (unsigned long) area->addr;
107 } else { 107 } else {
108 v = (ioremap_bot -= size); 108 v = (ioremap_bot -= size);
109 } 109 }
diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile
new file mode 100644
index 000000000000..9889cc2e1294
--- /dev/null
+++ b/arch/microblaze/pci/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile
3#
4
5obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o
6obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o
diff --git a/arch/microblaze/pci/indirect_pci.c b/arch/microblaze/pci/indirect_pci.c
new file mode 100644
index 000000000000..25f18f017f21
--- /dev/null
+++ b/arch/microblaze/pci/indirect_pci.c
@@ -0,0 +1,163 @@
1/*
2 * Support for indirect PCI bridges.
3 *
4 * Copyright (C) 1998 Gabriel Paubert.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/init.h>
17
18#include <asm/io.h>
19#include <asm/prom.h>
20#include <asm/pci-bridge.h>
21
22static int
23indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
24 int len, u32 *val)
25{
26 struct pci_controller *hose = pci_bus_to_host(bus);
27 volatile void __iomem *cfg_data;
28 u8 cfg_type = 0;
29 u32 bus_no, reg;
30
31 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
32 if (bus->number != hose->first_busno)
33 return PCIBIOS_DEVICE_NOT_FOUND;
34 if (devfn != 0)
35 return PCIBIOS_DEVICE_NOT_FOUND;
36 }
37
38 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
39 if (bus->number != hose->first_busno)
40 cfg_type = 1;
41
42 bus_no = (bus->number == hose->first_busno) ?
43 hose->self_busno : bus->number;
44
45 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
46 reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
47 else
 48		reg = offset & 0xfc; /* dword-align the register offset */
49
50 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
51 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
52 (devfn << 8) | reg | cfg_type));
53 else
54 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
55 (devfn << 8) | reg | cfg_type));
56
57 /*
58 * Note: the caller has already checked that offset is
59 * suitably aligned and that len is 1, 2 or 4.
60 */
 61	cfg_data = hose->cfg_data + (offset & 3); /* low two bits select the byte lane */
62 switch (len) {
63 case 1:
64 *val = in_8(cfg_data);
65 break;
66 case 2:
67 *val = in_le16(cfg_data);
68 break;
69 default:
70 *val = in_le32(cfg_data);
71 break;
72 }
73 return PCIBIOS_SUCCESSFUL;
74}
75
76static int
77indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
78 int len, u32 val)
79{
80 struct pci_controller *hose = pci_bus_to_host(bus);
81 volatile void __iomem *cfg_data;
82 u8 cfg_type = 0;
83 u32 bus_no, reg;
84
85 if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
86 if (bus->number != hose->first_busno)
87 return PCIBIOS_DEVICE_NOT_FOUND;
88 if (devfn != 0)
89 return PCIBIOS_DEVICE_NOT_FOUND;
90 }
91
92 if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
93 if (bus->number != hose->first_busno)
94 cfg_type = 1;
95
96 bus_no = (bus->number == hose->first_busno) ?
97 hose->self_busno : bus->number;
98
99 if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
100 reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
101 else
102 reg = offset & 0xfc;
103
104 if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
105 out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
106 (devfn << 8) | reg | cfg_type));
107 else
108 out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
109 (devfn << 8) | reg | cfg_type));
110
111	/* suppress setting of PCI_PRIMARY_BUS */
112 if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
113 if ((offset == PCI_PRIMARY_BUS) &&
114 (bus->number == hose->first_busno))
115 val &= 0xffffff00;
116
117 /* Workaround for PCI_28 Errata in 440EPx/GRx */
118 if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) &&
119 offset == PCI_CACHE_LINE_SIZE) {
120 val = 0;
121 }
122
123 /*
124 * Note: the caller has already checked that offset is
125 * suitably aligned and that len is 1, 2 or 4.
126 */
127 cfg_data = hose->cfg_data + (offset & 3);
128 switch (len) {
129 case 1:
130 out_8(cfg_data, val);
131 break;
132 case 2:
133 out_le16(cfg_data, val);
134 break;
135 default:
136 out_le32(cfg_data, val);
137 break;
138 }
139
140 return PCIBIOS_SUCCESSFUL;
141}
142
143static struct pci_ops indirect_pci_ops = {
144 .read = indirect_read_config,
145 .write = indirect_write_config,
146};
147
148void __init
149setup_indirect_pci(struct pci_controller *hose,
150 resource_size_t cfg_addr,
151 resource_size_t cfg_data, u32 flags)
152{
153 resource_size_t base = cfg_addr & PAGE_MASK;
154 void __iomem *mbase;
155
156 mbase = ioremap(base, PAGE_SIZE);
157 hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
158 if ((cfg_data & PAGE_MASK) != base)
159 mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
160 hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
161 hose->ops = &indirect_pci_ops;
162 hose->indirect_type = flags;
163}
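
A host bridge probe would then wire the classic address/data register pair through this helper; a hedged sketch with invented register addresses (real ones come from the bridge's device tree node):

	setup_indirect_pci(hose, 0xf0000000 /* cfg_addr */,
			   0xf0000004 /* cfg_data */,
			   INDIRECT_TYPE_BIG_ENDIAN);
	/* all config cycles on 'hose' now go through indirect_pci_ops */
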
diff --git a/arch/microblaze/pci/iomap.c b/arch/microblaze/pci/iomap.c
new file mode 100644
index 000000000000..3fbf16f4e16c
--- /dev/null
+++ b/arch/microblaze/pci/iomap.c
@@ -0,0 +1,39 @@
1/*
2 * ppc64 "iomap" interface implementation.
3 *
4 * (C) Copyright 2004 Linus Torvalds
5 */
6#include <linux/init.h>
7#include <linux/pci.h>
8#include <linux/mm.h>
9#include <asm/io.h>
10#include <asm/pci-bridge.h>
11
12void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
13{
14 resource_size_t start = pci_resource_start(dev, bar);
15 resource_size_t len = pci_resource_len(dev, bar);
16 unsigned long flags = pci_resource_flags(dev, bar);
17
18 if (!len)
19 return NULL;
20 if (max && len > max)
21 len = max;
22 if (flags & IORESOURCE_IO)
23 return ioport_map(start, len);
24 if (flags & IORESOURCE_MEM)
25 return ioremap(start, len);
26 /* What? */
27 return NULL;
28}
29EXPORT_SYMBOL(pci_iomap);
30
31void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
32{
33 if (isa_vaddr_is_ioport(addr))
34 return;
35 if (pcibios_vaddr_is_ioport(addr))
36 return;
37 iounmap(addr);
38}
39EXPORT_SYMBOL(pci_iounmap);
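
Typical driver usage of this pair (the BAR number and register offset are illustrative):

	void __iomem *regs = pci_iomap(pdev, 0, 0); /* map all of BAR 0 */
	if (!regs)
		return -ENOMEM;
	iowrite32(1, regs + 0x10); /* poke a hypothetical register */
	pci_iounmap(pdev, regs);
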
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
new file mode 100644
index 000000000000..0be34350d733
--- /dev/null
+++ b/arch/microblaze/pci/pci-common.c
@@ -0,0 +1,1642 @@
1/*
2 * Contains common pci routines for ALL ppc platforms
3 * (based on pci_32.c and pci_64.c)
4 *
5 * Port for PPC64 David Engebretsen, IBM Corp.
6 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
7 *
8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9 * Rework, based on alpha PCI code.
10 *
11 * Common pmac/prep/chrp pci routines. -- Cort
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/mm.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
29
30#include <asm/processor.h>
31#include <asm/io.h>
32#include <asm/prom.h>
33#include <asm/pci-bridge.h>
34#include <asm/byteorder.h>
35
36static DEFINE_SPINLOCK(hose_spinlock);
37LIST_HEAD(hose_list);
38
39/* XXX kill that some day ... */
40static int global_phb_number; /* Global phb counter */
41
42/* ISA Memory physical address */
43resource_size_t isa_mem_base;
44
45/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
46unsigned int pci_flags;
47
48static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
49
50void set_pci_dma_ops(struct dma_map_ops *dma_ops)
51{
52 pci_dma_ops = dma_ops;
53}
54
55struct dma_map_ops *get_pci_dma_ops(void)
56{
57 return pci_dma_ops;
58}
59EXPORT_SYMBOL(get_pci_dma_ops);
60
61int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
62{
63 return dma_set_mask(&dev->dev, mask);
64}
65
66int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
67{
68 int rc;
69
70 rc = dma_set_mask(&dev->dev, mask);
71 dev->dev.coherent_dma_mask = dev->dma_mask;
72
73 return rc;
74}
75
76struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
77{
78 struct pci_controller *phb;
79
80 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
81 if (!phb)
82 return NULL;
83 spin_lock(&hose_spinlock);
84 phb->global_number = global_phb_number++;
85 list_add_tail(&phb->list_node, &hose_list);
86 spin_unlock(&hose_spinlock);
87 phb->dn = dev;
88 phb->is_dynamic = mem_init_done;
89 return phb;
90}
91
92void pcibios_free_controller(struct pci_controller *phb)
93{
94 spin_lock(&hose_spinlock);
95 list_del(&phb->list_node);
96 spin_unlock(&hose_spinlock);
97
98 if (phb->is_dynamic)
99 kfree(phb);
100}
101
102static resource_size_t pcibios_io_size(const struct pci_controller *hose)
103{
104 return hose->io_resource.end - hose->io_resource.start + 1;
105}
106
107int pcibios_vaddr_is_ioport(void __iomem *address)
108{
109 int ret = 0;
110 struct pci_controller *hose;
111 resource_size_t size;
112
113 spin_lock(&hose_spinlock);
114 list_for_each_entry(hose, &hose_list, list_node) {
115 size = pcibios_io_size(hose);
116 if (address >= hose->io_base_virt &&
117 address < (hose->io_base_virt + size)) {
118 ret = 1;
119 break;
120 }
121 }
122 spin_unlock(&hose_spinlock);
123 return ret;
124}
125
126unsigned long pci_address_to_pio(phys_addr_t address)
127{
128 struct pci_controller *hose;
129 resource_size_t size;
130 unsigned long ret = ~0;
131
132 spin_lock(&hose_spinlock);
133 list_for_each_entry(hose, &hose_list, list_node) {
134 size = pcibios_io_size(hose);
135 if (address >= hose->io_base_phys &&
136 address < (hose->io_base_phys + size)) {
137 unsigned long base =
138 (unsigned long)hose->io_base_virt - _IO_BASE;
139 ret = base + (address - hose->io_base_phys);
140 break;
141 }
142 }
143 spin_unlock(&hose_spinlock);
144
145 return ret;
146}
147EXPORT_SYMBOL_GPL(pci_address_to_pio);
148
149/*
150 * Return the domain number for this bus.
151 */
152int pci_domain_nr(struct pci_bus *bus)
153{
154 struct pci_controller *hose = pci_bus_to_host(bus);
155
156 return hose->global_number;
157}
158EXPORT_SYMBOL(pci_domain_nr);
159
160/* This routine is meant to be used early during boot, when the
161 * PCI bus numbers have not yet been assigned, and you need to
162 * issue PCI config cycles to an OF device.
163 * It could also be used to "fix" RTAS config cycles if you want
164 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
165 * config cycles.
166 */
167struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
168{
169 while (node) {
170 struct pci_controller *hose, *tmp;
171 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
172 if (hose->dn == node)
173 return hose;
174 node = node->parent;
175 }
176 return NULL;
177}
178
179static ssize_t pci_show_devspec(struct device *dev,
180 struct device_attribute *attr, char *buf)
181{
182 struct pci_dev *pdev;
183 struct device_node *np;
184
185 pdev = to_pci_dev(dev);
186 np = pci_device_to_OF_node(pdev);
187 if (np == NULL || np->full_name == NULL)
188 return 0;
189 return sprintf(buf, "%s", np->full_name);
190}
191static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
192
193/* Add sysfs properties */
194int pcibios_add_platform_entries(struct pci_dev *pdev)
195{
196 return device_create_file(&pdev->dev, &dev_attr_devspec);
197}
198
199char __devinit *pcibios_setup(char *str)
200{
201 return str;
202}
203
204/*
205 * Reads the interrupt pin to determine if the interrupt is used by the card.
206 * If the interrupt is used, then gets the interrupt line from
207 * Open Firmware and sets it in the pci_dev and the PCI config interrupt line.
208 */
209int pci_read_irq_line(struct pci_dev *pci_dev)
210{
211 struct of_irq oirq;
212 unsigned int virq;
213
214 /* The current device-tree that iSeries generates from the HV
215	 * PCI information doesn't contain proper interrupt routing,
216	 * and all the fallback would do is print out crap, so we
217	 * don't attempt to resolve the interrupts here at all; some
218	 * iSeries-specific fixup does it.
219 *
220 * In the long run, we will hopefully fix the generated device-tree
221 * instead.
222 */
223 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
224
225#ifdef DEBUG
226 memset(&oirq, 0xff, sizeof(oirq));
227#endif
228 /* Try to get a mapping from the device-tree */
229 if (of_irq_map_pci(pci_dev, &oirq)) {
230 u8 line, pin;
231
232		/* If that fails, let's fall back to what is in the config
233 * space and map that through the default controller. We
234 * also set the type to level low since that's what PCI
235 * interrupts are. If your platform does differently, then
236 * either provide a proper interrupt tree or don't use this
237 * function.
238 */
239 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
240 return -1;
241 if (pin == 0)
242 return -1;
243 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
244 line == 0xff || line == 0) {
245 return -1;
246 }
247 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
248 line, pin);
249
250 virq = irq_create_mapping(NULL, line);
251 if (virq != NO_IRQ)
252 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
253 } else {
254 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
255 oirq.size, oirq.specifier[0], oirq.specifier[1],
256 oirq.controller ? oirq.controller->full_name :
257 "<default>");
258
259 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
260 oirq.size);
261 }
262 if (virq == NO_IRQ) {
263 pr_debug(" Failed to map !\n");
264 return -1;
265 }
266
267 pr_debug(" Mapped to linux irq %d\n", virq);
268
269 pci_dev->irq = virq;
270
271 return 0;
272}
273EXPORT_SYMBOL(pci_read_irq_line);
274
275/*
276 * Platform support for /proc/bus/pci/X/Y mmap()s,
277 * modelled on the sparc64 implementation by Dave Miller.
278 * -- paulus.
279 */
280
281/*
282 * Adjust vm_pgoff of VMA such that it is the physical page offset
283 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
284 *
285 * Basically, the user finds the base address for the device they wish
286 * to mmap. They read the 32-bit value from the config space base register,
287 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
288 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
289 *
290 * Returns negative error code on failure, zero on success.
291 */
292static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
293 resource_size_t *offset,
294 enum pci_mmap_state mmap_state)
295{
296 struct pci_controller *hose = pci_bus_to_host(dev->bus);
297 unsigned long io_offset = 0;
298 int i, res_bit;
299
300 if (hose == 0)
301 return NULL; /* should never happen */
302
303 /* If memory, add on the PCI bridge address offset */
304 if (mmap_state == pci_mmap_mem) {
305#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
306 *offset += hose->pci_mem_offset;
307#endif
308 res_bit = IORESOURCE_MEM;
309 } else {
310 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
311 *offset += io_offset;
312 res_bit = IORESOURCE_IO;
313 }
314
315 /*
316 * Check that the offset requested corresponds to one of the
317 * resources of the device.
318 */
319 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
320 struct resource *rp = &dev->resource[i];
321 int flags = rp->flags;
322
323 /* treat ROM as memory (should be already) */
324 if (i == PCI_ROM_RESOURCE)
325 flags |= IORESOURCE_MEM;
326
327 /* Active and same type? */
328 if ((flags & res_bit) == 0)
329 continue;
330
331 /* In the range of this resource? */
332 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
333 continue;
334
335 /* found it! construct the final physical address */
336 if (mmap_state == pci_mmap_io)
337 *offset += hose->io_base_phys - io_offset;
338 return rp;
339 }
340
341 return NULL;
342}
343
344/*
345 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
346 * device mapping.
347 */
348static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
349 pgprot_t protection,
350 enum pci_mmap_state mmap_state,
351 int write_combine)
352{
353 pgprot_t prot = protection;
354
355 /* Write combine is always 0 on non-memory space mappings. On
356 * memory space, if the user didn't pass 1, we check for a
357 * "prefetchable" resource. This is a bit hackish, but we use
358 * this to workaround the inability of /sysfs to provide a write
359 * combine bit
360 */
361 if (mmap_state != pci_mmap_mem)
362 write_combine = 0;
363 else if (write_combine == 0) {
364 if (rp->flags & IORESOURCE_PREFETCH)
365 write_combine = 1;
366 }
367
368 return pgprot_noncached(prot);
369}
370
371/*
372 * This one is used by /dev/mem and fbdev, which have no clue about the
373 * PCI device; it tries to find the PCI device first and calls the
374 * above routine
375 */
376pgprot_t pci_phys_mem_access_prot(struct file *file,
377 unsigned long pfn,
378 unsigned long size,
379 pgprot_t prot)
380{
381 struct pci_dev *pdev = NULL;
382 struct resource *found = NULL;
383 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
384 int i;
385
386 if (page_is_ram(pfn))
387 return prot;
388
389 prot = pgprot_noncached(prot);
390 for_each_pci_dev(pdev) {
391 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
392 struct resource *rp = &pdev->resource[i];
393 int flags = rp->flags;
394
395 /* Active and same type? */
396 if ((flags & IORESOURCE_MEM) == 0)
397 continue;
398 /* In the range of this resource? */
399 if (offset < (rp->start & PAGE_MASK) ||
400 offset > rp->end)
401 continue;
402 found = rp;
403 break;
404 }
405 if (found)
406 break;
407 }
408 if (found) {
409 if (found->flags & IORESOURCE_PREFETCH)
410 prot = pgprot_noncached_wc(prot);
411 pci_dev_put(pdev);
412 }
413
414 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
415 (unsigned long long)offset, pgprot_val(prot));
416
417 return prot;
418}
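
A hedged sketch of a typical caller, such as an fbdev mmap hook; example_map_framebuffer() and its arguments are invented for illustration, and the file argument (unused by the helper above) is passed as NULL:

/* Hypothetical caller of pci_phys_mem_access_prot(); illustration only. */
static int __maybe_unused example_map_framebuffer(struct vm_area_struct *vma,
						  unsigned long fb_pfn,
						  unsigned long size)
{
	vma->vm_page_prot = pci_phys_mem_access_prot(NULL, fb_pfn, size,
						     vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, fb_pfn, size,
			       vma->vm_page_prot);
}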
419
420/*
421 * Perform the actual remap of the pages for a PCI device mapping, as
422 * appropriate for this architecture. The region in the process to map
423 * is described by vm_start and vm_end members of VMA; the base physical
424 * address is found in vm_pgoff.
425 * The pci device structure is provided so that architectures may make mapping
426 * decisions on a per-device or per-bus basis.
427 *
428 * Returns a negative error code on failure, zero on success.
429 */
430int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
431 enum pci_mmap_state mmap_state, int write_combine)
432{
433 resource_size_t offset =
434 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
435 struct resource *rp;
436 int ret;
437
438 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
439 if (rp == NULL)
440 return -EINVAL;
441
442 vma->vm_pgoff = offset >> PAGE_SHIFT;
443 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
444 vma->vm_page_prot,
445 mmap_state, write_combine);
446
447 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
448 vma->vm_end - vma->vm_start, vma->vm_page_prot);
449
450 return ret;
451}
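
In practice this hook is driven by the generic /proc/bus/pci and sysfs mmap paths; the wrapper below is a minimal invented stand-in showing a memory-space call without write combining:

/* Hypothetical caller of pci_mmap_page_range(); the real generic code
 * validates permissions and sizes before getting here.
 */
static int __maybe_unused example_pci_mmap(struct pci_dev *dev,
					   struct vm_area_struct *vma)
{
	return pci_mmap_page_range(dev, vma, pci_mmap_mem, 0);
}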
452
453/* This provides legacy IO read access on a bus */
454int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
455{
456 unsigned long offset;
457 struct pci_controller *hose = pci_bus_to_host(bus);
458 struct resource *rp = &hose->io_resource;
459 void __iomem *addr;
460
461 /* Check if port can be supported by that bus. We only check
462 * the ranges of the PHB though, not the bus itself as the rules
463 * for forwarding legacy cycles down bridges are not our problem
464 * here. So if the host bridge supports it, we do it.
465 */
466 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
467 offset += port;
468
469 if (!(rp->flags & IORESOURCE_IO))
470 return -ENXIO;
471 if (offset < rp->start || (offset + size) > rp->end)
472 return -ENXIO;
473 addr = hose->io_base_virt + port;
474
475 switch (size) {
476 case 1:
477 *((u8 *)val) = in_8(addr);
478 return 1;
479 case 2:
480 if (port & 1)
481 return -EINVAL;
482 *((u16 *)val) = in_le16(addr);
483 return 2;
484 case 4:
485 if (port & 3)
486 return -EINVAL;
487 *((u32 *)val) = in_le32(addr);
488 return 4;
489 }
490 return -EINVAL;
491}
492
493/* This provides legacy IO write access on a bus */
494int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
495{
496 unsigned long offset;
497 struct pci_controller *hose = pci_bus_to_host(bus);
498 struct resource *rp = &hose->io_resource;
499 void __iomem *addr;
500
501 /* Check if port can be supported by that bus. We only check
502 * the ranges of the PHB though, not the bus itself as the rules
503 * for forwarding legacy cycles down bridges are not our problem
504 * here. So if the host bridge supports it, we do it.
505 */
506 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
507 offset += port;
508
509 if (!(rp->flags & IORESOURCE_IO))
510 return -ENXIO;
511 if (offset < rp->start || (offset + size) > rp->end)
512 return -ENXIO;
513 addr = hose->io_base_virt + port;
514
515 /* WARNING: The generic code is idiotic. It gets passed a pointer
516 * to what can be a 1, 2 or 4 byte quantity and always reads that
517 * as a u32, which means that we have to correct the location of
518 * the data read within those 32 bits for size 1 and 2
519 */
520 switch (size) {
521 case 1:
522 out_8(addr, val >> 24);
523 return 1;
524 case 2:
525 if (port & 1)
526 return -EINVAL;
527 out_le16(addr, val >> 16);
528 return 2;
529 case 4:
530 if (port & 3)
531 return -EINVAL;
532 out_le32(addr, val);
533 return 4;
534 }
535 return -EINVAL;
536}
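
To make the size and return conventions concrete: a one-byte read returns 1 and leaves the byte at the start of the u32 buffer. The sketch below is invented; port 0x3cc (the VGA miscellaneous output register) is only an example of a legacy cycle:

/* Illustration only: read one legacy IO byte through the PHB of "bus". */
static int __maybe_unused example_legacy_byte_read(struct pci_bus *bus, u8 *out)
{
	u32 buf;
	int rc = pci_legacy_read(bus, 0x3cc, &buf, 1);

	if (rc != 1)
		return rc < 0 ? rc : -EIO;
	*out = *(u8 *)&buf;	/* size-1 data lands in the first byte */
	return 0;
}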
537
538/* This provides legacy IO or memory mmap access on a bus */
539int pci_mmap_legacy_page_range(struct pci_bus *bus,
540 struct vm_area_struct *vma,
541 enum pci_mmap_state mmap_state)
542{
543 struct pci_controller *hose = pci_bus_to_host(bus);
544 resource_size_t offset =
545 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
546 resource_size_t size = vma->vm_end - vma->vm_start;
547 struct resource *rp;
548
549 pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
550 pci_domain_nr(bus), bus->number,
551 mmap_state == pci_mmap_mem ? "MEM" : "IO",
552 (unsigned long long)offset,
553 (unsigned long long)(offset + size - 1));
554
555 if (mmap_state == pci_mmap_mem) {
556 /* Hack alert !
557 *
558 * Because X is lame and can fail to start if it gets an error
559 * trying to mmap legacy_mem (instead of just moving on without
560 * legacy memory access) we fake it here by giving it anonymous
561 * memory, effectively behaving just like /dev/zero
562 */
563 if ((offset + size) > hose->isa_mem_size) {
564#ifdef CONFIG_MMU
565 printk(KERN_DEBUG
566 "Process %s (pid:%d) mapped non-existing PCI"
567 "legacy memory for 0%04x:%02x\n",
568 current->comm, current->pid, pci_domain_nr(bus),
569 bus->number);
570#endif
571 if (vma->vm_flags & VM_SHARED)
572 return shmem_zero_setup(vma);
573 return 0;
574 }
575 offset += hose->isa_mem_phys;
576 } else {
577 unsigned long io_offset = (unsigned long)hose->io_base_virt -
578 _IO_BASE;
579 unsigned long roffset = offset + io_offset;
580 rp = &hose->io_resource;
581 if (!(rp->flags & IORESOURCE_IO))
582 return -ENXIO;
583 if (roffset < rp->start || (roffset + size) > rp->end)
584 return -ENXIO;
585 offset += hose->io_base_phys;
586 }
587 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
588
589 vma->vm_pgoff = offset >> PAGE_SHIFT;
590 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
591 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
592 vma->vm_end - vma->vm_start,
593 vma->vm_page_prot);
594}
595
596void pci_resource_to_user(const struct pci_dev *dev, int bar,
597 const struct resource *rsrc,
598 resource_size_t *start, resource_size_t *end)
599{
600 struct pci_controller *hose = pci_bus_to_host(dev->bus);
601 resource_size_t offset = 0;
602
603 if (hose == NULL)
604 return;
605
606 if (rsrc->flags & IORESOURCE_IO)
607 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
608
609 /* We pass a fully fixed up address to userland for MMIO instead of
610 * a BAR value because X is lame and expects to be able to pass
611 * that to /dev/mem !
612 *
613 * That means that we'll have potentially 64 bits values where some
614 * userland apps only expect 32 (like X itself since it thinks only
615 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
616 * 32 bits CHRPs :-(
617 *
618 * Hopefully, the sysfs interface is immune to that gunk. Once X
619 * has been fixed (and the fix spread enough), we can re-enable the
620 * 2 lines below and pass down a BAR value to userland. In that case
621 * we'll also have to re-enable the matching code in
622 * __pci_mmap_make_offset().
623 *
624 * BenH.
625 */
626#if 0
627 else if (rsrc->flags & IORESOURCE_MEM)
628 offset = hose->pci_mem_offset;
629#endif
630
631 *start = rsrc->start - offset;
632 *end = rsrc->end - offset;
633}
634
635/**
636 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
637 * @hose: newly allocated pci_controller to be setup
638 * @dev: device node of the host bridge
639 * @primary: set if primary bus (32 bits only, soon to be deprecated)
640 *
641 * This function will parse the "ranges" property of a PCI host bridge device
642 * node and setup the resource mapping of a pci controller based on its
643 * content.
644 *
645 * Life would be boring if it wasn't for a few issues that we have to deal
646 * with here:
647 *
648 * - We can only cope with one IO space range and up to 3 Memory space
649 * ranges. However, some machines (thanks Apple !) tend to split their
650 * space into lots of small contiguous ranges. So we have to coalesce.
651 *
652 * - We can only cope with all memory ranges having the same offset
653 * between CPU addresses and PCI addresses. Unfortunately, some bridges
654 * are setup for a large 1:1 mapping along with a small "window" which
655 * maps PCI address 0 to some arbitrary high address of the CPU space in
656 * order to give access to the ISA memory hole.
657 * The way out of here that I've chosen for now is to always set the
658 * offset based on the first resource found, then override it if we
659 * have a different offset and the previous was set by an ISA hole.
660 *
661 * - Some busses have IO space not starting at 0, which causes trouble with
662 * the way we do our IO resource renumbering. The code somewhat deals with
663 * it for 64 bits but I would expect problems on 32 bits.
664 *
665 * - Some 32 bits platforms such as 4xx can have physical space larger than
666 * 32 bits so we need to use 64 bits values for the parsing
667 */
668void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
669 struct device_node *dev,
670 int primary)
671{
672 const u32 *ranges;
673 int rlen;
674 int pna = of_n_addr_cells(dev);
675 int np = pna + 5;
676 int memno = 0, isa_hole = -1;
677 u32 pci_space;
678 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
679 unsigned long long isa_mb = 0;
680 struct resource *res;
681
682 printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
683 dev->full_name, primary ? "(primary)" : "");
684
685 /* Get ranges property */
686 ranges = of_get_property(dev, "ranges", &rlen);
687 if (ranges == NULL)
688 return;
689
690 /* Parse it */
691 pr_debug("Parsing ranges property...\n");
692 while ((rlen -= np * 4) >= 0) {
693 /* Read next ranges element */
694 pci_space = ranges[0];
695 pci_addr = of_read_number(ranges + 1, 2);
696 cpu_addr = of_translate_address(dev, ranges + 3);
697 size = of_read_number(ranges + pna + 3, 2);
698
699 pr_debug("pci_space: 0x%08x pci_addr:0x%016llx "
700 "cpu_addr:0x%016llx size:0x%016llx\n",
701 pci_space, pci_addr, cpu_addr, size);
702
703 ranges += np;
704
705 /* If we failed translation or got a zero-sized region, skip it.
706 * (Some firmware tries to feed us nonsensical zero-sized regions,
707 * e.g. on power3, which look like some kind of attempt at
708 * exposing the VGA memory hole.)
709 */
710 if (cpu_addr == OF_BAD_ADDR || size == 0)
711 continue;
712
713 /* Now consume following elements while they are contiguous */
714 for (; rlen >= np * 4;
715 ranges += np, rlen -= np * 4) {
716 if (ranges[0] != pci_space)
717 break;
718 pci_next = of_read_number(ranges + 1, 2);
719 cpu_next = of_translate_address(dev, ranges + 3);
720 if (pci_next != pci_addr + size ||
721 cpu_next != cpu_addr + size)
722 break;
723 size += of_read_number(ranges + pna + 3, 2);
724 }
725
726 /* Act based on address space type */
727 res = NULL;
728 switch ((pci_space >> 24) & 0x3) {
729 case 1: /* PCI IO space */
730 printk(KERN_INFO
731 " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
732 cpu_addr, cpu_addr + size - 1, pci_addr);
733
734 /* We support only one IO range */
735 if (hose->pci_io_size) {
736 printk(KERN_INFO
737 " \\--> Skipped (too many) !\n");
738 continue;
739 }
740 /* On 32 bits, limit I/O space to 16MB */
741 if (size > 0x01000000)
742 size = 0x01000000;
743
744 /* 32 bits needs to map IOs here */
745 hose->io_base_virt = ioremap(cpu_addr, size);
746
747 /* Expect trouble if pci_addr is not 0 */
748 if (primary)
749 isa_io_base =
750 (unsigned long)hose->io_base_virt;
751 /* pci_io_size and io_base_phys always represent IO
752 * space starting at 0 so we factor in pci_addr
753 */
754 hose->pci_io_size = pci_addr + size;
755 hose->io_base_phys = cpu_addr - pci_addr;
756
757 /* Build resource */
758 res = &hose->io_resource;
759 res->flags = IORESOURCE_IO;
760 res->start = pci_addr;
761 break;
762 case 2: /* PCI Memory space */
763 case 3: /* PCI 64 bits Memory space */
764 printk(KERN_INFO
765 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
766 cpu_addr, cpu_addr + size - 1, pci_addr,
767 (pci_space & 0x40000000) ? "Prefetch" : "");
768
769 /* We support only 3 memory ranges */
770 if (memno >= 3) {
771 printk(KERN_INFO
772 " \\--> Skipped (too many) !\n");
773 continue;
774 }
775 /* Handles ISA memory hole space here */
776 if (pci_addr == 0) {
777 isa_mb = cpu_addr;
778 isa_hole = memno;
779 if (primary || isa_mem_base == 0)
780 isa_mem_base = cpu_addr;
781 hose->isa_mem_phys = cpu_addr;
782 hose->isa_mem_size = size;
783 }
784
785 /* We get the PCI/Mem offset from the first range or
786 * the current one if the offset came from an ISA
787 * hole. If they don't match, bugger.
788 */
789 if (memno == 0 ||
790 (isa_hole >= 0 && pci_addr != 0 &&
791 hose->pci_mem_offset == isa_mb))
792 hose->pci_mem_offset = cpu_addr - pci_addr;
793 else if (pci_addr != 0 &&
794 hose->pci_mem_offset != cpu_addr - pci_addr) {
795 printk(KERN_INFO
796 " \\--> Skipped (offset mismatch) !\n");
797 continue;
798 }
799
800 /* Build resource */
801 res = &hose->mem_resources[memno++];
802 res->flags = IORESOURCE_MEM;
803 if (pci_space & 0x40000000)
804 res->flags |= IORESOURCE_PREFETCH;
805 res->start = cpu_addr;
806 break;
807 }
808 if (res != NULL) {
809 res->name = dev->full_name;
810 res->end = res->start + size - 1;
811 res->parent = NULL;
812 res->sibling = NULL;
813 res->child = NULL;
814 }
815 }
816
817 /* If there's an ISA hole and the pci_mem_offset is -not- matching
818 * the ISA hole offset, then we need to remove the ISA hole from
819 * the resource list for that bridge
820 */
821 if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
822 unsigned int next = isa_hole + 1;
823 printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
824 if (next < memno)
825 memmove(&hose->mem_resources[isa_hole],
826 &hose->mem_resources[next],
827 sizeof(struct resource) * (memno - next));
828 hose->mem_resources[--memno].flags = 0;
829 }
830}
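
For reference, a hedged sketch of one "ranges" element as the loop above consumes it, assuming pna == 2 so that np == 7; every value is invented:

/* Hypothetical 7-cell "ranges" element (pna == 2 assumed, np == pna + 5):
 *   cell 0     : pci_space (space code in bits 25:24, prefetch in bit 30)
 *   cells 1-2  : 64-bit PCI address
 *   cells 3-4  : parent/CPU address (pna cells, translated by the kernel)
 *   cells 5-6  : 64-bit size
 */
static const u32 __maybe_unused example_ranges_element[7] = {
	0x02000000,			/* 32-bit memory space, non-prefetch */
	0x00000000, 0x80000000,		/* pci_addr = 0x80000000 */
	0x00000000, 0x80000000,		/* cpu_addr = 0x80000000 (1:1 here) */
	0x00000000, 0x10000000,		/* size = 256MB */
};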
831
832/* Decide whether to display the domain number in /proc */
833int pci_proc_domain(struct pci_bus *bus)
834{
835 struct pci_controller *hose = pci_bus_to_host(bus);
836
837 if (!(pci_flags & PCI_ENABLE_PROC_DOMAINS))
838 return 0;
839 if (pci_flags & PCI_COMPAT_DOMAIN_0)
840 return hose->global_number != 0;
841 return 1;
842}
843
844void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
845 struct resource *res)
846{
847 resource_size_t offset = 0, mask = (resource_size_t)-1;
848 struct pci_controller *hose = pci_bus_to_host(dev->bus);
849
850 if (!hose)
851 return;
852 if (res->flags & IORESOURCE_IO) {
853 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
854 mask = 0xffffffffu;
855 } else if (res->flags & IORESOURCE_MEM)
856 offset = hose->pci_mem_offset;
857
858 region->start = (res->start - offset) & mask;
859 region->end = (res->end - offset) & mask;
860}
861EXPORT_SYMBOL(pcibios_resource_to_bus);
862
863void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
864 struct pci_bus_region *region)
865{
866 resource_size_t offset = 0, mask = (resource_size_t)-1;
867 struct pci_controller *hose = pci_bus_to_host(dev->bus);
868
869 if (!hose)
870 return;
871 if (res->flags & IORESOURCE_IO) {
872 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
873 mask = 0xffffffffu;
874 } else if (res->flags & IORESOURCE_MEM)
875 offset = hose->pci_mem_offset;
876 res->start = (region->start + offset) & mask;
877 res->end = (region->end + offset) & mask;
878}
879EXPORT_SYMBOL(pcibios_bus_to_resource);
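
A hedged worked example of the two conversions above, using an invented hose whose pci_mem_offset is 0x80000000:

/* Invented numbers only: the MEM-space offset arithmetic used by
 * pcibios_resource_to_bus() and pcibios_bus_to_resource().
 */
static void __maybe_unused example_mem_offset_arith(void)
{
	resource_size_t pci_mem_offset = 0x80000000;	/* hypothetical */
	resource_size_t bus_start = 0x1000;
	/* bus -> resource adds the offset ... */
	resource_size_t cpu_start = bus_start + pci_mem_offset; /* 0x80001000 */
	/* ... and resource -> bus subtracts it again */
	WARN_ON(cpu_start - pci_mem_offset != bus_start);
}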
880
881/* Fixup a bus resource into a linux resource */
882static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
883{
884 struct pci_controller *hose = pci_bus_to_host(dev->bus);
885 resource_size_t offset = 0, mask = (resource_size_t)-1;
886
887 if (res->flags & IORESOURCE_IO) {
888 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
889 mask = 0xffffffffu;
890 } else if (res->flags & IORESOURCE_MEM)
891 offset = hose->pci_mem_offset;
892
893 res->start = (res->start + offset) & mask;
894 res->end = (res->end + offset) & mask;
895}
896
897/* This header fixup will do the resource fixup for all devices as they are
898 * probed, but not for bridge ranges
899 */
900static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
901{
902 struct pci_controller *hose = pci_bus_to_host(dev->bus);
903 int i;
904
905 if (!hose) {
906 printk(KERN_ERR "No host bridge for PCI dev %s !\n",
907 pci_name(dev));
908 return;
909 }
910 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
911 struct resource *res = dev->resource + i;
912 if (!res->flags)
913 continue;
914 /* On platforms that have PCI_PROBE_ONLY set, we don't
915 * consider 0 as an unassigned BAR value. It's technically
916 * a valid value, but linux doesn't like it... so when we can
917 * re-assign things, we do so, but if we can't, we keep it
918 * around and hope for the best...
919 */
920 if (res->start == 0 && !(pci_flags & PCI_PROBE_ONLY)) {
921 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \
922 "is unassigned\n",
923 pci_name(dev), i,
924 (unsigned long long)res->start,
925 (unsigned long long)res->end,
926 (unsigned int)res->flags);
927 res->end -= res->start;
928 res->start = 0;
929 res->flags |= IORESOURCE_UNSET;
930 continue;
931 }
932
933 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
934 pci_name(dev), i,
935 (unsigned long long)res->start,
936 (unsigned long long)res->end,
937 (unsigned int)res->flags);
938
939 fixup_resource(res, dev);
940
941 pr_debug("PCI:%s %016llx-%016llx\n",
942 pci_name(dev),
943 (unsigned long long)res->start,
944 (unsigned long long)res->end);
945 }
946}
947DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
948
949/* This function tries to figure out if a bridge resource has been initialized
950 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
951 * things go more smoothly when it gets it right. It should cover cases such
952 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
953 */
954static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
955 struct resource *res)
956{
957 struct pci_controller *hose = pci_bus_to_host(bus);
958 struct pci_dev *dev = bus->self;
959 resource_size_t offset;
960 u16 command;
961 int i;
962
963 /* We don't do anything if PCI_PROBE_ONLY is set */
964 if (pci_flags & PCI_PROBE_ONLY)
965 return 0;
966
967 /* Job is a bit different between memory and IO */
968 if (res->flags & IORESOURCE_MEM) {
969 /* If the BAR is non-0 (res != pci_mem_offset) then it's
970 * probably been initialized by somebody
971 */
972 if (res->start != hose->pci_mem_offset)
973 return 0;
974
975 /* The BAR is 0, let's check if memory decoding is enabled on
976 * the bridge. If not, we consider it unassigned
977 */
978 pci_read_config_word(dev, PCI_COMMAND, &command);
979 if ((command & PCI_COMMAND_MEMORY) == 0)
980 return 1;
981
982 /* Memory decoding is enabled and the BAR is 0. If any of
983 * the bridge resources covers that starting address (0), then
984 * it's good enough for us for memory
985 */
986 for (i = 0; i < 3; i++) {
987 if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
988 hose->mem_resources[i].start == hose->pci_mem_offset)
989 return 0;
990 }
991
992 /* Well, it starts at 0 and we know it will collide so we may as
993 * well consider it as unassigned. That covers the Apple case.
994 */
995 return 1;
996 } else {
997 /* If the BAR is non-0, then we consider it assigned */
998 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
999 if (((res->start - offset) & 0xfffffffful) != 0)
1000 return 0;
1001
1002 /* Here, we are a bit different from memory as typically IO
1003 * space starting at low addresses -is- valid. What we do
1004 * instead is that we consider as unassigned anything that
1005 * doesn't have IO enabled in the PCI command register,
1006 * and that's it.
1007 */
1008 pci_read_config_word(dev, PCI_COMMAND, &command);
1009 if (command & PCI_COMMAND_IO)
1010 return 0;
1011
1012 /* It's starting at 0 and IO is disabled in the bridge, consider
1013 * it unassigned
1014 */
1015 return 1;
1016 }
1017}
1018
1019/* Fixup resources of a PCI<->PCI bridge */
1020static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
1021{
1022 struct resource *res;
1023 int i;
1024
1025 struct pci_dev *dev = bus->self;
1026
1027 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
1028 res = bus->resource[i];
1029 if (!res)
1030 continue;
1031 if (!res->flags)
1032 continue;
1033 if (i >= 3 && bus->self->transparent)
1034 continue;
1035
1036 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
1037 pci_name(dev), i,
1038 (unsigned long long)res->start,
1039 (unsigned long long)res->end,
1040 (unsigned int)res->flags);
1041
1042 /* Perform fixup */
1043 fixup_resource(res, dev);
1044
1045 /* Try to detect uninitialized P2P bridge resources,
1046 * and clear them out so they get re-assigned later
1047 */
1048 if (pcibios_uninitialized_bridge_resource(bus, res)) {
1049 res->flags = 0;
1050 pr_debug("PCI:%s (unassigned)\n",
1051 pci_name(dev));
1052 } else {
1053 pr_debug("PCI:%s %016llx-%016llx\n",
1054 pci_name(dev),
1055 (unsigned long long)res->start,
1056 (unsigned long long)res->end);
1057 }
1058 }
1059}
1060
1061void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
1062{
1063 /* Fix up the bus resources for P2P bridges */
1064 if (bus->self != NULL)
1065 pcibios_fixup_bridge(bus);
1066}
1067
1068void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1069{
1070 struct pci_dev *dev;
1071
1072 pr_debug("PCI: Fixup bus devices %d (%s)\n",
1073 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1074
1075 list_for_each_entry(dev, &bus->devices, bus_list) {
1076 struct dev_archdata *sd = &dev->dev.archdata;
1077
1078 /* Setup OF node pointer in archdata */
1079 sd->of_node = pci_device_to_OF_node(dev);
1080
1081 /* Fixup NUMA node as it may not be setup yet by the generic
1082 * code and is needed by the DMA init
1083 */
1084 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1085
1086 /* Hook up default DMA ops */
1087 sd->dma_ops = pci_dma_ops;
1088 sd->dma_data = (void *)PCI_DRAM_OFFSET;
1089
1090 /* Read default IRQs and fixup if necessary */
1091 pci_read_irq_line(dev);
1092 }
1093}
1094
1095void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1096{
1097 /* When called from the generic PCI probe, read PCI<->PCI bridge
1098 * bases. This is -not- called when generating the PCI tree from
1099 * the OF device-tree.
1100 */
1101 if (bus->self != NULL)
1102 pci_read_bridge_bases(bus);
1103
1104 /* Now fixup the bus itself */
1105 pcibios_setup_bus_self(bus);
1106
1107 /* Now fixup devices on that bus */
1108 pcibios_setup_bus_devices(bus);
1109}
1110EXPORT_SYMBOL(pcibios_fixup_bus);
1111
1112static int skip_isa_ioresource_align(struct pci_dev *dev)
1113{
1114 if ((pci_flags & PCI_CAN_SKIP_ISA_ALIGN) &&
1115 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1116 return 1;
1117 return 0;
1118}
1119
1120/*
1121 * We need to avoid collisions with `mirrored' VGA ports
1122 * and other strange ISA hardware, so we always want the
1123 * addresses to be allocated in the 0x000-0x0ff region
1124 * modulo 0x400.
1125 *
1126 * Why? Because some silly external IO cards only decode
1127 * the low 10 bits of the IO address. The 0x00-0xff region
1128 * is reserved for motherboard devices that decode all 16
1129 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1130 * but we want to try to avoid allocating at 0x2900-0x2bff
1131 * which might be mirrored at 0x0100-0x03ff..
1132 */
1133void pcibios_align_resource(void *data, struct resource *res,
1134 resource_size_t size, resource_size_t align)
1135{
1136 struct pci_dev *dev = data;
1137
1138 if (res->flags & IORESOURCE_IO) {
1139 resource_size_t start = res->start;
1140
1141 if (skip_isa_ioresource_align(dev))
1142 return;
1143 if (start & 0x300) {
1144 start = (start + 0x3ff) & ~0x3ff;
1145 res->start = start;
1146 }
1147 }
1148}
1149EXPORT_SYMBOL(pcibios_align_resource);
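
A quick hedged illustration of that rule: a start of 0x2900 has bits in 0x300 set, so it is rounded up to (0x2900 + 0x3ff) & ~0x3ff = 0x2c00, landing back in the safe region modulo 0x400:

/* Illustration only: the rounding pcibios_align_resource() applies to
 * IO resources, shown on an invented start address.
 */
static resource_size_t __maybe_unused example_isa_align(resource_size_t start)
{
	if (start & 0x300)			/* e.g. 0x2900, a mirrored range */
		start = (start + 0x3ff) & ~0x3ff;	/* -> 0x2c00 */
	return start;
}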
1150
1151/*
1152 * Reparent resource children of pr that conflict with res
1153 * under res, and make res replace those children.
1154 */
1155static int __init reparent_resources(struct resource *parent,
1156 struct resource *res)
1157{
1158 struct resource *p, **pp;
1159 struct resource **firstpp = NULL;
1160
1161 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1162 if (p->end < res->start)
1163 continue;
1164 if (res->end < p->start)
1165 break;
1166 if (p->start < res->start || p->end > res->end)
1167 return -1; /* not completely contained */
1168 if (firstpp == NULL)
1169 firstpp = pp;
1170 }
1171 if (firstpp == NULL)
1172 return -1; /* didn't find any conflicting entries? */
1173 res->parent = parent;
1174 res->child = *firstpp;
1175 res->sibling = *pp;
1176 *firstpp = res;
1177 *pp = NULL;
1178 for (p = res->child; p != NULL; p = p->sibling) {
1179 p->parent = res;
1180 pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
1181 p->name,
1182 (unsigned long long)p->start,
1183 (unsigned long long)p->end, res->name);
1184 }
1185 return 0;
1186}
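
An invented before/after picture of what the function above does, assuming a parent [0x0000-0xffff] with children A [0x1000-0x1fff] and B [0x3000-0x3fff], and a new res [0x0000-0x4fff]:

/* Hypothetical illustration of reparent_resources():
 *
 *   before:  parent -> A, B           (A and B are parent's children)
 *   after:   parent -> res -> A, B    (res takes over the conflicting
 *                                      children and replaces them in
 *                                      parent's child list)
 */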
1187
1188/*
1189 * Handle resources of PCI devices. If the world were perfect, we could
1190 * just allocate all the resource regions and do nothing more. It isn't.
1191 * On the other hand, we cannot just re-allocate all devices, as it would
1192 * require us to know lots of host bridge internals. So we attempt to
1193 * keep as much of the original configuration as possible, but tweak it
1194 * when it's found to be wrong.
1195 *
1196 * Known BIOS problems we have to work around:
1197 * - I/O or memory regions not configured
1198 * - regions configured, but not enabled in the command register
1199 * - bogus I/O addresses above 64K used
1200 * - expansion ROMs left enabled (this may sound harmless, but given
1201 * the fact the PCI specs explicitly allow address decoders to be
1202 * shared between expansion ROMs and other resource regions, it's
1203 * at least dangerous)
1204 *
1205 * Our solution:
1206 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1207 * This gives us fixed barriers on where we can allocate.
1208 * (2) Allocate resources for all enabled devices. If there is
1209 * a collision, just mark the resource as unallocated. Also
1210 * disable expansion ROMs during this step.
1211 * (3) Try to allocate resources for disabled devices. If the
1212 * resources were assigned correctly, everything goes well,
1213 * if they weren't, they won't disturb allocation of other
1214 * resources.
1215 * (4) Assign new addresses to resources which were either
1216 * not configured at all or misconfigured. If explicitly
1217 * requested by the user, configure expansion ROM address
1218 * as well.
1219 */
1220
1221void pcibios_allocate_bus_resources(struct pci_bus *bus)
1222{
1223 struct pci_bus *b;
1224 int i;
1225 struct resource *res, *pr;
1226
1227 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1228 pci_domain_nr(bus), bus->number);
1229
1230 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
1231 res = bus->resource[i];
1232 if (!res || !res->flags
1233 || res->start > res->end || res->parent)
1234 continue;
1235 if (bus->parent == NULL)
1236 pr = (res->flags & IORESOURCE_IO) ?
1237 &ioport_resource : &iomem_resource;
1238 else {
1239 /* Don't bother with non-root busses when
1240 * re-assigning all resources. We clear the
1241 * resource flags as if they were colliding
1242 * and as such ensure proper re-allocation
1243 * later.
1244 */
1245 if (pci_flags & PCI_REASSIGN_ALL_RSRC)
1246 goto clear_resource;
1247 pr = pci_find_parent_resource(bus->self, res);
1248 if (pr == res) {
1249 /* this happens when the generic PCI
1250 * code (wrongly) decides that this
1251 * bridge is transparent -- paulus
1252 */
1253 continue;
1254 }
1255 }
1256
1257 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
1258 "[0x%x], parent %p (%s)\n",
1259 bus->self ? pci_name(bus->self) : "PHB",
1260 bus->number, i,
1261 (unsigned long long)res->start,
1262 (unsigned long long)res->end,
1263 (unsigned int)res->flags,
1264 pr, (pr && pr->name) ? pr->name : "nil");
1265
1266 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1267 if (request_resource(pr, res) == 0)
1268 continue;
1269 /*
1270 * Must be a conflict with an existing entry.
1271 * Move that entry (or entries) under the
1272 * bridge resource and try again.
1273 */
1274 if (reparent_resources(pr, res) == 0)
1275 continue;
1276 }
1277 printk(KERN_WARNING "PCI: Cannot allocate resource region "
1278 "%d of PCI bridge %d, will remap\n", i, bus->number);
1279clear_resource:
1280 res->flags = 0;
1281 }
1282
1283 list_for_each_entry(b, &bus->children, node)
1284 pcibios_allocate_bus_resources(b);
1285}
1286
1287static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1288{
1289 struct resource *pr, *r = &dev->resource[idx];
1290
1291 pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1292 pci_name(dev), idx,
1293 (unsigned long long)r->start,
1294 (unsigned long long)r->end,
1295 (unsigned int)r->flags);
1296
1297 pr = pci_find_parent_resource(dev, r);
1298 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1299 request_resource(pr, r) < 0) {
1300 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1301 " of device %s, will remap\n", idx, pci_name(dev));
1302 if (pr)
1303 pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
1304 pr,
1305 (unsigned long long)pr->start,
1306 (unsigned long long)pr->end,
1307 (unsigned int)pr->flags);
1308 /* We'll assign a new address later */
1309 r->flags |= IORESOURCE_UNSET;
1310 r->end -= r->start;
1311 r->start = 0;
1312 }
1313}
1314
1315static void __init pcibios_allocate_resources(int pass)
1316{
1317 struct pci_dev *dev = NULL;
1318 int idx, disabled;
1319 u16 command;
1320 struct resource *r;
1321
1322 for_each_pci_dev(dev) {
1323 pci_read_config_word(dev, PCI_COMMAND, &command);
1324 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1325 r = &dev->resource[idx];
1326 if (r->parent) /* Already allocated */
1327 continue;
1328 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1329 continue; /* Not assigned at all */
1330 /* We only allocate ROMs on pass 1 just in case they
1331 * have been screwed up by firmware
1332 */
1333 if (idx == PCI_ROM_RESOURCE)
1334 disabled = 1;
1335 else if (r->flags & IORESOURCE_IO)
1336 disabled = !(command & PCI_COMMAND_IO);
1337 else
1338 disabled = !(command & PCI_COMMAND_MEMORY);
1339 if (pass == disabled)
1340 alloc_resource(dev, idx);
1341 }
1342 if (pass)
1343 continue;
1344 r = &dev->resource[PCI_ROM_RESOURCE];
1345 if (r->flags) {
1346 /* Turn the ROM off, leave the resource region,
1347 * but keep it unregistered.
1348 */
1349 u32 reg;
1350 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1351 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1352 pr_debug("PCI: Switching off ROM of %s\n",
1353 pci_name(dev));
1354 r->flags &= ~IORESOURCE_ROM_ENABLE;
1355 pci_write_config_dword(dev, dev->rom_base_reg,
1356 reg & ~PCI_ROM_ADDRESS_ENABLE);
1357 }
1358 }
1359 }
1360}
1361
1362static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1363{
1364 struct pci_controller *hose = pci_bus_to_host(bus);
1365 resource_size_t offset;
1366 struct resource *res, *pres;
1367 int i;
1368
1369 pr_debug("Reserving legacy ranges for domain %04x\n",
1370 pci_domain_nr(bus));
1371
1372 /* Check for IO */
1373 if (!(hose->io_resource.flags & IORESOURCE_IO))
1374 goto no_io;
1375 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1376 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1377 BUG_ON(res == NULL);
1378 res->name = "Legacy IO";
1379 res->flags = IORESOURCE_IO;
1380 res->start = offset;
1381 res->end = (offset + 0xfff) & 0xfffffffful;
1382 pr_debug("Candidate legacy IO: %pR\n", res);
1383 if (request_resource(&hose->io_resource, res)) {
1384 printk(KERN_DEBUG
1385 "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1386 pci_domain_nr(bus), bus->number, res);
1387 kfree(res);
1388 }
1389
1390 no_io:
1391 /* Check for memory */
1392 offset = hose->pci_mem_offset;
1393 pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
1394 for (i = 0; i < 3; i++) {
1395 pres = &hose->mem_resources[i];
1396 if (!(pres->flags & IORESOURCE_MEM))
1397 continue;
1398 pr_debug("hose mem res: %pR\n", pres);
1399 if ((pres->start - offset) <= 0xa0000 &&
1400 (pres->end - offset) >= 0xbffff)
1401 break;
1402 }
1403 if (i >= 3)
1404 return;
1405 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1406 BUG_ON(res == NULL);
1407 res->name = "Legacy VGA memory";
1408 res->flags = IORESOURCE_MEM;
1409 res->start = 0xa0000 + offset;
1410 res->end = 0xbffff + offset;
1411 pr_debug("Candidate VGA memory: %pR\n", res);
1412 if (request_resource(pres, res)) {
1413 printk(KERN_DEBUG
1414 "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1415 pci_domain_nr(bus), bus->number, res);
1416 kfree(res);
1417 }
1418}
1419
1420void __init pcibios_resource_survey(void)
1421{
1422 struct pci_bus *b;
1423
1424 /* Allocate and assign resources. If we re-assign everything, then
1425 * we skip the allocate phase
1426 */
1427 list_for_each_entry(b, &pci_root_buses, node)
1428 pcibios_allocate_bus_resources(b);
1429
1430 if (!(pci_flags & PCI_REASSIGN_ALL_RSRC)) {
1431 pcibios_allocate_resources(0);
1432 pcibios_allocate_resources(1);
1433 }
1434
1435 /* Before we start assigning unassigned resource, we try to reserve
1436 * the low IO area and the VGA memory area if they intersect the
1437 * bus available resources to avoid allocating things on top of them
1438 */
1439 if (!(pci_flags & PCI_PROBE_ONLY)) {
1440 list_for_each_entry(b, &pci_root_buses, node)
1441 pcibios_reserve_legacy_regions(b);
1442 }
1443
1444 /* Now, if the platform didn't decide to blindly trust the firmware,
1445 * we proceed to assigning things that were left unassigned
1446 */
1447 if (!(pci_flags & PCI_PROBE_ONLY)) {
1448 pr_debug("PCI: Assigning unassigned resources...\n");
1449 pci_assign_unassigned_resources();
1450 }
1451}
1452
1453#ifdef CONFIG_HOTPLUG
1454
1455/* This is used by the PCI hotplug driver to allocate resources
1456 * of newly plugged busses. We can try to consolidate with the
1457 * rest of the code later, for now, keep it as-is as our main
1458 * resource allocation function doesn't deal with sub-trees yet.
1459 */
1460void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
1461{
1462 struct pci_dev *dev;
1463 struct pci_bus *child_bus;
1464
1465 list_for_each_entry(dev, &bus->devices, bus_list) {
1466 int i;
1467
1468 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1469 struct resource *r = &dev->resource[i];
1470
1471 if (r->parent || !r->start || !r->flags)
1472 continue;
1473
1474 pr_debug("PCI: Claiming %s: "
1475 "Resource %d: %016llx..%016llx [%x]\n",
1476 pci_name(dev), i,
1477 (unsigned long long)r->start,
1478 (unsigned long long)r->end,
1479 (unsigned int)r->flags);
1480
1481 pci_claim_resource(dev, i);
1482 }
1483 }
1484
1485 list_for_each_entry(child_bus, &bus->children, node)
1486 pcibios_claim_one_bus(child_bus);
1487}
1488EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1489
1490
1491/* pcibios_finish_adding_to_bus
1492 *
1493 * This is to be called by the hotplug code after devices have been
1494 * added to a bus; this includes calling it for a PHB that is just
1495 * being added
1496 */
1497void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1498{
1499 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1500 pci_domain_nr(bus), bus->number);
1501
1502 /* Allocate bus and devices resources */
1503 pcibios_allocate_bus_resources(bus);
1504 pcibios_claim_one_bus(bus);
1505
1506 /* Add new devices to global lists. Register in proc, sysfs. */
1507 pci_bus_add_devices(bus);
1508
1509 /* Fixup EEH */
1510 eeh_add_device_tree_late(bus);
1511}
1512EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1513
1514#endif /* CONFIG_HOTPLUG */
1515
1516int pcibios_enable_device(struct pci_dev *dev, int mask)
1517{
1518 return pci_enable_resources(dev, mask);
1519}
1520
1521void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
1522{
1523 struct pci_bus *bus = hose->bus;
1524 struct resource *res;
1525 int i;
1526
1527 /* Hookup PHB IO resource */
1528 bus->resource[0] = res = &hose->io_resource;
1529
1530 if (!res->flags) {
1531 printk(KERN_WARNING "PCI: I/O resource not set for host"
1532 " bridge %s (domain %d)\n",
1533 hose->dn->full_name, hose->global_number);
1534 /* Workaround for lack of IO resource only on 32-bit */
1535 res->start = (unsigned long)hose->io_base_virt - isa_io_base;
1536 res->end = res->start + IO_SPACE_LIMIT;
1537 res->flags = IORESOURCE_IO;
1538 }
1539
1540 pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
1541 (unsigned long long)res->start,
1542 (unsigned long long)res->end,
1543 (unsigned long)res->flags);
1544
1545 /* Hookup PHB Memory resources */
1546 for (i = 0; i < 3; ++i) {
1547 res = &hose->mem_resources[i];
1548 if (!res->flags) {
1549 if (i > 0)
1550 continue;
1551 printk(KERN_ERR "PCI: Memory resource 0 not set for "
1552 "host bridge %s (domain %d)\n",
1553 hose->dn->full_name, hose->global_number);
1554
1555 /* Workaround for lack of MEM resource only on 32-bit */
1556 res->start = hose->pci_mem_offset;
1557 res->end = (resource_size_t)-1LL;
1558 res->flags = IORESOURCE_MEM;
1559
1560 }
1561 bus->resource[i+1] = res;
1562
1563 pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
1564 i, (unsigned long long)res->start,
1565 (unsigned long long)res->end,
1566 (unsigned long)res->flags);
1567 }
1568
1569 pr_debug("PCI: PHB MEM offset = %016llx\n",
1570 (unsigned long long)hose->pci_mem_offset);
1571 pr_debug("PCI: PHB IO offset = %08lx\n",
1572 (unsigned long)hose->io_base_virt - _IO_BASE);
1573}
1574
1575/*
1576 * Null PCI config access functions, for the case when we can't
1577 * find a hose.
1578 */
1579#define NULL_PCI_OP(rw, size, type) \
1580static int \
1581null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1582{ \
1583 return PCIBIOS_DEVICE_NOT_FOUND; \
1584}
1585
1586static int
1587null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1588 int len, u32 *val)
1589{
1590 return PCIBIOS_DEVICE_NOT_FOUND;
1591}
1592
1593static int
1594null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1595 int len, u32 val)
1596{
1597 return PCIBIOS_DEVICE_NOT_FOUND;
1598}
1599
1600static struct pci_ops null_pci_ops = {
1601 .read = null_read_config,
1602 .write = null_write_config,
1603};
1604
1605/*
1606 * These functions are used early on before PCI scanning is done
1607 * and all of the pci_dev and pci_bus structures have been created.
1608 */
1609static struct pci_bus *
1610fake_pci_bus(struct pci_controller *hose, int busnr)
1611{
1612 static struct pci_bus bus;
1613
1614 if (!hose)
1615 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1616
1617 bus.number = busnr;
1618 bus.sysdata = hose;
1619 bus.ops = hose ? hose->ops : &null_pci_ops;
1620 return &bus;
1621}
1622
1623#define EARLY_PCI_OP(rw, size, type) \
1624int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1625 int devfn, int offset, type value) \
1626{ \
1627 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1628 devfn, offset, value); \
1629}
1630
1631EARLY_PCI_OP(read, byte, u8 *)
1632EARLY_PCI_OP(read, word, u16 *)
1633EARLY_PCI_OP(read, dword, u32 *)
1634EARLY_PCI_OP(write, byte, u8)
1635EARLY_PCI_OP(write, word, u16)
1636EARLY_PCI_OP(write, dword, u32)
1637
1638int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1639 int cap)
1640{
1641 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1642}
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
new file mode 100644
index 000000000000..7e0c94f501cc
--- /dev/null
+++ b/arch/microblaze/pci/pci_32.c
@@ -0,0 +1,430 @@
1/*
2 * Common pmac/prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/kernel.h>
6#include <linux/pci.h>
7#include <linux/delay.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/capability.h>
11#include <linux/sched.h>
12#include <linux/errno.h>
13#include <linux/bootmem.h>
14#include <linux/irq.h>
15#include <linux/list.h>
16#include <linux/of.h>
17
18#include <asm/processor.h>
19#include <asm/io.h>
20#include <asm/prom.h>
21#include <asm/sections.h>
22#include <asm/pci-bridge.h>
23#include <asm/byteorder.h>
24#include <asm/uaccess.h>
25
26#undef DEBUG
27
28unsigned long isa_io_base;
29unsigned long pci_dram_offset;
30int pcibios_assign_bus_offset = 1;
31
32static u8 *pci_to_OF_bus_map;
33
34/* By default, we don't re-assign bus numbers. We do this only on
35 * some pmacs
36 */
37static int pci_assign_all_buses;
38
39static int pci_bus_count;
40
41/*
42 * Functions below are used on OpenFirmware machines.
43 */
44static void
45make_one_node_map(struct device_node *node, u8 pci_bus)
46{
47 const int *bus_range;
48 int len;
49
50 if (pci_bus >= pci_bus_count)
51 return;
52 bus_range = of_get_property(node, "bus-range", &len);
53 if (bus_range == NULL || len < 2 * sizeof(int)) {
54 printk(KERN_WARNING "Can't get bus-range for %s, "
55 "assuming it starts at 0\n", node->full_name);
56 pci_to_OF_bus_map[pci_bus] = 0;
57 } else
58 pci_to_OF_bus_map[pci_bus] = bus_range[0];
59
60 for_each_child_of_node(node, node) {
61 struct pci_dev *dev;
62 const unsigned int *class_code, *reg;
63
64 class_code = of_get_property(node, "class-code", NULL);
65 if (!class_code ||
66 ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
67 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
68 continue;
69 reg = of_get_property(node, "reg", NULL);
70 if (!reg)
71 continue;
72 dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
73 if (!dev || !dev->subordinate) {
74 pci_dev_put(dev);
75 continue;
76 }
77 make_one_node_map(node, dev->subordinate->number);
78 pci_dev_put(dev);
79 }
80}
81
82void
83pcibios_make_OF_bus_map(void)
84{
85 int i;
86 struct pci_controller *hose, *tmp;
87 struct property *map_prop;
88 struct device_node *dn;
89
90 pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
91 if (!pci_to_OF_bus_map) {
92 printk(KERN_ERR "Can't allocate OF bus map !\n");
93 return;
94 }
95
96 /* We fill the bus map with invalid values, which helps
97 * debugging.
98 */
99 for (i = 0; i < pci_bus_count; i++)
100 pci_to_OF_bus_map[i] = 0xff;
101
102 /* For each hose, we begin searching bridges */
103 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
104 struct device_node *node = hose->dn;
105
106 if (!node)
107 continue;
108 make_one_node_map(node, hose->first_busno);
109 }
110 dn = of_find_node_by_path("/");
111 map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
112 if (map_prop) {
113 BUG_ON(pci_bus_count > map_prop->length);
114 memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
115 }
116 of_node_put(dn);
117#ifdef DEBUG
118 printk(KERN_INFO "PCI->OF bus map:\n");
119 for (i = 0; i < pci_bus_count; i++) {
120 if (pci_to_OF_bus_map[i] == 0xff)
121 continue;
122 printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]);
123 }
124#endif
125}
126
127typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data);
128
129static struct device_node *scan_OF_pci_childs(struct device_node *parent,
130 pci_OF_scan_iterator filter, void *data)
131{
132 struct device_node *node;
133 struct device_node *sub_node;
134
135 for_each_child_of_node(parent, node) {
136 const unsigned int *class_code;
137
138 if (filter(node, data)) {
139 of_node_put(node);
140 return node;
141 }
142
143 /* For PCI<->PCI bridges or CardBus bridges, we go down
144 * Note: some OFs create a parent node "multifunc-device" as
145 * a fake root for all functions of a multi-function device,
146 * we go down them as well.
147 */
148 class_code = of_get_property(node, "class-code", NULL);
149 if ((!class_code ||
150 ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
151 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
152 strcmp(node->name, "multifunc-device"))
153 continue;
154 sub_node = scan_OF_pci_childs(node, filter, data);
155 if (sub_node) {
156 of_node_put(node);
157 return sub_node;
158 }
159 }
160 return NULL;
161}
162
163static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
164 unsigned int devfn)
165{
166 struct device_node *np, *cnp;
167 const u32 *reg;
168 unsigned int psize;
169
170 for_each_child_of_node(parent, np) {
171 reg = of_get_property(np, "reg", &psize);
172 if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
173 return np;
174
175 /* Note: some OFs create a parent node "multifunc-device" as
176 * a fake root for all functions of a multi-function device,
177 * we go down them as well. */
178 if (!strcmp(np->name, "multifunc-device")) {
179 cnp = scan_OF_for_pci_dev(np, devfn);
180 if (cnp)
181 return cnp;
182 }
183 }
184 return NULL;
185}
186
187
188static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
189{
190 struct device_node *parent, *np;
191
192 /* Are we a root bus ? */
193 if (bus->self == NULL || bus->parent == NULL) {
194 struct pci_controller *hose = pci_bus_to_host(bus);
195 if (hose == NULL)
196 return NULL;
197 return of_node_get(hose->dn);
198 }
199
200 /* not a root bus, we need to get our parent */
201 parent = scan_OF_for_pci_bus(bus->parent);
202 if (parent == NULL)
203 return NULL;
204
205 /* now iterate for children for a match */
206 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
207 of_node_put(parent);
208
209 return np;
210}
211
212/*
213 * Scans the OF tree for a device node matching a PCI device
214 */
215struct device_node *
216pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
217{
218 struct device_node *parent, *np;
219
220 pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
221 parent = scan_OF_for_pci_bus(bus);
222 if (parent == NULL)
223 return NULL;
224 pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
225 np = scan_OF_for_pci_dev(parent, devfn);
226 of_node_put(parent);
227 pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
228
229 /* XXX most callers don't release the returned node
230 * mostly because ppc64 doesn't increase the refcount;
231 * we need to fix that.
232 */
233 return np;
234}
235EXPORT_SYMBOL(pci_busdev_to_OF_node);
236
237struct device_node*
238pci_device_to_OF_node(struct pci_dev *dev)
239{
240 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
241}
242EXPORT_SYMBOL(pci_device_to_OF_node);
243
244static int
245find_OF_pci_device_filter(struct device_node *node, void *data)
246{
247 return ((void *)node == data);
248}
249
250/*
251 * Returns the PCI device matching a given OF node
252 */
253int
254pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
255{
256 const unsigned int *reg;
257 struct pci_controller *hose;
258 struct pci_dev *dev = NULL;
259
260 /* Make sure it's really a PCI device */
261 hose = pci_find_hose_for_OF_device(node);
262 if (!hose || !hose->dn)
263 return -ENODEV;
264 if (!scan_OF_pci_childs(hose->dn,
265 find_OF_pci_device_filter, (void *)node))
266 return -ENODEV;
267 reg = of_get_property(node, "reg", NULL);
268 if (!reg)
269 return -ENODEV;
270 *bus = (reg[0] >> 16) & 0xff;
271 *devfn = ((reg[0] >> 8) & 0xff);
272
273 /* Ok, here we need some tweak. If we have already renumbered
274 * all busses, we can't rely on the OF bus number any more.
275 * The pci_to_OF_bus_map is not enough as several PCI busses
276 * may match the same OF bus number.
277 */
278 if (!pci_to_OF_bus_map)
279 return 0;
280
281 for_each_pci_dev(dev)
282 if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
283 dev->devfn == *devfn) {
284 *bus = dev->bus->number;
285 pci_dev_put(dev);
286 return 0;
287 }
288
289 return -ENODEV;
290}
291EXPORT_SYMBOL(pci_device_from_OF_node);
292
293/* We create the "pci-OF-bus-map" property now so it appears in the
294 * /proc device tree
295 */
296void __init
297pci_create_OF_bus_map(void)
298{
299 struct property *of_prop;
300 struct device_node *dn;
301
302 of_prop = (struct property *) alloc_bootmem(sizeof(struct property) +
303 256);
304 if (!of_prop)
305 return;
306 dn = of_find_node_by_path("/");
307 if (dn) {
308 memset(of_prop, -1, sizeof(struct property) + 256);
309 of_prop->name = "pci-OF-bus-map";
310 of_prop->length = 256;
311 of_prop->value = &of_prop[1];
312 prom_add_property(dn, of_prop);
313 of_node_put(dn);
314 }
315}
316
317static void __devinit pcibios_scan_phb(struct pci_controller *hose)
318{
319 struct pci_bus *bus;
320 struct device_node *node = hose->dn;
321 unsigned long io_offset;
322 struct resource *res = &hose->io_resource;
323
324 pr_debug("PCI: Scanning PHB %s\n",
325 node ? node->full_name : "<NO NAME>");
326
327 /* Create an empty bus for the toplevel */
328 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
329 if (bus == NULL) {
330 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
331 hose->global_number);
332 return;
333 }
334 bus->secondary = hose->first_busno;
335 hose->bus = bus;
336
337 /* Fixup IO space offset */
338 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
339 res->start = (res->start + io_offset) & 0xffffffffu;
340 res->end = (res->end + io_offset) & 0xffffffffu;
341
342 /* Wire up PHB bus resources */
343 pcibios_setup_phb_resources(hose);
344
345 /* Scan children */
346 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
347}
348
349static int __init pcibios_init(void)
350{
351 struct pci_controller *hose, *tmp;
352 int next_busno = 0;
353
354 printk(KERN_INFO "PCI: Probing PCI hardware\n");
355
356 if (pci_flags & PCI_REASSIGN_ALL_BUS) {
357 printk(KERN_INFO "setting pci_asign_all_busses\n");
358 pci_assign_all_buses = 1;
359 }
360
361 /* Scan all of the recorded PCI controllers. */
362 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
363 if (pci_assign_all_buses)
364 hose->first_busno = next_busno;
365 hose->last_busno = 0xff;
366 pcibios_scan_phb(hose);
367 printk(KERN_INFO "calling pci_bus_add_devices()\n");
368 pci_bus_add_devices(hose->bus);
369 if (pci_assign_all_buses || next_busno <= hose->last_busno)
370 next_busno = hose->last_busno +
371 pcibios_assign_bus_offset;
372 }
373 pci_bus_count = next_busno;
374
375 /* OpenFirmware based machines need a map of OF bus
376 * numbers vs. kernel bus numbers since we may have to
377 * remap them.
378 */
379 if (pci_assign_all_buses)
380 pcibios_make_OF_bus_map();
381
382 /* Call common code to handle resource allocation */
383 pcibios_resource_survey();
384
385 return 0;
386}
387
388subsys_initcall(pcibios_init);
389
390static struct pci_controller*
391pci_bus_to_hose(int bus)
392{
393 struct pci_controller *hose, *tmp;
394
395 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
396 if (bus >= hose->first_busno && bus <= hose->last_busno)
397 return hose;
398 return NULL;
399}
400
401/* Provide information on locations of various I/O regions in physical
402 * memory. Do this on a per-card basis so that we choose the right
403 * root bridge.
404 * Note that the returned IO or memory base is a physical address
405 */
406
407long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
408{
409 struct pci_controller *hose;
410 long result = -EOPNOTSUPP;
411
412 hose = pci_bus_to_hose(bus);
413 if (!hose)
414 return -ENODEV;
415
416 switch (which) {
417 case IOBASE_BRIDGE_NUMBER:
418 return (long)hose->first_busno;
419 case IOBASE_MEMORY:
420 return (long)hose->pci_mem_offset;
421 case IOBASE_IO:
422 return (long)hose->io_base_phys;
423 case IOBASE_ISA_IO:
424 return (long)isa_io_base;
425 case IOBASE_ISA_MEM:
426 return (long)isa_mem_base;
427 }
428
429 return result;
430}
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c
new file mode 100644
index 000000000000..7869a41b0f94
--- /dev/null
+++ b/arch/microblaze/pci/xilinx_pci.c
@@ -0,0 +1,168 @@
1/*
2 * PCI support for Xilinx plbv46_pci soft-core which can be used on
3 * Xilinx Virtex ML410 / ML510 boards.
4 *
5 * Copyright 2009 Roderick Colenbrander
6 * Copyright 2009 Secret Lab Technologies Ltd.
7 *
8 * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
9 * by Benjamin Herrenschmidt.
10 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
11 *
12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program is licensed "as is" without any warranty of any
14 * kind, whether express or implied.
15 */
16
17#include <linux/ioport.h>
18#include <linux/of.h>
19#include <linux/pci.h>
20#include <asm/io.h>
21
22#define XPLB_PCI_ADDR 0x10c
23#define XPLB_PCI_DATA 0x110
24#define XPLB_PCI_BUS 0x114
25
26#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
27 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
28
29static struct of_device_id xilinx_pci_match[] = {
30 { .compatible = "xlnx,plbv46-pci-1.03.a", },
31 {}
32};
33
34/**
35 * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
36 */
37static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
38{
39 struct pci_controller *hose;
40 int i;
41
42 if (dev->devfn || dev->bus->self)
43 return;
44
45 hose = pci_bus_to_host(dev->bus);
46 if (!hose)
47 return;
48
49 if (!of_match_node(xilinx_pci_match, hose->dn))
50 return;
51
52 /* Hide the PCI host BARs from the kernel as their content doesn't
53 * fit well in the resource management
54 */
55 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
56 dev->resource[i].start = 0;
57 dev->resource[i].end = 0;
58 dev->resource[i].flags = 0;
59 }
60
61 dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
62 pci_name(dev));
63}
64DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
65
66#ifdef DEBUG
67/**
68 * xilinx_pci_exclude_device - Don't do config access for non-root bus
69 *
70 * This is a hack. Config access to any bus other than bus 0 does not
71 * currently work on the ML510 so we prevent it here.
72 */
73static int
74xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
75{
76 return (bus != 0);
77}
78
79/**
80 * xilinx_early_pci_scan - List pci config space for available devices
81 *
82 * List pci devices in very early phase.
83 */
84void __init xilinx_early_pci_scan(struct pci_controller *hose)
85{
86 u32 bus = 0;
87 u32 val, dev, func, offset;
88
89 /* Currently we have only 2 devices connected - up to 32 devices */
90 for (dev = 0; dev < 2; dev++) {
91 /* List only the first function number - up to 8 functions */
92 for (func = 0; func < 1; func++) {
93 printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
94 /* read the first 64 standardized bytes */
95 /* Up to 192 bytes can be a list of capabilities */
96 for (offset = 0; offset < 64; offset += 4) {
97 early_read_config_dword(hose, bus,
98 PCI_DEVFN(dev, func), offset, &val);
99 if (offset == 0 && val == 0xFFFFFFFF) {
100 printk(KERN_CONT "\nABSENT");
101 break;
102 }
103 if (!(offset % 0x10))
104 printk(KERN_CONT "\n%04x: ", offset);
105
106 printk(KERN_CONT "%08x ", val);
107 }
108 printk(KERN_INFO "\n");
109 }
110 }
111}
112#else
113void __init xilinx_early_pci_scan(struct pci_controller *hose)
114{
115}
116#endif
117
118/**
119 * xilinx_pci_init - Find and register a Xilinx PCI host bridge
120 */
121void __init xilinx_pci_init(void)
122{
123 struct pci_controller *hose;
124 struct resource r;
125 void __iomem *pci_reg;
126 struct device_node *pci_node;
127
128 pci_node = of_find_matching_node(NULL, xilinx_pci_match);
129 if (!pci_node)
130 return;
131
132 if (of_address_to_resource(pci_node, 0, &r)) {
133 pr_err("xilinx-pci: cannot resolve base address\n");
134 return;
135 }
136
137 hose = pcibios_alloc_controller(pci_node);
138 if (!hose) {
139 pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
140 return;
141 }
142
143 /* Setup config space */
144 setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
145 r.start + XPLB_PCI_DATA,
146 INDIRECT_TYPE_SET_CFG_TYPE);
147
148 /* According to the xilinx plbv46_pci documentation the soft-core starts
149 * a self-init when the bus master enable bit is set. Without this bit
150 * set the pci bus can't be scanned.
151 */
152 early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
153
154 /* Set the max latency timer to 255 */
155 early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
156
157 /* Set the max bus number to 255, and bus/subbus no's to 0 */
158 pci_reg = of_iomap(pci_node, 0);
159 out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
160 iounmap(pci_reg);
161
162 /* Register the host bridge with the linux kernel! */
163 pci_process_bridge_OF_ranges(hose, pci_node,
164 INDIRECT_TYPE_SET_CFG_TYPE);
165
166 pr_info("xilinx-pci: Registered PCI host bridge\n");
167 xilinx_early_pci_scan(hose);
168}
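
For context, a hedged sketch of a device tree node this driver could bind against; all values are invented for illustration:

/* Hypothetical device tree node matched by xilinx_pci_match above:
 *
 *	pci0: pci@85e00000 {
 *		compatible = "xlnx,plbv46-pci-1.03.a";
 *		device_type = "pci";
 *		reg = <0x85e00000 0x10000>;
 *		#address-cells = <3>;
 *		#size-cells = <2>;
 *		ranges = <0x02000000 0x0 0xa0000000
 *			  0xa0000000 0x0 0x10000000>;
 *	};
 */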
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d102dd87c9f..0b51857fbaf7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PPC) += setup-bus.o
48obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o 48obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
49obj-$(CONFIG_X86_VISWS) += setup-irq.o 49obj-$(CONFIG_X86_VISWS) += setup-irq.o
50obj-$(CONFIG_MN10300) += setup-bus.o 50obj-$(CONFIG_MN10300) += setup-bus.o
51obj-$(CONFIG_MICROBLAZE) += setup-bus.o
51 52
52# 53#
53# ACPI Related PCI FW Functions 54# ACPI Related PCI FW Functions