diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-16 14:53:32 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-16 14:53:32 -0400 |
commit | d19d5efd8c8840aa4f38a6dfbfe500d8cc27de46 (patch) | |
tree | 2e2f4f57de790c7de2ccd6d1afbec8695b2c7a46 /arch/powerpc/kernel | |
parent | 34c9a0ffc75ad25b6a60f61e27c4a4b1189b8085 (diff) | |
parent | 2fe0753d49402aee325cc39c476b46fd51a8afec (diff) |
Merge tag 'powerpc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc updates from Michael Ellerman:
- Numerous minor fixes, cleanups etc.
- More EEH work from Gavin to remove its dependency on device_nodes.
- Memory hotplug implemented entirely in the kernel from Nathan
Fontenot.
- Removal of redundant CONFIG_PPC_OF by Kevin Hao.
- Rewrite of VPHN parsing logic & tests from Greg Kurz.
- A fix from Nish Aravamudan to reduce memory usage by clamping
nodes_possible_map.
- Support for pstore on powernv from Hari Bathini.
- Removal of old powerpc specific byte swap routines by David Gibson.
- Fix from Vasant Hegde to prevent the flash driver telling you it was
flashing your firmware when it wasn't.
- Patch from Ben Herrenschmidt to add an OPAL heartbeat driver.
- Fix for an oops causing get/put_cpu_var() imbalance in perf by Jan
Stancek.
- Some fixes for migration from Tyrel Datwyler.
- A new syscall to switch the cpu endian by Michael Ellerman.
- Large series from Wei Yang to implement SRIOV, reviewed and acked by
Bjorn.
- A fix for the OPAL sensor driver from Cédric Le Goater.
- Fixes to get STRICT_MM_TYPECHECKS building again by Michael Ellerman.
- Large series from Daniel Axtens to make our PCI hooks per PHB rather
than per machine.
- Small patch from Sam Bobroff to explicitly abort non-suspended
transactions on syscalls, plus a test to exercise it.
- Numerous reworks and fixes for the 24x7 PMU from Sukadev Bhattiprolu.
- Small patch to enable the hard lockup detector from Anton Blanchard.
- Fix from Dave Olson for missing L2 cache information on some CPUs.
- Some fixes from Michael Ellerman to get Cell machines booting again.
- Freescale updates from Scott: Highlights include BMan device tree
nodes, an MSI erratum workaround, a couple minor performance
improvements, config updates, and misc fixes/cleanup.
* tag 'powerpc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (196 commits)
powerpc/powermac: Fix build error seen with powermac smp builds
powerpc/pseries: Fix compile of memory hotplug without CONFIG_MEMORY_HOTREMOVE
powerpc: Remove PPC32 code from pseries specific find_and_init_phbs()
powerpc/cell: Fix iommu breakage caused by controller_ops change
powerpc/eeh: Fix crash in eeh_add_device_early() on Cell
powerpc/perf: Cap 64bit userspace backtraces to PERF_MAX_STACK_DEPTH
powerpc/perf/hv-24x7: Fail 24x7 initcall if create_events_from_catalog() fails
powerpc/pseries: Correct memory hotplug locking
powerpc: Fix missing L2 cache size in /sys/devices/system/cpu
powerpc: Add ppc64 hard lockup detector support
oprofile: Disable oprofile NMI timer on ppc64
powerpc/perf/hv-24x7: Add missing put_cpu_var()
powerpc/perf/hv-24x7: Break up single_24x7_request
powerpc/perf/hv-24x7: Define update_event_count()
powerpc/perf/hv-24x7: Whitespace cleanup
powerpc/perf/hv-24x7: Define add_event_to_24x7_request()
powerpc/perf/hv-24x7: Rename hv_24x7_event_update
powerpc/perf/hv-24x7: Move debug prints to separate function
powerpc/perf/hv-24x7: Drop event_24x7_request()
powerpc/perf/hv-24x7: Use pr_devel() to log message
...
Conflicts:
tools/testing/selftests/powerpc/Makefile
tools/testing/selftests/powerpc/tm/Makefile
Diffstat (limited to 'arch/powerpc/kernel')
30 files changed, 1439 insertions, 308 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 502cf69b6c89..c1ebbdaac28f 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -33,7 +33,8 @@ obj-y := cputable.o ptrace.o syscalls.o \ | |||
33 | signal.o sysfs.o cacheinfo.o time.o \ | 33 | signal.o sysfs.o cacheinfo.o time.o \ |
34 | prom.o traps.o setup-common.o \ | 34 | prom.o traps.o setup-common.o \ |
35 | udbg.o misc.o io.o dma.o \ | 35 | udbg.o misc.o io.o dma.o \ |
36 | misc_$(CONFIG_WORD_SIZE).o vdso32/ | 36 | misc_$(CONFIG_WORD_SIZE).o vdso32/ \ |
37 | of_platform.o prom_parse.o | ||
37 | obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ | 38 | obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ |
38 | signal_64.o ptrace32.o \ | 39 | signal_64.o ptrace32.o \ |
39 | paca.o nvram_64.o firmware.o | 40 | paca.o nvram_64.o firmware.o |
@@ -47,7 +48,6 @@ obj-$(CONFIG_PPC64) += vdso64/ | |||
47 | obj-$(CONFIG_ALTIVEC) += vecemu.o | 48 | obj-$(CONFIG_ALTIVEC) += vecemu.o |
48 | obj-$(CONFIG_PPC_970_NAP) += idle_power4.o | 49 | obj-$(CONFIG_PPC_970_NAP) += idle_power4.o |
49 | obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o | 50 | obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o |
50 | obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o | ||
51 | procfs-y := proc_powerpc.o | 51 | procfs-y := proc_powerpc.o |
52 | obj-$(CONFIG_PROC_FS) += $(procfs-y) | 52 | obj-$(CONFIG_PROC_FS) += $(procfs-y) |
53 | rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o | 53 | rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o |
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index ae77b7e59889..c641983bbdd6 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c | |||
@@ -61,12 +61,22 @@ struct cache_type_info { | |||
61 | }; | 61 | }; |
62 | 62 | ||
63 | /* These are used to index the cache_type_info array. */ | 63 | /* These are used to index the cache_type_info array. */ |
64 | #define CACHE_TYPE_UNIFIED 0 | 64 | #define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */ |
65 | #define CACHE_TYPE_INSTRUCTION 1 | 65 | #define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */ |
66 | #define CACHE_TYPE_DATA 2 | 66 | #define CACHE_TYPE_INSTRUCTION 2 |
67 | #define CACHE_TYPE_DATA 3 | ||
67 | 68 | ||
68 | static const struct cache_type_info cache_type_info[] = { | 69 | static const struct cache_type_info cache_type_info[] = { |
69 | { | 70 | { |
71 | /* Embedded systems that use cache-size, cache-block-size, | ||
72 | * etc. for the Unified (typically L2) cache. */ | ||
73 | .name = "Unified", | ||
74 | .size_prop = "cache-size", | ||
75 | .line_size_props = { "cache-line-size", | ||
76 | "cache-block-size", }, | ||
77 | .nr_sets_prop = "cache-sets", | ||
78 | }, | ||
79 | { | ||
70 | /* PowerPC Processor binding says the [di]-cache-* | 80 | /* PowerPC Processor binding says the [di]-cache-* |
71 | * must be equal on unified caches, so just use | 81 | * must be equal on unified caches, so just use |
72 | * d-cache properties. */ | 82 | * d-cache properties. */ |
@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache) | |||
293 | { | 303 | { |
294 | struct cache *iter; | 304 | struct cache *iter; |
295 | 305 | ||
296 | if (cache->type == CACHE_TYPE_UNIFIED) | 306 | if (cache->type == CACHE_TYPE_UNIFIED || |
307 | cache->type == CACHE_TYPE_UNIFIED_D) | ||
297 | return cache; | 308 | return cache; |
298 | 309 | ||
299 | list_for_each_entry(iter, &cache_list, list) | 310 | list_for_each_entry(iter, &cache_list, list) |
@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np) | |||
324 | return of_get_property(np, "cache-unified", NULL); | 335 | return of_get_property(np, "cache-unified", NULL); |
325 | } | 336 | } |
326 | 337 | ||
327 | static struct cache *cache_do_one_devnode_unified(struct device_node *node, | 338 | /* |
328 | int level) | 339 | * Unified caches can have two different sets of tags. Most embedded |
340 | * use cache-size, etc. for the unified cache size, but open firmware systems | ||
341 | * use d-cache-size, etc. Check on initialization for which type we have, and | ||
342 | * return the appropriate structure type. Assume it's embedded if it isn't | ||
343 | * open firmware. If it's yet a 3rd type, then there will be missing entries | ||
344 | * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need | ||
345 | * to be extended further. | ||
346 | */ | ||
347 | static int cache_is_unified_d(const struct device_node *np) | ||
329 | { | 348 | { |
330 | struct cache *cache; | 349 | return of_get_property(np, |
350 | cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ? | ||
351 | CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED; | ||
352 | } | ||
331 | 353 | ||
354 | /* | ||
355 | */ | ||
356 | static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level) | ||
357 | { | ||
332 | pr_debug("creating L%d ucache for %s\n", level, node->full_name); | 358 | pr_debug("creating L%d ucache for %s\n", level, node->full_name); |
333 | 359 | ||
334 | cache = new_cache(CACHE_TYPE_UNIFIED, level, node); | 360 | return new_cache(cache_is_unified_d(node), level, node); |
335 | |||
336 | return cache; | ||
337 | } | 361 | } |
338 | 362 | ||
339 | static struct cache *cache_do_one_devnode_split(struct device_node *node, | 363 | static struct cache *cache_do_one_devnode_split(struct device_node *node, |
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 46733535cc0b..9c9b7411b28b 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S | |||
@@ -137,15 +137,11 @@ __init_HFSCR: | |||
137 | /* | 137 | /* |
138 | * Clear the TLB using the specified IS form of tlbiel instruction | 138 | * Clear the TLB using the specified IS form of tlbiel instruction |
139 | * (invalidate by congruence class). P7 has 128 CCs., P8 has 512. | 139 | * (invalidate by congruence class). P7 has 128 CCs., P8 has 512. |
140 | * | ||
141 | * r3 = IS field | ||
142 | */ | 140 | */ |
143 | __init_tlb_power7: | 141 | __init_tlb_power7: |
144 | li r3,0xc00 /* IS field = 0b11 */ | ||
145 | _GLOBAL(__flush_tlb_power7) | ||
146 | li r6,128 | 142 | li r6,128 |
147 | mtctr r6 | 143 | mtctr r6 |
148 | mr r7,r3 /* IS field */ | 144 | li r7,0xc00 /* IS field = 0b11 */ |
149 | ptesync | 145 | ptesync |
150 | 2: tlbiel r7 | 146 | 2: tlbiel r7 |
151 | addi r7,r7,0x1000 | 147 | addi r7,r7,0x1000 |
@@ -154,11 +150,9 @@ _GLOBAL(__flush_tlb_power7) | |||
154 | 1: blr | 150 | 1: blr |
155 | 151 | ||
156 | __init_tlb_power8: | 152 | __init_tlb_power8: |
157 | li r3,0xc00 /* IS field = 0b11 */ | ||
158 | _GLOBAL(__flush_tlb_power8) | ||
159 | li r6,512 | 153 | li r6,512 |
160 | mtctr r6 | 154 | mtctr r6 |
161 | mr r7,r3 /* IS field */ | 155 | li r7,0xc00 /* IS field = 0b11 */ |
162 | ptesync | 156 | ptesync |
163 | 2: tlbiel r7 | 157 | 2: tlbiel r7 |
164 | addi r7,r7,0x1000 | 158 | addi r7,r7,0x1000 |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index f83046878336..60262fdf35ba 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -71,8 +71,8 @@ extern void __restore_cpu_power7(void); | |||
71 | extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); | 71 | extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); |
72 | extern void __restore_cpu_power8(void); | 72 | extern void __restore_cpu_power8(void); |
73 | extern void __restore_cpu_a2(void); | 73 | extern void __restore_cpu_a2(void); |
74 | extern void __flush_tlb_power7(unsigned long inval_selector); | 74 | extern void __flush_tlb_power7(unsigned int action); |
75 | extern void __flush_tlb_power8(unsigned long inval_selector); | 75 | extern void __flush_tlb_power8(unsigned int action); |
76 | extern long __machine_check_early_realmode_p7(struct pt_regs *regs); | 76 | extern long __machine_check_early_realmode_p7(struct pt_regs *regs); |
77 | extern long __machine_check_early_realmode_p8(struct pt_regs *regs); | 77 | extern long __machine_check_early_realmode_p8(struct pt_regs *regs); |
78 | #endif /* CONFIG_PPC64 */ | 78 | #endif /* CONFIG_PPC64 */ |
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 735979764cd4..6e8d764ce47b 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c | |||
@@ -116,16 +116,13 @@ void __init swiotlb_detect_4g(void) | |||
116 | } | 116 | } |
117 | } | 117 | } |
118 | 118 | ||
119 | static int __init swiotlb_late_init(void) | 119 | static int __init check_swiotlb_enabled(void) |
120 | { | 120 | { |
121 | if (ppc_swiotlb_enable) { | 121 | if (ppc_swiotlb_enable) |
122 | swiotlb_print_info(); | 122 | swiotlb_print_info(); |
123 | set_pci_dma_ops(&swiotlb_dma_ops); | 123 | else |
124 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; | ||
125 | } else { | ||
126 | swiotlb_free(); | 124 | swiotlb_free(); |
127 | } | ||
128 | 125 | ||
129 | return 0; | 126 | return 0; |
130 | } | 127 | } |
131 | subsys_initcall(swiotlb_late_init); | 128 | subsys_initcall(check_swiotlb_enabled); |
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 3b2252e7731b..a4c62eb0ee48 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -164,30 +164,34 @@ __setup("eeh=", eeh_setup); | |||
164 | */ | 164 | */ |
165 | static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | 165 | static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) |
166 | { | 166 | { |
167 | struct device_node *dn = eeh_dev_to_of_node(edev); | 167 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); |
168 | u32 cfg; | 168 | u32 cfg; |
169 | int cap, i; | 169 | int cap, i; |
170 | int n = 0, l = 0; | 170 | int n = 0, l = 0; |
171 | char buffer[128]; | 171 | char buffer[128]; |
172 | 172 | ||
173 | n += scnprintf(buf+n, len-n, "%s\n", dn->full_name); | 173 | n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n", |
174 | pr_warn("EEH: of node=%s\n", dn->full_name); | 174 | edev->phb->global_number, pdn->busno, |
175 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); | ||
176 | pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n", | ||
177 | edev->phb->global_number, pdn->busno, | ||
178 | PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); | ||
175 | 179 | ||
176 | eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg); | 180 | eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg); |
177 | n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); | 181 | n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); |
178 | pr_warn("EEH: PCI device/vendor: %08x\n", cfg); | 182 | pr_warn("EEH: PCI device/vendor: %08x\n", cfg); |
179 | 183 | ||
180 | eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg); | 184 | eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg); |
181 | n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); | 185 | n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); |
182 | pr_warn("EEH: PCI cmd/status register: %08x\n", cfg); | 186 | pr_warn("EEH: PCI cmd/status register: %08x\n", cfg); |
183 | 187 | ||
184 | /* Gather bridge-specific registers */ | 188 | /* Gather bridge-specific registers */ |
185 | if (edev->mode & EEH_DEV_BRIDGE) { | 189 | if (edev->mode & EEH_DEV_BRIDGE) { |
186 | eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg); | 190 | eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg); |
187 | n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); | 191 | n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); |
188 | pr_warn("EEH: Bridge secondary status: %04x\n", cfg); | 192 | pr_warn("EEH: Bridge secondary status: %04x\n", cfg); |
189 | 193 | ||
190 | eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg); | 194 | eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg); |
191 | n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); | 195 | n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); |
192 | pr_warn("EEH: Bridge control: %04x\n", cfg); | 196 | pr_warn("EEH: Bridge control: %04x\n", cfg); |
193 | } | 197 | } |
@@ -195,11 +199,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | |||
195 | /* Dump out the PCI-X command and status regs */ | 199 | /* Dump out the PCI-X command and status regs */ |
196 | cap = edev->pcix_cap; | 200 | cap = edev->pcix_cap; |
197 | if (cap) { | 201 | if (cap) { |
198 | eeh_ops->read_config(dn, cap, 4, &cfg); | 202 | eeh_ops->read_config(pdn, cap, 4, &cfg); |
199 | n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); | 203 | n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); |
200 | pr_warn("EEH: PCI-X cmd: %08x\n", cfg); | 204 | pr_warn("EEH: PCI-X cmd: %08x\n", cfg); |
201 | 205 | ||
202 | eeh_ops->read_config(dn, cap+4, 4, &cfg); | 206 | eeh_ops->read_config(pdn, cap+4, 4, &cfg); |
203 | n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); | 207 | n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); |
204 | pr_warn("EEH: PCI-X status: %08x\n", cfg); | 208 | pr_warn("EEH: PCI-X status: %08x\n", cfg); |
205 | } | 209 | } |
@@ -211,7 +215,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | |||
211 | pr_warn("EEH: PCI-E capabilities and status follow:\n"); | 215 | pr_warn("EEH: PCI-E capabilities and status follow:\n"); |
212 | 216 | ||
213 | for (i=0; i<=8; i++) { | 217 | for (i=0; i<=8; i++) { |
214 | eeh_ops->read_config(dn, cap+4*i, 4, &cfg); | 218 | eeh_ops->read_config(pdn, cap+4*i, 4, &cfg); |
215 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); | 219 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); |
216 | 220 | ||
217 | if ((i % 4) == 0) { | 221 | if ((i % 4) == 0) { |
@@ -238,7 +242,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) | |||
238 | pr_warn("EEH: PCI-E AER capability register set follows:\n"); | 242 | pr_warn("EEH: PCI-E AER capability register set follows:\n"); |
239 | 243 | ||
240 | for (i=0; i<=13; i++) { | 244 | for (i=0; i<=13; i++) { |
241 | eeh_ops->read_config(dn, cap+4*i, 4, &cfg); | 245 | eeh_ops->read_config(pdn, cap+4*i, 4, &cfg); |
242 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); | 246 | n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); |
243 | 247 | ||
244 | if ((i % 4) == 0) { | 248 | if ((i % 4) == 0) { |
@@ -414,11 +418,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
414 | int ret; | 418 | int ret; |
415 | int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); | 419 | int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); |
416 | unsigned long flags; | 420 | unsigned long flags; |
417 | struct device_node *dn; | 421 | struct pci_dn *pdn; |
418 | struct pci_dev *dev; | 422 | struct pci_dev *dev; |
419 | struct eeh_pe *pe, *parent_pe, *phb_pe; | 423 | struct eeh_pe *pe, *parent_pe, *phb_pe; |
420 | int rc = 0; | 424 | int rc = 0; |
421 | const char *location; | 425 | const char *location = NULL; |
422 | 426 | ||
423 | eeh_stats.total_mmio_ffs++; | 427 | eeh_stats.total_mmio_ffs++; |
424 | 428 | ||
@@ -429,15 +433,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
429 | eeh_stats.no_dn++; | 433 | eeh_stats.no_dn++; |
430 | return 0; | 434 | return 0; |
431 | } | 435 | } |
432 | dn = eeh_dev_to_of_node(edev); | ||
433 | dev = eeh_dev_to_pci_dev(edev); | 436 | dev = eeh_dev_to_pci_dev(edev); |
434 | pe = eeh_dev_to_pe(edev); | 437 | pe = eeh_dev_to_pe(edev); |
435 | 438 | ||
436 | /* Access to IO BARs might get this far and still not want checking. */ | 439 | /* Access to IO BARs might get this far and still not want checking. */ |
437 | if (!pe) { | 440 | if (!pe) { |
438 | eeh_stats.ignored_check++; | 441 | eeh_stats.ignored_check++; |
439 | pr_debug("EEH: Ignored check for %s %s\n", | 442 | pr_debug("EEH: Ignored check for %s\n", |
440 | eeh_pci_name(dev), dn->full_name); | 443 | eeh_pci_name(dev)); |
441 | return 0; | 444 | return 0; |
442 | } | 445 | } |
443 | 446 | ||
@@ -473,10 +476,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
473 | if (pe->state & EEH_PE_ISOLATED) { | 476 | if (pe->state & EEH_PE_ISOLATED) { |
474 | pe->check_count++; | 477 | pe->check_count++; |
475 | if (pe->check_count % EEH_MAX_FAILS == 0) { | 478 | if (pe->check_count % EEH_MAX_FAILS == 0) { |
476 | location = of_get_property(dn, "ibm,loc-code", NULL); | 479 | pdn = eeh_dev_to_pdn(edev); |
480 | if (pdn->node) | ||
481 | location = of_get_property(pdn->node, "ibm,loc-code", NULL); | ||
477 | printk(KERN_ERR "EEH: %d reads ignored for recovering device at " | 482 | printk(KERN_ERR "EEH: %d reads ignored for recovering device at " |
478 | "location=%s driver=%s pci addr=%s\n", | 483 | "location=%s driver=%s pci addr=%s\n", |
479 | pe->check_count, location, | 484 | pe->check_count, |
485 | location ? location : "unknown", | ||
480 | eeh_driver_name(dev), eeh_pci_name(dev)); | 486 | eeh_driver_name(dev), eeh_pci_name(dev)); |
481 | printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", | 487 | printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n", |
482 | eeh_driver_name(dev)); | 488 | eeh_driver_name(dev)); |
@@ -667,6 +673,55 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) | |||
667 | return rc; | 673 | return rc; |
668 | } | 674 | } |
669 | 675 | ||
676 | static void *eeh_disable_and_save_dev_state(void *data, void *userdata) | ||
677 | { | ||
678 | struct eeh_dev *edev = data; | ||
679 | struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); | ||
680 | struct pci_dev *dev = userdata; | ||
681 | |||
682 | /* | ||
683 | * The caller should have disabled and saved the | ||
684 | * state for the specified device | ||
685 | */ | ||
686 | if (!pdev || pdev == dev) | ||
687 | return NULL; | ||
688 | |||
689 | /* Ensure we have D0 power state */ | ||
690 | pci_set_power_state(pdev, PCI_D0); | ||
691 | |||
692 | /* Save device state */ | ||
693 | pci_save_state(pdev); | ||
694 | |||
695 | /* | ||
696 | * Disable device to avoid any DMA traffic and | ||
697 | * interrupt from the device | ||
698 | */ | ||
699 | pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); | ||
700 | |||
701 | return NULL; | ||
702 | } | ||
703 | |||
704 | static void *eeh_restore_dev_state(void *data, void *userdata) | ||
705 | { | ||
706 | struct eeh_dev *edev = data; | ||
707 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
708 | struct pci_dev *pdev = eeh_dev_to_pci_dev(edev); | ||
709 | struct pci_dev *dev = userdata; | ||
710 | |||
711 | if (!pdev) | ||
712 | return NULL; | ||
713 | |||
714 | /* Apply customization from firmware */ | ||
715 | if (pdn && eeh_ops->restore_config) | ||
716 | eeh_ops->restore_config(pdn); | ||
717 | |||
718 | /* The caller should restore state for the specified device */ | ||
719 | if (pdev != dev) | ||
720 | pci_save_state(pdev); | ||
721 | |||
722 | return NULL; | ||
723 | } | ||
724 | |||
670 | /** | 725 | /** |
671 | * pcibios_set_pcie_slot_reset - Set PCI-E reset state | 726 | * pcibios_set_pcie_slot_reset - Set PCI-E reset state |
672 | * @dev: pci device struct | 727 | * @dev: pci device struct |
@@ -689,13 +744,19 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat | |||
689 | switch (state) { | 744 | switch (state) { |
690 | case pcie_deassert_reset: | 745 | case pcie_deassert_reset: |
691 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); | 746 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); |
747 | eeh_unfreeze_pe(pe, false); | ||
692 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); | 748 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); |
749 | eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev); | ||
693 | break; | 750 | break; |
694 | case pcie_hot_reset: | 751 | case pcie_hot_reset: |
752 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); | ||
753 | eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); | ||
695 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); | 754 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); |
696 | eeh_ops->reset(pe, EEH_RESET_HOT); | 755 | eeh_ops->reset(pe, EEH_RESET_HOT); |
697 | break; | 756 | break; |
698 | case pcie_warm_reset: | 757 | case pcie_warm_reset: |
758 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); | ||
759 | eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); | ||
699 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); | 760 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); |
700 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); | 761 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); |
701 | break; | 762 | break; |
@@ -815,15 +876,15 @@ out: | |||
815 | */ | 876 | */ |
816 | void eeh_save_bars(struct eeh_dev *edev) | 877 | void eeh_save_bars(struct eeh_dev *edev) |
817 | { | 878 | { |
879 | struct pci_dn *pdn; | ||
818 | int i; | 880 | int i; |
819 | struct device_node *dn; | ||
820 | 881 | ||
821 | if (!edev) | 882 | pdn = eeh_dev_to_pdn(edev); |
883 | if (!pdn) | ||
822 | return; | 884 | return; |
823 | dn = eeh_dev_to_of_node(edev); | ||
824 | 885 | ||
825 | for (i = 0; i < 16; i++) | 886 | for (i = 0; i < 16; i++) |
826 | eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]); | 887 | eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]); |
827 | 888 | ||
828 | /* | 889 | /* |
829 | * For PCI bridges including root port, we need enable bus | 890 | * For PCI bridges including root port, we need enable bus |
@@ -914,7 +975,7 @@ static struct notifier_block eeh_reboot_nb = { | |||
914 | int eeh_init(void) | 975 | int eeh_init(void) |
915 | { | 976 | { |
916 | struct pci_controller *hose, *tmp; | 977 | struct pci_controller *hose, *tmp; |
917 | struct device_node *phb; | 978 | struct pci_dn *pdn; |
918 | static int cnt = 0; | 979 | static int cnt = 0; |
919 | int ret = 0; | 980 | int ret = 0; |
920 | 981 | ||
@@ -949,20 +1010,9 @@ int eeh_init(void) | |||
949 | return ret; | 1010 | return ret; |
950 | 1011 | ||
951 | /* Enable EEH for all adapters */ | 1012 | /* Enable EEH for all adapters */ |
952 | if (eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) { | 1013 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { |
953 | list_for_each_entry_safe(hose, tmp, | 1014 | pdn = hose->pci_data; |
954 | &hose_list, list_node) { | 1015 | traverse_pci_dn(pdn, eeh_ops->probe, NULL); |
955 | phb = hose->dn; | ||
956 | traverse_pci_devices(phb, eeh_ops->of_probe, NULL); | ||
957 | } | ||
958 | } else if (eeh_has_flag(EEH_PROBE_MODE_DEV)) { | ||
959 | list_for_each_entry_safe(hose, tmp, | ||
960 | &hose_list, list_node) | ||
961 | pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL); | ||
962 | } else { | ||
963 | pr_warn("%s: Invalid probe mode %x", | ||
964 | __func__, eeh_subsystem_flags); | ||
965 | return -EINVAL; | ||
966 | } | 1016 | } |
967 | 1017 | ||
968 | /* | 1018 | /* |
@@ -987,8 +1037,8 @@ int eeh_init(void) | |||
987 | core_initcall_sync(eeh_init); | 1037 | core_initcall_sync(eeh_init); |
988 | 1038 | ||
989 | /** | 1039 | /** |
990 | * eeh_add_device_early - Enable EEH for the indicated device_node | 1040 | * eeh_add_device_early - Enable EEH for the indicated device node |
991 | * @dn: device node for which to set up EEH | 1041 | * @pdn: PCI device node for which to set up EEH |
992 | * | 1042 | * |
993 | * This routine must be used to perform EEH initialization for PCI | 1043 | * This routine must be used to perform EEH initialization for PCI |
994 | * devices that were added after system boot (e.g. hotplug, dlpar). | 1044 | * devices that were added after system boot (e.g. hotplug, dlpar). |
@@ -998,44 +1048,41 @@ core_initcall_sync(eeh_init); | |||
998 | * on the CEC architecture, type of the device, on earlier boot | 1048 | * on the CEC architecture, type of the device, on earlier boot |
999 | * command-line arguments & etc. | 1049 | * command-line arguments & etc. |
1000 | */ | 1050 | */ |
1001 | void eeh_add_device_early(struct device_node *dn) | 1051 | void eeh_add_device_early(struct pci_dn *pdn) |
1002 | { | 1052 | { |
1003 | struct pci_controller *phb; | 1053 | struct pci_controller *phb; |
1054 | struct eeh_dev *edev = pdn_to_eeh_dev(pdn); | ||
1004 | 1055 | ||
1005 | /* | 1056 | if (!edev || !eeh_enabled()) |
1006 | * If we're doing EEH probe based on PCI device, we | ||
1007 | * would delay the probe until late stage because | ||
1008 | * the PCI device isn't available this moment. | ||
1009 | */ | ||
1010 | if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) | ||
1011 | return; | 1057 | return; |
1012 | 1058 | ||
1013 | if (!of_node_to_eeh_dev(dn)) | ||
1014 | return; | ||
1015 | phb = of_node_to_eeh_dev(dn)->phb; | ||
1016 | |||
1017 | /* USB Bus children of PCI devices will not have BUID's */ | 1059 | /* USB Bus children of PCI devices will not have BUID's */ |
1018 | if (NULL == phb || 0 == phb->buid) | 1060 | phb = edev->phb; |
1061 | if (NULL == phb || | ||
1062 | (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid)) | ||
1019 | return; | 1063 | return; |
1020 | 1064 | ||
1021 | eeh_ops->of_probe(dn, NULL); | 1065 | eeh_ops->probe(pdn, NULL); |
1022 | } | 1066 | } |
1023 | 1067 | ||
1024 | /** | 1068 | /** |
1025 | * eeh_add_device_tree_early - Enable EEH for the indicated device | 1069 | * eeh_add_device_tree_early - Enable EEH for the indicated device |
1026 | * @dn: device node | 1070 | * @pdn: PCI device node |
1027 | * | 1071 | * |
1028 | * This routine must be used to perform EEH initialization for the | 1072 | * This routine must be used to perform EEH initialization for the |
1029 | * indicated PCI device that was added after system boot (e.g. | 1073 | * indicated PCI device that was added after system boot (e.g. |
1030 | * hotplug, dlpar). | 1074 | * hotplug, dlpar). |
1031 | */ | 1075 | */ |
1032 | void eeh_add_device_tree_early(struct device_node *dn) | 1076 | void eeh_add_device_tree_early(struct pci_dn *pdn) |
1033 | { | 1077 | { |
1034 | struct device_node *sib; | 1078 | struct pci_dn *n; |
1035 | 1079 | ||
1036 | for_each_child_of_node(dn, sib) | 1080 | if (!pdn) |
1037 | eeh_add_device_tree_early(sib); | 1081 | return; |
1038 | eeh_add_device_early(dn); | 1082 | |
1083 | list_for_each_entry(n, &pdn->child_list, list) | ||
1084 | eeh_add_device_tree_early(n); | ||
1085 | eeh_add_device_early(pdn); | ||
1039 | } | 1086 | } |
1040 | EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); | 1087 | EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); |
1041 | 1088 | ||
@@ -1048,7 +1095,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); | |||
1048 | */ | 1095 | */ |
1049 | void eeh_add_device_late(struct pci_dev *dev) | 1096 | void eeh_add_device_late(struct pci_dev *dev) |
1050 | { | 1097 | { |
1051 | struct device_node *dn; | 1098 | struct pci_dn *pdn; |
1052 | struct eeh_dev *edev; | 1099 | struct eeh_dev *edev; |
1053 | 1100 | ||
1054 | if (!dev || !eeh_enabled()) | 1101 | if (!dev || !eeh_enabled()) |
@@ -1056,8 +1103,8 @@ void eeh_add_device_late(struct pci_dev *dev) | |||
1056 | 1103 | ||
1057 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); | 1104 | pr_debug("EEH: Adding device %s\n", pci_name(dev)); |
1058 | 1105 | ||
1059 | dn = pci_device_to_OF_node(dev); | 1106 | pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); |
1060 | edev = of_node_to_eeh_dev(dn); | 1107 | edev = pdn_to_eeh_dev(pdn); |
1061 | if (edev->pdev == dev) { | 1108 | if (edev->pdev == dev) { |
1062 | pr_debug("EEH: Already referenced !\n"); | 1109 | pr_debug("EEH: Already referenced !\n"); |
1063 | return; | 1110 | return; |
@@ -1089,13 +1136,6 @@ void eeh_add_device_late(struct pci_dev *dev) | |||
1089 | edev->pdev = dev; | 1136 | edev->pdev = dev; |
1090 | dev->dev.archdata.edev = edev; | 1137 | dev->dev.archdata.edev = edev; |
1091 | 1138 | ||
1092 | /* | ||
1093 | * We have to do the EEH probe here because the PCI device | ||
1094 | * hasn't been created yet in the early stage. | ||
1095 | */ | ||
1096 | if (eeh_has_flag(EEH_PROBE_MODE_DEV)) | ||
1097 | eeh_ops->dev_probe(dev, NULL); | ||
1098 | |||
1099 | eeh_addr_cache_insert_dev(dev); | 1139 | eeh_addr_cache_insert_dev(dev); |
1100 | } | 1140 | } |
1101 | 1141 | ||
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c index 07d8a2423a61..eeabeabea49c 100644 --- a/arch/powerpc/kernel/eeh_cache.c +++ b/arch/powerpc/kernel/eeh_cache.c | |||
@@ -171,30 +171,27 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo, | |||
171 | 171 | ||
172 | static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) | 172 | static void __eeh_addr_cache_insert_dev(struct pci_dev *dev) |
173 | { | 173 | { |
174 | struct device_node *dn; | 174 | struct pci_dn *pdn; |
175 | struct eeh_dev *edev; | 175 | struct eeh_dev *edev; |
176 | int i; | 176 | int i; |
177 | 177 | ||
178 | dn = pci_device_to_OF_node(dev); | 178 | pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); |
179 | if (!dn) { | 179 | if (!pdn) { |
180 | pr_warn("PCI: no pci dn found for dev=%s\n", | 180 | pr_warn("PCI: no pci dn found for dev=%s\n", |
181 | pci_name(dev)); | 181 | pci_name(dev)); |
182 | return; | 182 | return; |
183 | } | 183 | } |
184 | 184 | ||
185 | edev = of_node_to_eeh_dev(dn); | 185 | edev = pdn_to_eeh_dev(pdn); |
186 | if (!edev) { | 186 | if (!edev) { |
187 | pr_warn("PCI: no EEH dev found for dn=%s\n", | 187 | pr_warn("PCI: no EEH dev found for %s\n", |
188 | dn->full_name); | 188 | pci_name(dev)); |
189 | return; | 189 | return; |
190 | } | 190 | } |
191 | 191 | ||
192 | /* Skip any devices for which EEH is not enabled. */ | 192 | /* Skip any devices for which EEH is not enabled. */ |
193 | if (!edev->pe) { | 193 | if (!edev->pe) { |
194 | #ifdef DEBUG | 194 | dev_dbg(&dev->dev, "EEH: Skip building address cache\n"); |
195 | pr_info("PCI: skip building address cache for=%s - %s\n", | ||
196 | pci_name(dev), dn->full_name); | ||
197 | #endif | ||
198 | return; | 195 | return; |
199 | } | 196 | } |
200 | 197 | ||
@@ -282,18 +279,18 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev) | |||
282 | */ | 279 | */ |
283 | void eeh_addr_cache_build(void) | 280 | void eeh_addr_cache_build(void) |
284 | { | 281 | { |
285 | struct device_node *dn; | 282 | struct pci_dn *pdn; |
286 | struct eeh_dev *edev; | 283 | struct eeh_dev *edev; |
287 | struct pci_dev *dev = NULL; | 284 | struct pci_dev *dev = NULL; |
288 | 285 | ||
289 | spin_lock_init(&pci_io_addr_cache_root.piar_lock); | 286 | spin_lock_init(&pci_io_addr_cache_root.piar_lock); |
290 | 287 | ||
291 | for_each_pci_dev(dev) { | 288 | for_each_pci_dev(dev) { |
292 | dn = pci_device_to_OF_node(dev); | 289 | pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); |
293 | if (!dn) | 290 | if (!pdn) |
294 | continue; | 291 | continue; |
295 | 292 | ||
296 | edev = of_node_to_eeh_dev(dn); | 293 | edev = pdn_to_eeh_dev(pdn); |
297 | if (!edev) | 294 | if (!edev) |
298 | continue; | 295 | continue; |
299 | 296 | ||
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index e5274ee9a75f..aabba94ff9cb 100644 --- a/arch/powerpc/kernel/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c | |||
@@ -43,13 +43,13 @@ | |||
43 | 43 | ||
44 | /** | 44 | /** |
45 | * eeh_dev_init - Create EEH device according to OF node | 45 | * eeh_dev_init - Create EEH device according to OF node |
46 | * @dn: device node | 46 | * @pdn: PCI device node |
47 | * @data: PHB | 47 | * @data: PHB |
48 | * | 48 | * |
49 | * It will create EEH device according to the given OF node. The function | 49 | * It will create EEH device according to the given OF node. The function |
50 | * might be called by PCI emunation, DR, PHB hotplug. | 50 | * might be called by PCI emunation, DR, PHB hotplug. |
51 | */ | 51 | */ |
52 | void *eeh_dev_init(struct device_node *dn, void *data) | 52 | void *eeh_dev_init(struct pci_dn *pdn, void *data) |
53 | { | 53 | { |
54 | struct pci_controller *phb = data; | 54 | struct pci_controller *phb = data; |
55 | struct eeh_dev *edev; | 55 | struct eeh_dev *edev; |
@@ -63,8 +63,8 @@ void *eeh_dev_init(struct device_node *dn, void *data) | |||
63 | } | 63 | } |
64 | 64 | ||
65 | /* Associate EEH device with OF node */ | 65 | /* Associate EEH device with OF node */ |
66 | PCI_DN(dn)->edev = edev; | 66 | pdn->edev = edev; |
67 | edev->dn = dn; | 67 | edev->pdn = pdn; |
68 | edev->phb = phb; | 68 | edev->phb = phb; |
69 | INIT_LIST_HEAD(&edev->list); | 69 | INIT_LIST_HEAD(&edev->list); |
70 | 70 | ||
@@ -80,16 +80,16 @@ void *eeh_dev_init(struct device_node *dn, void *data) | |||
80 | */ | 80 | */ |
81 | void eeh_dev_phb_init_dynamic(struct pci_controller *phb) | 81 | void eeh_dev_phb_init_dynamic(struct pci_controller *phb) |
82 | { | 82 | { |
83 | struct device_node *dn = phb->dn; | 83 | struct pci_dn *root = phb->pci_data; |
84 | 84 | ||
85 | /* EEH PE for PHB */ | 85 | /* EEH PE for PHB */ |
86 | eeh_phb_pe_create(phb); | 86 | eeh_phb_pe_create(phb); |
87 | 87 | ||
88 | /* EEH device for PHB */ | 88 | /* EEH device for PHB */ |
89 | eeh_dev_init(dn, phb); | 89 | eeh_dev_init(root, phb); |
90 | 90 | ||
91 | /* EEH devices for children OF nodes */ | 91 | /* EEH devices for children OF nodes */ |
92 | traverse_pci_devices(dn, eeh_dev_init, phb); | 92 | traverse_pci_dn(root, eeh_dev_init, phb); |
93 | } | 93 | } |
94 | 94 | ||
95 | /** | 95 | /** |
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index d099540c0f56..24768ff3cb73 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -83,28 +83,6 @@ static inline void eeh_pcid_put(struct pci_dev *pdev) | |||
83 | module_put(pdev->driver->driver.owner); | 83 | module_put(pdev->driver->driver.owner); |
84 | } | 84 | } |
85 | 85 | ||
86 | #if 0 | ||
87 | static void print_device_node_tree(struct pci_dn *pdn, int dent) | ||
88 | { | ||
89 | int i; | ||
90 | struct device_node *pc; | ||
91 | |||
92 | if (!pdn) | ||
93 | return; | ||
94 | for (i = 0; i < dent; i++) | ||
95 | printk(" "); | ||
96 | printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n", | ||
97 | pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr, | ||
98 | pdn->eeh_pe_config_addr, pdn->node->full_name); | ||
99 | dent += 3; | ||
100 | pc = pdn->node->child; | ||
101 | while (pc) { | ||
102 | print_device_node_tree(PCI_DN(pc), dent); | ||
103 | pc = pc->sibling; | ||
104 | } | ||
105 | } | ||
106 | #endif | ||
107 | |||
108 | /** | 86 | /** |
109 | * eeh_disable_irq - Disable interrupt for the recovering device | 87 | * eeh_disable_irq - Disable interrupt for the recovering device |
110 | * @dev: PCI device | 88 | * @dev: PCI device |
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 1e4946c36f9e..35f0b62259bb 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c | |||
@@ -291,27 +291,25 @@ struct eeh_pe *eeh_pe_get(struct eeh_dev *edev) | |||
291 | */ | 291 | */ |
292 | static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) | 292 | static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev) |
293 | { | 293 | { |
294 | struct device_node *dn; | ||
295 | struct eeh_dev *parent; | 294 | struct eeh_dev *parent; |
295 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
296 | 296 | ||
297 | /* | 297 | /* |
298 | * It might have the case for the indirect parent | 298 | * It might have the case for the indirect parent |
299 | * EEH device already having associated PE, but | 299 | * EEH device already having associated PE, but |
300 | * the direct parent EEH device doesn't have yet. | 300 | * the direct parent EEH device doesn't have yet. |
301 | */ | 301 | */ |
302 | dn = edev->dn->parent; | 302 | pdn = pdn ? pdn->parent : NULL; |
303 | while (dn) { | 303 | while (pdn) { |
304 | /* We're poking out of PCI territory */ | 304 | /* We're poking out of PCI territory */ |
305 | if (!PCI_DN(dn)) return NULL; | 305 | parent = pdn_to_eeh_dev(pdn); |
306 | 306 | if (!parent) | |
307 | parent = of_node_to_eeh_dev(dn); | 307 | return NULL; |
308 | /* We're poking out of PCI territory */ | ||
309 | if (!parent) return NULL; | ||
310 | 308 | ||
311 | if (parent->pe) | 309 | if (parent->pe) |
312 | return parent->pe; | 310 | return parent->pe; |
313 | 311 | ||
314 | dn = dn->parent; | 312 | pdn = pdn->parent; |
315 | } | 313 | } |
316 | 314 | ||
317 | return NULL; | 315 | return NULL; |
@@ -330,6 +328,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) | |||
330 | { | 328 | { |
331 | struct eeh_pe *pe, *parent; | 329 | struct eeh_pe *pe, *parent; |
332 | 330 | ||
331 | /* Check if the PE number is valid */ | ||
332 | if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) { | ||
333 | pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n", | ||
334 | __func__, edev->config_addr, edev->phb->global_number); | ||
335 | return -EINVAL; | ||
336 | } | ||
337 | |||
333 | /* | 338 | /* |
334 | * Search the PE has been existing or not according | 339 | * Search the PE has been existing or not according |
335 | * to the PE address. If that has been existing, the | 340 | * to the PE address. If that has been existing, the |
@@ -338,21 +343,18 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) | |||
338 | */ | 343 | */ |
339 | pe = eeh_pe_get(edev); | 344 | pe = eeh_pe_get(edev); |
340 | if (pe && !(pe->type & EEH_PE_INVALID)) { | 345 | if (pe && !(pe->type & EEH_PE_INVALID)) { |
341 | if (!edev->pe_config_addr) { | ||
342 | pr_err("%s: PE with addr 0x%x already exists\n", | ||
343 | __func__, edev->config_addr); | ||
344 | return -EEXIST; | ||
345 | } | ||
346 | |||
347 | /* Mark the PE as type of PCI bus */ | 346 | /* Mark the PE as type of PCI bus */ |
348 | pe->type = EEH_PE_BUS; | 347 | pe->type = EEH_PE_BUS; |
349 | edev->pe = pe; | 348 | edev->pe = pe; |
350 | 349 | ||
351 | /* Put the edev to PE */ | 350 | /* Put the edev to PE */ |
352 | list_add_tail(&edev->list, &pe->edevs); | 351 | list_add_tail(&edev->list, &pe->edevs); |
353 | pr_debug("EEH: Add %s to Bus PE#%x\n", | 352 | pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n", |
354 | edev->dn->full_name, pe->addr); | 353 | edev->phb->global_number, |
355 | 354 | edev->config_addr >> 8, | |
355 | PCI_SLOT(edev->config_addr & 0xFF), | ||
356 | PCI_FUNC(edev->config_addr & 0xFF), | ||
357 | pe->addr); | ||
356 | return 0; | 358 | return 0; |
357 | } else if (pe && (pe->type & EEH_PE_INVALID)) { | 359 | } else if (pe && (pe->type & EEH_PE_INVALID)) { |
358 | list_add_tail(&edev->list, &pe->edevs); | 360 | list_add_tail(&edev->list, &pe->edevs); |
@@ -368,9 +370,14 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) | |||
368 | parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); | 370 | parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); |
369 | parent = parent->parent; | 371 | parent = parent->parent; |
370 | } | 372 | } |
371 | pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", | ||
372 | edev->dn->full_name, pe->addr, pe->parent->addr); | ||
373 | 373 | ||
374 | pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device " | ||
375 | "PE#%x, Parent PE#%x\n", | ||
376 | edev->phb->global_number, | ||
377 | edev->config_addr >> 8, | ||
378 | PCI_SLOT(edev->config_addr & 0xFF), | ||
379 | PCI_FUNC(edev->config_addr & 0xFF), | ||
380 | pe->addr, pe->parent->addr); | ||
374 | return 0; | 381 | return 0; |
375 | } | 382 | } |
376 | 383 | ||
@@ -409,8 +416,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) | |||
409 | list_add_tail(&pe->child, &parent->child_list); | 416 | list_add_tail(&pe->child, &parent->child_list); |
410 | list_add_tail(&edev->list, &pe->edevs); | 417 | list_add_tail(&edev->list, &pe->edevs); |
411 | edev->pe = pe; | 418 | edev->pe = pe; |
412 | pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", | 419 | pr_debug("EEH: Add %04x:%02x:%02x.%01x to " |
413 | edev->dn->full_name, pe->addr, pe->parent->addr); | 420 | "Device PE#%x, Parent PE#%x\n", |
421 | edev->phb->global_number, | ||
422 | edev->config_addr >> 8, | ||
423 | PCI_SLOT(edev->config_addr & 0xFF), | ||
424 | PCI_FUNC(edev->config_addr & 0xFF), | ||
425 | pe->addr, pe->parent->addr); | ||
414 | 426 | ||
415 | return 0; | 427 | return 0; |
416 | } | 428 | } |
@@ -430,8 +442,11 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev) | |||
430 | int cnt; | 442 | int cnt; |
431 | 443 | ||
432 | if (!edev->pe) { | 444 | if (!edev->pe) { |
433 | pr_debug("%s: No PE found for EEH device %s\n", | 445 | pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n", |
434 | __func__, edev->dn->full_name); | 446 | __func__, edev->phb->global_number, |
447 | edev->config_addr >> 8, | ||
448 | PCI_SLOT(edev->config_addr & 0xFF), | ||
449 | PCI_FUNC(edev->config_addr & 0xFF)); | ||
435 | return -EEXIST; | 450 | return -EEXIST; |
436 | } | 451 | } |
437 | 452 | ||
@@ -653,9 +668,9 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state) | |||
653 | * blocked on normal path during the stage. So we need utilize | 668 | * blocked on normal path during the stage. So we need utilize |
654 | * eeh operations, which is always permitted. | 669 | * eeh operations, which is always permitted. |
655 | */ | 670 | */ |
656 | static void eeh_bridge_check_link(struct eeh_dev *edev, | 671 | static void eeh_bridge_check_link(struct eeh_dev *edev) |
657 | struct device_node *dn) | ||
658 | { | 672 | { |
673 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
659 | int cap; | 674 | int cap; |
660 | uint32_t val; | 675 | uint32_t val; |
661 | int timeout = 0; | 676 | int timeout = 0; |
@@ -675,32 +690,32 @@ static void eeh_bridge_check_link(struct eeh_dev *edev, | |||
675 | 690 | ||
676 | /* Check slot status */ | 691 | /* Check slot status */ |
677 | cap = edev->pcie_cap; | 692 | cap = edev->pcie_cap; |
678 | eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val); | 693 | eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val); |
679 | if (!(val & PCI_EXP_SLTSTA_PDS)) { | 694 | if (!(val & PCI_EXP_SLTSTA_PDS)) { |
680 | pr_debug(" No card in the slot (0x%04x) !\n", val); | 695 | pr_debug(" No card in the slot (0x%04x) !\n", val); |
681 | return; | 696 | return; |
682 | } | 697 | } |
683 | 698 | ||
684 | /* Check power status if we have the capability */ | 699 | /* Check power status if we have the capability */ |
685 | eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val); | 700 | eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val); |
686 | if (val & PCI_EXP_SLTCAP_PCP) { | 701 | if (val & PCI_EXP_SLTCAP_PCP) { |
687 | eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val); | 702 | eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val); |
688 | if (val & PCI_EXP_SLTCTL_PCC) { | 703 | if (val & PCI_EXP_SLTCTL_PCC) { |
689 | pr_debug(" In power-off state, power it on ...\n"); | 704 | pr_debug(" In power-off state, power it on ...\n"); |
690 | val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC); | 705 | val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC); |
691 | val |= (0x0100 & PCI_EXP_SLTCTL_PIC); | 706 | val |= (0x0100 & PCI_EXP_SLTCTL_PIC); |
692 | eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val); | 707 | eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val); |
693 | msleep(2 * 1000); | 708 | msleep(2 * 1000); |
694 | } | 709 | } |
695 | } | 710 | } |
696 | 711 | ||
697 | /* Enable link */ | 712 | /* Enable link */ |
698 | eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val); | 713 | eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val); |
699 | val &= ~PCI_EXP_LNKCTL_LD; | 714 | val &= ~PCI_EXP_LNKCTL_LD; |
700 | eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val); | 715 | eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val); |
701 | 716 | ||
702 | /* Check link */ | 717 | /* Check link */ |
703 | eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val); | 718 | eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val); |
704 | if (!(val & PCI_EXP_LNKCAP_DLLLARC)) { | 719 | if (!(val & PCI_EXP_LNKCAP_DLLLARC)) { |
705 | pr_debug(" No link reporting capability (0x%08x) \n", val); | 720 | pr_debug(" No link reporting capability (0x%08x) \n", val); |
706 | msleep(1000); | 721 | msleep(1000); |
@@ -713,7 +728,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev, | |||
713 | msleep(20); | 728 | msleep(20); |
714 | timeout += 20; | 729 | timeout += 20; |
715 | 730 | ||
716 | eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val); | 731 | eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val); |
717 | if (val & PCI_EXP_LNKSTA_DLLLA) | 732 | if (val & PCI_EXP_LNKSTA_DLLLA) |
718 | break; | 733 | break; |
719 | } | 734 | } |
@@ -728,9 +743,9 @@ static void eeh_bridge_check_link(struct eeh_dev *edev, | |||
728 | #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) | 743 | #define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) |
729 | #define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) | 744 | #define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) |
730 | 745 | ||
731 | static void eeh_restore_bridge_bars(struct eeh_dev *edev, | 746 | static void eeh_restore_bridge_bars(struct eeh_dev *edev) |
732 | struct device_node *dn) | ||
733 | { | 747 | { |
748 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
734 | int i; | 749 | int i; |
735 | 750 | ||
736 | /* | 751 | /* |
@@ -738,49 +753,49 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev, | |||
738 | * Bus numbers and windows: 0x18 - 0x30 | 753 | * Bus numbers and windows: 0x18 - 0x30 |
739 | */ | 754 | */ |
740 | for (i = 4; i < 13; i++) | 755 | for (i = 4; i < 13; i++) |
741 | eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); | 756 | eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]); |
742 | /* Rom: 0x38 */ | 757 | /* Rom: 0x38 */ |
743 | eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]); | 758 | eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]); |
744 | 759 | ||
745 | /* Cache line & Latency timer: 0xC 0xD */ | 760 | /* Cache line & Latency timer: 0xC 0xD */ |
746 | eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, | 761 | eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1, |
747 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); | 762 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); |
748 | eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, | 763 | eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1, |
749 | SAVED_BYTE(PCI_LATENCY_TIMER)); | 764 | SAVED_BYTE(PCI_LATENCY_TIMER)); |
750 | /* Max latency, min grant, interrupt ping and line: 0x3C */ | 765 | /* Max latency, min grant, interrupt ping and line: 0x3C */ |
751 | eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); | 766 | eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); |
752 | 767 | ||
753 | /* PCI Command: 0x4 */ | 768 | /* PCI Command: 0x4 */ |
754 | eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]); | 769 | eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); |
755 | 770 | ||
756 | /* Check the PCIe link is ready */ | 771 | /* Check the PCIe link is ready */ |
757 | eeh_bridge_check_link(edev, dn); | 772 | eeh_bridge_check_link(edev); |
758 | } | 773 | } |
759 | 774 | ||
760 | static void eeh_restore_device_bars(struct eeh_dev *edev, | 775 | static void eeh_restore_device_bars(struct eeh_dev *edev) |
761 | struct device_node *dn) | ||
762 | { | 776 | { |
777 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); | ||
763 | int i; | 778 | int i; |
764 | u32 cmd; | 779 | u32 cmd; |
765 | 780 | ||
766 | for (i = 4; i < 10; i++) | 781 | for (i = 4; i < 10; i++) |
767 | eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]); | 782 | eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]); |
768 | /* 12 == Expansion ROM Address */ | 783 | /* 12 == Expansion ROM Address */ |
769 | eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]); | 784 | eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]); |
770 | 785 | ||
771 | eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1, | 786 | eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1, |
772 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); | 787 | SAVED_BYTE(PCI_CACHE_LINE_SIZE)); |
773 | eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1, | 788 | eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1, |
774 | SAVED_BYTE(PCI_LATENCY_TIMER)); | 789 | SAVED_BYTE(PCI_LATENCY_TIMER)); |
775 | 790 | ||
776 | /* max latency, min grant, interrupt pin and line */ | 791 | /* max latency, min grant, interrupt pin and line */ |
777 | eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]); | 792 | eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); |
778 | 793 | ||
779 | /* | 794 | /* |
780 | * Restore PERR & SERR bits, some devices require it, | 795 | * Restore PERR & SERR bits, some devices require it, |
781 | * don't touch the other command bits | 796 | * don't touch the other command bits |
782 | */ | 797 | */ |
783 | eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd); | 798 | eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd); |
784 | if (edev->config_space[1] & PCI_COMMAND_PARITY) | 799 | if (edev->config_space[1] & PCI_COMMAND_PARITY) |
785 | cmd |= PCI_COMMAND_PARITY; | 800 | cmd |= PCI_COMMAND_PARITY; |
786 | else | 801 | else |
@@ -789,7 +804,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev, | |||
789 | cmd |= PCI_COMMAND_SERR; | 804 | cmd |= PCI_COMMAND_SERR; |
790 | else | 805 | else |
791 | cmd &= ~PCI_COMMAND_SERR; | 806 | cmd &= ~PCI_COMMAND_SERR; |
792 | eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd); | 807 | eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd); |
793 | } | 808 | } |
794 | 809 | ||
795 | /** | 810 | /** |
@@ -804,16 +819,16 @@ static void eeh_restore_device_bars(struct eeh_dev *edev, | |||
804 | static void *eeh_restore_one_device_bars(void *data, void *flag) | 819 | static void *eeh_restore_one_device_bars(void *data, void *flag) |
805 | { | 820 | { |
806 | struct eeh_dev *edev = (struct eeh_dev *)data; | 821 | struct eeh_dev *edev = (struct eeh_dev *)data; |
807 | struct device_node *dn = eeh_dev_to_of_node(edev); | 822 | struct pci_dn *pdn = eeh_dev_to_pdn(edev); |
808 | 823 | ||
809 | /* Do special restore for bridges */ | 824 | /* Do special restore for bridges */ |
810 | if (edev->mode & EEH_DEV_BRIDGE) | 825 | if (edev->mode & EEH_DEV_BRIDGE) |
811 | eeh_restore_bridge_bars(edev, dn); | 826 | eeh_restore_bridge_bars(edev); |
812 | else | 827 | else |
813 | eeh_restore_device_bars(edev, dn); | 828 | eeh_restore_device_bars(edev); |
814 | 829 | ||
815 | if (eeh_ops->restore_config) | 830 | if (eeh_ops->restore_config && pdn) |
816 | eeh_ops->restore_config(dn); | 831 | eeh_ops->restore_config(pdn); |
817 | 832 | ||
818 | return NULL; | 833 | return NULL; |
819 | } | 834 | } |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index d180caf2d6de..8ca9434c40e6 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
35 | #include <asm/hw_irq.h> | 35 | #include <asm/hw_irq.h> |
36 | #include <asm/context_tracking.h> | 36 | #include <asm/context_tracking.h> |
37 | #include <asm/tm.h> | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * System calls. | 40 | * System calls. |
@@ -145,6 +146,24 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
145 | andi. r11,r10,_TIF_SYSCALL_DOTRACE | 146 | andi. r11,r10,_TIF_SYSCALL_DOTRACE |
146 | bne syscall_dotrace | 147 | bne syscall_dotrace |
147 | .Lsyscall_dotrace_cont: | 148 | .Lsyscall_dotrace_cont: |
149 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
150 | BEGIN_FTR_SECTION | ||
151 | b 1f | ||
152 | END_FTR_SECTION_IFCLR(CPU_FTR_TM) | ||
153 | extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ | ||
154 | beq+ 1f | ||
155 | |||
156 | /* Doom the transaction and don't perform the syscall: */ | ||
157 | mfmsr r11 | ||
158 | li r12, 1 | ||
159 | rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG | ||
160 | mtmsrd r11, 0 | ||
161 | li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT) | ||
162 | TABORT(R11) | ||
163 | |||
164 | b .Lsyscall_exit | ||
165 | 1: | ||
166 | #endif | ||
148 | cmpldi 0,r0,NR_syscalls | 167 | cmpldi 0,r0,NR_syscalls |
149 | bge- syscall_enosys | 168 | bge- syscall_enosys |
150 | 169 | ||
@@ -356,6 +375,11 @@ _GLOBAL(ppc64_swapcontext) | |||
356 | bl sys_swapcontext | 375 | bl sys_swapcontext |
357 | b .Lsyscall_exit | 376 | b .Lsyscall_exit |
358 | 377 | ||
378 | _GLOBAL(ppc_switch_endian) | ||
379 | bl save_nvgprs | ||
380 | bl sys_switch_endian | ||
381 | b .Lsyscall_exit | ||
382 | |||
359 | _GLOBAL(ret_from_fork) | 383 | _GLOBAL(ret_from_fork) |
360 | bl schedule_tail | 384 | bl schedule_tail |
361 | REST_NVGPRS(r1) | 385 | REST_NVGPRS(r1) |
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index 05adc8bbdef8..eeaa0d5f69d5 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -94,6 +94,7 @@ _GLOBAL(power7_powersave_common) | |||
94 | beq 1f | 94 | beq 1f |
95 | addi r1,r1,INT_FRAME_SIZE | 95 | addi r1,r1,INT_FRAME_SIZE |
96 | ld r0,16(r1) | 96 | ld r0,16(r1) |
97 | li r3,0 /* Return 0 (no nap) */ | ||
97 | mtlr r0 | 98 | mtlr r0 |
98 | blr | 99 | blr |
99 | 100 | ||
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index b6f123ab90ed..2c647b1e62e4 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c | |||
@@ -28,6 +28,55 @@ | |||
28 | #include <asm/mce.h> | 28 | #include <asm/mce.h> |
29 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
30 | 30 | ||
31 | static void flush_tlb_206(unsigned int num_sets, unsigned int action) | ||
32 | { | ||
33 | unsigned long rb; | ||
34 | unsigned int i; | ||
35 | |||
36 | switch (action) { | ||
37 | case TLB_INVAL_SCOPE_GLOBAL: | ||
38 | rb = TLBIEL_INVAL_SET; | ||
39 | break; | ||
40 | case TLB_INVAL_SCOPE_LPID: | ||
41 | rb = TLBIEL_INVAL_SET_LPID; | ||
42 | break; | ||
43 | default: | ||
44 | BUG(); | ||
45 | break; | ||
46 | } | ||
47 | |||
48 | asm volatile("ptesync" : : : "memory"); | ||
49 | for (i = 0; i < num_sets; i++) { | ||
50 | asm volatile("tlbiel %0" : : "r" (rb)); | ||
51 | rb += 1 << TLBIEL_INVAL_SET_SHIFT; | ||
52 | } | ||
53 | asm volatile("ptesync" : : : "memory"); | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * Generic routine to flush TLB on power7. This routine is used as | ||
58 | * flush_tlb hook in cpu_spec for Power7 processor. | ||
59 | * | ||
60 | * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs. | ||
61 | * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID. | ||
62 | */ | ||
63 | void __flush_tlb_power7(unsigned int action) | ||
64 | { | ||
65 | flush_tlb_206(POWER7_TLB_SETS, action); | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * Generic routine to flush TLB on power8. This routine is used as | ||
70 | * flush_tlb hook in cpu_spec for power8 processor. | ||
71 | * | ||
72 | * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs. | ||
73 | * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID. | ||
74 | */ | ||
75 | void __flush_tlb_power8(unsigned int action) | ||
76 | { | ||
77 | flush_tlb_206(POWER8_TLB_SETS, action); | ||
78 | } | ||
79 | |||
31 | /* flush SLBs and reload */ | 80 | /* flush SLBs and reload */ |
32 | static void flush_and_reload_slb(void) | 81 | static void flush_and_reload_slb(void) |
33 | { | 82 | { |
@@ -79,7 +128,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) | |||
79 | } | 128 | } |
80 | if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { | 129 | if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { |
81 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) | 130 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) |
82 | cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); | 131 | cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL); |
83 | /* reset error bits */ | 132 | /* reset error bits */ |
84 | dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; | 133 | dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; |
85 | } | 134 | } |
@@ -110,7 +159,7 @@ static long mce_handle_common_ierror(uint64_t srr1) | |||
110 | break; | 159 | break; |
111 | case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: | 160 | case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: |
112 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { | 161 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { |
113 | cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); | 162 | cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL); |
114 | handled = 1; | 163 | handled = 1; |
115 | } | 164 | } |
116 | break; | 165 | break; |
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 34f7c9b7cd96..1e703f8ebad4 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c | |||
@@ -26,6 +26,9 @@ | |||
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
29 | #include <linux/kmsg_dump.h> | ||
30 | #include <linux/pstore.h> | ||
31 | #include <linux/zlib.h> | ||
29 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
30 | #include <asm/nvram.h> | 33 | #include <asm/nvram.h> |
31 | #include <asm/rtas.h> | 34 | #include <asm/rtas.h> |
@@ -54,6 +57,680 @@ struct nvram_partition { | |||
54 | 57 | ||
55 | static LIST_HEAD(nvram_partitions); | 58 | static LIST_HEAD(nvram_partitions); |
56 | 59 | ||
#ifdef CONFIG_PPC_PSERIES
/*
 * NVRAM partition used to persist RTAS error logs across a crash/reboot.
 * index == -1 means "not yet located or created in NVRAM".
 */
struct nvram_os_partition rtas_log_partition = {
	.name = "ibm,rtas-log",
	.req_size = 2079,
	.min_size = 1055,
	.index = -1,
	.os_partition = true
};
#endif

/* NVRAM partition that receives (possibly compressed) oops/panic reports. */
struct nvram_os_partition oops_log_partition = {
	.name = "lnx,oops-log",
	.req_size = 4000,		/* preferred size, see nvram_init_os_partition() */
	.min_size = 2000,		/* smallest acceptable size */
	.index = -1,
	.os_partition = true
};

/*
 * NULL-terminated list of all OS-signature partition names.  Passed to
 * nvram_remove_partition() so obsolete OS partitions can be recycled
 * when NVRAM runs out of space (see nvram_init_os_partition()).
 */
static const char *nvram_os_partitions[] = {
#ifdef CONFIG_PPC_PSERIES
	"ibm,rtas-log",
#endif
	"lnx,oops-log",
	NULL
};
85 | |||
static void oops_to_nvram(struct kmsg_dumper *dumper,
			  enum kmsg_dump_reason reason);

/* kmsg dumper used when pstore registration is unavailable or fails. */
static struct kmsg_dumper nvram_kmsg_dumper = {
	.dump = oops_to_nvram
};

/*
 * For capturing and compressing an oops or panic report...
 *
 * big_oops_buf[] holds the uncompressed text we're capturing.
 *
 * oops_buf[] holds the compressed text, preceded by a oops header.
 * oops header has u16 holding the version of oops header (to differentiate
 * between old and new format header) followed by u16 holding the length of
 * the compressed* text (*Or uncompressed, if compression fails.) and u64
 * holding the timestamp. oops_buf[] gets written to NVRAM.
 *
 * oops_log_info points to the header. oops_data points to the compressed text.
 *
 * +- oops_buf
 * |                                   +- oops_data
 * v                                   v
 * +-----------+-----------+-----------+------------------------+
 * | version   | length    | timestamp | text                   |
 * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes)   |
 * +-----------+-----------+-----------+------------------------+
 * ^
 * +- oops_log_info
 *
 * We preallocate these buffers during init to avoid kmalloc during oops/panic.
 */
static size_t big_oops_buf_sz;		/* capacity of big_oops_buf */
static char *big_oops_buf, *oops_buf;
static char *oops_data;			/* oops_buf + sizeof(struct oops_log_info) */
static size_t oops_data_sz;		/* bytes available after the header */

/* zlib parameters used by nvram_compress() / the deflate workspace. */
#define COMPR_LEVEL 6
#define WINDOW_BITS 12
#define MEM_LEVEL 4
static struct z_stream_s stream;
128 | |||
#ifdef CONFIG_PSTORE
#ifdef CONFIG_PPC_POWERNV
/*
 * Firmware (skiboot) partition.  os_partition == false: it pre-exists,
 * carries no err_log_info header, and is located by signature at read
 * time rather than created by us.
 */
static struct nvram_os_partition skiboot_partition = {
	.name = "ibm,skiboot",
	.index = -1,
	.os_partition = false
};
#endif

#ifdef CONFIG_PPC_PSERIES
/* Open Firmware config partition; pre-existing, located by signature. */
static struct nvram_os_partition of_config_partition = {
	.name = "of-config",
	.index = -1,
	.os_partition = false
};
#endif

/* "common" partition; pre-existing, located by signature. */
static struct nvram_os_partition common_partition = {
	.name = "common",
	.index = -1,
	.os_partition = false
};

/*
 * Order in which nvram_pstore_read() hands partitions to pstore.  The
 * -1 placeholder slots are filled in by nvram_pstore_init() depending
 * on the platform (RTAS/OF on pseries, OPAL otherwise).
 */
static enum pstore_type_id nvram_type_ids[] = {
	PSTORE_TYPE_DMESG,
	PSTORE_TYPE_PPC_COMMON,
	-1,
	-1,
	-1
};
/* Iterator over nvram_type_ids[]; reset by nvram_pstore_open(). */
static int read_type;
#endif
161 | |||
162 | /* nvram_write_os_partition | ||
163 | * | ||
164 | * We need to buffer the error logs into nvram to ensure that we have | ||
165 | * the failure information to decode. If we have a severe error there | ||
166 | * is no way to guarantee that the OS or the machine is in a state to | ||
167 | * get back to user land and write the error to disk. For example if | ||
168 | * the SCSI device driver causes a Machine Check by writing to a bad | ||
169 | * IO address, there is no way of guaranteeing that the device driver | ||
170 | * is in any state that is would also be able to write the error data | ||
171 | * captured to disk, thus we buffer it in NVRAM for analysis on the | ||
172 | * next boot. | ||
173 | * | ||
174 | * In NVRAM the partition containing the error log buffer will looks like: | ||
175 | * Header (in bytes): | ||
176 | * +-----------+----------+--------+------------+------------------+ | ||
177 | * | signature | checksum | length | name | data | | ||
178 | * |0 |1 |2 3|4 15|16 length-1| | ||
179 | * +-----------+----------+--------+------------+------------------+ | ||
180 | * | ||
181 | * The 'data' section would look like (in bytes): | ||
182 | * +--------------+------------+-----------------------------------+ | ||
183 | * | event_logged | sequence # | error log | | ||
184 | * |0 3|4 7|8 error_log_size-1| | ||
185 | * +--------------+------------+-----------------------------------+ | ||
186 | * | ||
187 | * event_logged: 0 if event has not been logged to syslog, 1 if it has | ||
188 | * sequence #: The unique sequence # for each event. (until it wraps) | ||
189 | * error log: The error log from event_scan | ||
190 | */ | ||
191 | int nvram_write_os_partition(struct nvram_os_partition *part, | ||
192 | char *buff, int length, | ||
193 | unsigned int err_type, | ||
194 | unsigned int error_log_cnt) | ||
195 | { | ||
196 | int rc; | ||
197 | loff_t tmp_index; | ||
198 | struct err_log_info info; | ||
199 | |||
200 | if (part->index == -1) | ||
201 | return -ESPIPE; | ||
202 | |||
203 | if (length > part->size) | ||
204 | length = part->size; | ||
205 | |||
206 | info.error_type = cpu_to_be32(err_type); | ||
207 | info.seq_num = cpu_to_be32(error_log_cnt); | ||
208 | |||
209 | tmp_index = part->index; | ||
210 | |||
211 | rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), | ||
212 | &tmp_index); | ||
213 | if (rc <= 0) { | ||
214 | pr_err("%s: Failed nvram_write (%d)\n", __func__, rc); | ||
215 | return rc; | ||
216 | } | ||
217 | |||
218 | rc = ppc_md.nvram_write(buff, length, &tmp_index); | ||
219 | if (rc <= 0) { | ||
220 | pr_err("%s: Failed nvram_write (%d)\n", __func__, rc); | ||
221 | return rc; | ||
222 | } | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | /* nvram_read_partition | ||
228 | * | ||
229 | * Reads nvram partition for at most 'length' | ||
230 | */ | ||
231 | int nvram_read_partition(struct nvram_os_partition *part, char *buff, | ||
232 | int length, unsigned int *err_type, | ||
233 | unsigned int *error_log_cnt) | ||
234 | { | ||
235 | int rc; | ||
236 | loff_t tmp_index; | ||
237 | struct err_log_info info; | ||
238 | |||
239 | if (part->index == -1) | ||
240 | return -1; | ||
241 | |||
242 | if (length > part->size) | ||
243 | length = part->size; | ||
244 | |||
245 | tmp_index = part->index; | ||
246 | |||
247 | if (part->os_partition) { | ||
248 | rc = ppc_md.nvram_read((char *)&info, | ||
249 | sizeof(struct err_log_info), | ||
250 | &tmp_index); | ||
251 | if (rc <= 0) { | ||
252 | pr_err("%s: Failed nvram_read (%d)\n", __func__, rc); | ||
253 | return rc; | ||
254 | } | ||
255 | } | ||
256 | |||
257 | rc = ppc_md.nvram_read(buff, length, &tmp_index); | ||
258 | if (rc <= 0) { | ||
259 | pr_err("%s: Failed nvram_read (%d)\n", __func__, rc); | ||
260 | return rc; | ||
261 | } | ||
262 | |||
263 | if (part->os_partition) { | ||
264 | *error_log_cnt = be32_to_cpu(info.seq_num); | ||
265 | *err_type = be32_to_cpu(info.error_type); | ||
266 | } | ||
267 | |||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | /* nvram_init_os_partition | ||
272 | * | ||
273 | * This sets up a partition with an "OS" signature. | ||
274 | * | ||
275 | * The general strategy is the following: | ||
276 | * 1.) If a partition with the indicated name already exists... | ||
277 | * - If it's large enough, use it. | ||
278 | * - Otherwise, recycle it and keep going. | ||
279 | * 2.) Search for a free partition that is large enough. | ||
280 | * 3.) If there's not a free partition large enough, recycle any obsolete | ||
281 | * OS partitions and try again. | ||
282 | * 4.) Will first try getting a chunk that will satisfy the requested size. | ||
283 | * 5.) If a chunk of the requested size cannot be allocated, then try finding | ||
284 | * a chunk that will satisfy the minum needed. | ||
285 | * | ||
286 | * Returns 0 on success, else -1. | ||
287 | */ | ||
288 | int __init nvram_init_os_partition(struct nvram_os_partition *part) | ||
289 | { | ||
290 | loff_t p; | ||
291 | int size; | ||
292 | |||
293 | /* Look for ours */ | ||
294 | p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size); | ||
295 | |||
296 | /* Found one but too small, remove it */ | ||
297 | if (p && size < part->min_size) { | ||
298 | pr_info("nvram: Found too small %s partition," | ||
299 | " removing it...\n", part->name); | ||
300 | nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL); | ||
301 | p = 0; | ||
302 | } | ||
303 | |||
304 | /* Create one if we didn't find */ | ||
305 | if (!p) { | ||
306 | p = nvram_create_partition(part->name, NVRAM_SIG_OS, | ||
307 | part->req_size, part->min_size); | ||
308 | if (p == -ENOSPC) { | ||
309 | pr_info("nvram: No room to create %s partition, " | ||
310 | "deleting any obsolete OS partitions...\n", | ||
311 | part->name); | ||
312 | nvram_remove_partition(NULL, NVRAM_SIG_OS, | ||
313 | nvram_os_partitions); | ||
314 | p = nvram_create_partition(part->name, NVRAM_SIG_OS, | ||
315 | part->req_size, part->min_size); | ||
316 | } | ||
317 | } | ||
318 | |||
319 | if (p <= 0) { | ||
320 | pr_err("nvram: Failed to find or create %s" | ||
321 | " partition, err %d\n", part->name, (int)p); | ||
322 | return -1; | ||
323 | } | ||
324 | |||
325 | part->index = p; | ||
326 | part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info); | ||
327 | |||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | /* Derived from logfs_compress() */ | ||
332 | static int nvram_compress(const void *in, void *out, size_t inlen, | ||
333 | size_t outlen) | ||
334 | { | ||
335 | int err, ret; | ||
336 | |||
337 | ret = -EIO; | ||
338 | err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS, | ||
339 | MEM_LEVEL, Z_DEFAULT_STRATEGY); | ||
340 | if (err != Z_OK) | ||
341 | goto error; | ||
342 | |||
343 | stream.next_in = in; | ||
344 | stream.avail_in = inlen; | ||
345 | stream.total_in = 0; | ||
346 | stream.next_out = out; | ||
347 | stream.avail_out = outlen; | ||
348 | stream.total_out = 0; | ||
349 | |||
350 | err = zlib_deflate(&stream, Z_FINISH); | ||
351 | if (err != Z_STREAM_END) | ||
352 | goto error; | ||
353 | |||
354 | err = zlib_deflateEnd(&stream); | ||
355 | if (err != Z_OK) | ||
356 | goto error; | ||
357 | |||
358 | if (stream.total_out >= stream.total_in) | ||
359 | goto error; | ||
360 | |||
361 | ret = stream.total_out; | ||
362 | error: | ||
363 | return ret; | ||
364 | } | ||
365 | |||
366 | /* Compress the text from big_oops_buf into oops_buf. */ | ||
367 | static int zip_oops(size_t text_len) | ||
368 | { | ||
369 | struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; | ||
370 | int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len, | ||
371 | oops_data_sz); | ||
372 | if (zipped_len < 0) { | ||
373 | pr_err("nvram: compression failed; returned %d\n", zipped_len); | ||
374 | pr_err("nvram: logging uncompressed oops/panic report\n"); | ||
375 | return -1; | ||
376 | } | ||
377 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); | ||
378 | oops_hdr->report_length = cpu_to_be16(zipped_len); | ||
379 | oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds()); | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | #ifdef CONFIG_PSTORE | ||
384 | static int nvram_pstore_open(struct pstore_info *psi) | ||
385 | { | ||
386 | /* Reset the iterator to start reading partitions again */ | ||
387 | read_type = -1; | ||
388 | return 0; | ||
389 | } | ||
390 | |||
/**
 * nvram_pstore_write - pstore write callback for nvram
 * @type:               Type of message logged
 * @reason:             reason behind dump (oops/panic)
 * @id:                 identifier to indicate the write performed
 * @part:               pstore writes data to registered buffer in parts,
 *                      part number will indicate the same.
 * @count:              Indicates oops count
 * @compressed:         Flag to indicate the log is compressed
 * @size:               number of bytes written to the registered buffer
 * @psi:                registered pstore_info structure
 *
 * Called by pstore_dump() when an oops or panic report is logged in the
 * printk buffer.
 * Returns 0 on successful write.
 */
static int nvram_pstore_write(enum pstore_type_id type,
				enum kmsg_dump_reason reason,
				u64 *id, unsigned int part, int count,
				bool compressed, size_t size,
				struct pstore_info *psi)
{
	int rc;
	unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
	struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;

	/* part 1 has the recent messages from printk buffer */
	if (part > 1 || (type != PSTORE_TYPE_DMESG))
		return -1;

	/* Skip if writing would clobber an as-yet-unread RTAS event. */
	if (clobbering_unread_rtas_event())
		return -1;

	/*
	 * pstore filled nvram_pstore_info.buf (== oops_data), which sits
	 * directly after this header inside oops_buf -- fill it in.
	 */
	oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
	oops_hdr->report_length = cpu_to_be16(size);
	oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());

	if (compressed)
		err_type = ERR_TYPE_KERNEL_PANIC_GZ;

	/* Write header + text as one record into the oops partition. */
	rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
		(int) (sizeof(*oops_hdr) + size), err_type, count);

	if (rc != 0)
		return rc;

	*id = part;
	return 0;
}
440 | |||
441 | /* | ||
442 | * Reads the oops/panic report, rtas, of-config and common partition. | ||
443 | * Returns the length of the data we read from each partition. | ||
444 | * Returns 0 if we've been called before. | ||
445 | */ | ||
446 | static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, | ||
447 | int *count, struct timespec *time, char **buf, | ||
448 | bool *compressed, struct pstore_info *psi) | ||
449 | { | ||
450 | struct oops_log_info *oops_hdr; | ||
451 | unsigned int err_type, id_no, size = 0; | ||
452 | struct nvram_os_partition *part = NULL; | ||
453 | char *buff = NULL; | ||
454 | int sig = 0; | ||
455 | loff_t p; | ||
456 | |||
457 | read_type++; | ||
458 | |||
459 | switch (nvram_type_ids[read_type]) { | ||
460 | case PSTORE_TYPE_DMESG: | ||
461 | part = &oops_log_partition; | ||
462 | *type = PSTORE_TYPE_DMESG; | ||
463 | break; | ||
464 | case PSTORE_TYPE_PPC_COMMON: | ||
465 | sig = NVRAM_SIG_SYS; | ||
466 | part = &common_partition; | ||
467 | *type = PSTORE_TYPE_PPC_COMMON; | ||
468 | *id = PSTORE_TYPE_PPC_COMMON; | ||
469 | time->tv_sec = 0; | ||
470 | time->tv_nsec = 0; | ||
471 | break; | ||
472 | #ifdef CONFIG_PPC_PSERIES | ||
473 | case PSTORE_TYPE_PPC_RTAS: | ||
474 | part = &rtas_log_partition; | ||
475 | *type = PSTORE_TYPE_PPC_RTAS; | ||
476 | time->tv_sec = last_rtas_event; | ||
477 | time->tv_nsec = 0; | ||
478 | break; | ||
479 | case PSTORE_TYPE_PPC_OF: | ||
480 | sig = NVRAM_SIG_OF; | ||
481 | part = &of_config_partition; | ||
482 | *type = PSTORE_TYPE_PPC_OF; | ||
483 | *id = PSTORE_TYPE_PPC_OF; | ||
484 | time->tv_sec = 0; | ||
485 | time->tv_nsec = 0; | ||
486 | break; | ||
487 | #endif | ||
488 | #ifdef CONFIG_PPC_POWERNV | ||
489 | case PSTORE_TYPE_PPC_OPAL: | ||
490 | sig = NVRAM_SIG_FW; | ||
491 | part = &skiboot_partition; | ||
492 | *type = PSTORE_TYPE_PPC_OPAL; | ||
493 | *id = PSTORE_TYPE_PPC_OPAL; | ||
494 | time->tv_sec = 0; | ||
495 | time->tv_nsec = 0; | ||
496 | break; | ||
497 | #endif | ||
498 | default: | ||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | if (!part->os_partition) { | ||
503 | p = nvram_find_partition(part->name, sig, &size); | ||
504 | if (p <= 0) { | ||
505 | pr_err("nvram: Failed to find partition %s, " | ||
506 | "err %d\n", part->name, (int)p); | ||
507 | return 0; | ||
508 | } | ||
509 | part->index = p; | ||
510 | part->size = size; | ||
511 | } | ||
512 | |||
513 | buff = kmalloc(part->size, GFP_KERNEL); | ||
514 | |||
515 | if (!buff) | ||
516 | return -ENOMEM; | ||
517 | |||
518 | if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) { | ||
519 | kfree(buff); | ||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | *count = 0; | ||
524 | |||
525 | if (part->os_partition) | ||
526 | *id = id_no; | ||
527 | |||
528 | if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { | ||
529 | size_t length, hdr_size; | ||
530 | |||
531 | oops_hdr = (struct oops_log_info *)buff; | ||
532 | if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { | ||
533 | /* Old format oops header had 2-byte record size */ | ||
534 | hdr_size = sizeof(u16); | ||
535 | length = be16_to_cpu(oops_hdr->version); | ||
536 | time->tv_sec = 0; | ||
537 | time->tv_nsec = 0; | ||
538 | } else { | ||
539 | hdr_size = sizeof(*oops_hdr); | ||
540 | length = be16_to_cpu(oops_hdr->report_length); | ||
541 | time->tv_sec = be64_to_cpu(oops_hdr->timestamp); | ||
542 | time->tv_nsec = 0; | ||
543 | } | ||
544 | *buf = kmalloc(length, GFP_KERNEL); | ||
545 | if (*buf == NULL) | ||
546 | return -ENOMEM; | ||
547 | memcpy(*buf, buff + hdr_size, length); | ||
548 | kfree(buff); | ||
549 | |||
550 | if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) | ||
551 | *compressed = true; | ||
552 | else | ||
553 | *compressed = false; | ||
554 | return length; | ||
555 | } | ||
556 | |||
557 | *buf = buff; | ||
558 | return part->size; | ||
559 | } | ||
560 | |||
/* pstore backend description registered with the pstore core. */
static struct pstore_info nvram_pstore_info = {
	.owner = THIS_MODULE,
	.name = "nvram",
	.open = nvram_pstore_open,
	.read = nvram_pstore_read,
	.write = nvram_pstore_write,
};

/*
 * Register this NVRAM driver as a pstore backend.  The -1 placeholder
 * slots in nvram_type_ids[] are filled in per platform here (RTAS/OF
 * partitions on pseries, the OPAL partition otherwise).  Returns 0 on
 * success; non-zero tells the caller to fall back to kmsg_dump.
 */
static int nvram_pstore_init(void)
{
	int rc = 0;

	if (machine_is(pseries)) {
		nvram_type_ids[2] = PSTORE_TYPE_PPC_RTAS;
		nvram_type_ids[3] = PSTORE_TYPE_PPC_OF;
	} else
		nvram_type_ids[2] = PSTORE_TYPE_PPC_OPAL;

	/* pstore logs oops text straight into our preallocated buffer. */
	nvram_pstore_info.buf = oops_data;
	nvram_pstore_info.bufsize = oops_data_sz;

	spin_lock_init(&nvram_pstore_info.buf_lock);

	rc = pstore_register(&nvram_pstore_info);
	if (rc != 0)
		pr_err("nvram: pstore_register() failed, defaults to "
				"kmsg_dump; returned %d\n", rc);

	return rc;
}
#else
/* Without CONFIG_PSTORE, always fall back to the kmsg_dump path. */
static int nvram_pstore_init(void)
{
	return -1;
}
#endif
597 | |||
598 | void __init nvram_init_oops_partition(int rtas_partition_exists) | ||
599 | { | ||
600 | int rc; | ||
601 | |||
602 | rc = nvram_init_os_partition(&oops_log_partition); | ||
603 | if (rc != 0) { | ||
604 | #ifdef CONFIG_PPC_PSERIES | ||
605 | if (!rtas_partition_exists) { | ||
606 | pr_err("nvram: Failed to initialize oops partition!"); | ||
607 | return; | ||
608 | } | ||
609 | pr_notice("nvram: Using %s partition to log both" | ||
610 | " RTAS errors and oops/panic reports\n", | ||
611 | rtas_log_partition.name); | ||
612 | memcpy(&oops_log_partition, &rtas_log_partition, | ||
613 | sizeof(rtas_log_partition)); | ||
614 | #else | ||
615 | pr_err("nvram: Failed to initialize oops partition!"); | ||
616 | return; | ||
617 | #endif | ||
618 | } | ||
619 | oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL); | ||
620 | if (!oops_buf) { | ||
621 | pr_err("nvram: No memory for %s partition\n", | ||
622 | oops_log_partition.name); | ||
623 | return; | ||
624 | } | ||
625 | oops_data = oops_buf + sizeof(struct oops_log_info); | ||
626 | oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info); | ||
627 | |||
628 | rc = nvram_pstore_init(); | ||
629 | |||
630 | if (!rc) | ||
631 | return; | ||
632 | |||
633 | /* | ||
634 | * Figure compression (preceded by elimination of each line's <n> | ||
635 | * severity prefix) will reduce the oops/panic report to at most | ||
636 | * 45% of its original size. | ||
637 | */ | ||
638 | big_oops_buf_sz = (oops_data_sz * 100) / 45; | ||
639 | big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); | ||
640 | if (big_oops_buf) { | ||
641 | stream.workspace = kmalloc(zlib_deflate_workspacesize( | ||
642 | WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); | ||
643 | if (!stream.workspace) { | ||
644 | pr_err("nvram: No memory for compression workspace; " | ||
645 | "skipping compression of %s partition data\n", | ||
646 | oops_log_partition.name); | ||
647 | kfree(big_oops_buf); | ||
648 | big_oops_buf = NULL; | ||
649 | } | ||
650 | } else { | ||
651 | pr_err("No memory for uncompressed %s data; " | ||
652 | "skipping compression\n", oops_log_partition.name); | ||
653 | stream.workspace = NULL; | ||
654 | } | ||
655 | |||
656 | rc = kmsg_dump_register(&nvram_kmsg_dumper); | ||
657 | if (rc != 0) { | ||
658 | pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); | ||
659 | kfree(oops_buf); | ||
660 | kfree(big_oops_buf); | ||
661 | kfree(stream.workspace); | ||
662 | } | ||
663 | } | ||
664 | |||
/*
 * This is our kmsg_dump callback, called after an oops or panic report
 * has been written to the printk buffer.  We want to capture as much
 * of the printk buffer as possible.  First, capture as much as we can
 * that we think will compress sufficiently to fit in the lnx,oops-log
 * partition.  If that's too much, go back and capture uncompressed text.
 */
static void oops_to_nvram(struct kmsg_dumper *dumper,
			  enum kmsg_dump_reason reason)
{
	struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
	static unsigned int oops_count = 0;	/* sequence # for each record */
	static bool panicking = false;
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	size_t text_len;
	unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
	int rc = -1;

	switch (reason) {
	case KMSG_DUMP_RESTART:
	case KMSG_DUMP_HALT:
	case KMSG_DUMP_POWEROFF:
		/* These are almost always orderly shutdowns. */
		return;
	case KMSG_DUMP_OOPS:
		break;
	case KMSG_DUMP_PANIC:
		/* Remember, so a later EMERG dump doesn't overwrite this. */
		panicking = true;
		break;
	case KMSG_DUMP_EMERG:
		if (panicking)
			/* Panic report already captured. */
			return;
		break;
	default:
		pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
		       __func__, (int) reason);
		return;
	}

	/* Don't overwrite an RTAS event that hasn't been read out yet. */
	if (clobbering_unread_rtas_event())
		return;

	/* trylock: never spin here -- we may already be mid-panic. */
	if (!spin_trylock_irqsave(&lock, flags))
		return;

	/* First attempt: grab a large capture and compress it. */
	if (big_oops_buf) {
		kmsg_dump_get_buffer(dumper, false,
				     big_oops_buf, big_oops_buf_sz, &text_len);
		rc = zip_oops(text_len);
	}
	/* Compression unavailable or failed: re-capture uncompressed. */
	if (rc != 0) {
		kmsg_dump_rewind(dumper);
		kmsg_dump_get_buffer(dumper, false,
				     oops_data, oops_data_sz, &text_len);
		err_type = ERR_TYPE_KERNEL_PANIC;
		oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
		oops_hdr->report_length = cpu_to_be16(text_len);
		oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
	}

	/* Best effort: there is no recovery path if this write fails. */
	(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
		(int) (sizeof(*oops_hdr) + text_len), err_type,
		++oops_count);

	spin_unlock_irqrestore(&lock, flags);
}
733 | |||
57 | static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) | 734 | static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) |
58 | { | 735 | { |
59 | int size; | 736 | int size; |
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 2f35a72642c6..b60a67d92ebd 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c | |||
@@ -72,7 +72,7 @@ static int of_pci_phb_probe(struct platform_device *dev) | |||
72 | 72 | ||
73 | /* Register devices with EEH */ | 73 | /* Register devices with EEH */ |
74 | if (dev->dev.of_node->child) | 74 | if (dev->dev.of_node->child) |
75 | eeh_add_device_tree_early(dev->dev.of_node); | 75 | eeh_add_device_tree_early(PCI_DN(dev->dev.of_node)); |
76 | 76 | ||
77 | /* Scan the bus */ | 77 | /* Scan the bus */ |
78 | pcibios_scan_phb(phb); | 78 | pcibios_scan_phb(phb); |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 2a525c938158..0d054068a21d 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -76,7 +76,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev) | |||
76 | list_add_tail(&phb->list_node, &hose_list); | 76 | list_add_tail(&phb->list_node, &hose_list); |
77 | spin_unlock(&hose_spinlock); | 77 | spin_unlock(&hose_spinlock); |
78 | phb->dn = dev; | 78 | phb->dn = dev; |
79 | phb->is_dynamic = mem_init_done; | 79 | phb->is_dynamic = slab_is_available(); |
80 | #ifdef CONFIG_PPC64 | 80 | #ifdef CONFIG_PPC64 |
81 | if (dev) { | 81 | if (dev) { |
82 | int nid = of_node_to_nid(dev); | 82 | int nid = of_node_to_nid(dev); |
@@ -109,8 +109,10 @@ void pcibios_free_controller(struct pci_controller *phb) | |||
109 | resource_size_t pcibios_window_alignment(struct pci_bus *bus, | 109 | resource_size_t pcibios_window_alignment(struct pci_bus *bus, |
110 | unsigned long type) | 110 | unsigned long type) |
111 | { | 111 | { |
112 | if (ppc_md.pcibios_window_alignment) | 112 | struct pci_controller *phb = pci_bus_to_host(bus); |
113 | return ppc_md.pcibios_window_alignment(bus, type); | 113 | |
114 | if (phb->controller_ops.window_alignment) | ||
115 | return phb->controller_ops.window_alignment(bus, type); | ||
114 | 116 | ||
115 | /* | 117 | /* |
116 | * PCI core will figure out the default | 118 | * PCI core will figure out the default |
@@ -122,14 +124,26 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus, | |||
122 | 124 | ||
123 | void pcibios_reset_secondary_bus(struct pci_dev *dev) | 125 | void pcibios_reset_secondary_bus(struct pci_dev *dev) |
124 | { | 126 | { |
125 | if (ppc_md.pcibios_reset_secondary_bus) { | 127 | struct pci_controller *phb = pci_bus_to_host(dev->bus); |
126 | ppc_md.pcibios_reset_secondary_bus(dev); | 128 | |
129 | if (phb->controller_ops.reset_secondary_bus) { | ||
130 | phb->controller_ops.reset_secondary_bus(dev); | ||
127 | return; | 131 | return; |
128 | } | 132 | } |
129 | 133 | ||
130 | pci_reset_secondary_bus(dev); | 134 | pci_reset_secondary_bus(dev); |
131 | } | 135 | } |
132 | 136 | ||
#ifdef CONFIG_PCI_IOV
/*
 * Arch hook for the alignment of an SR-IOV BAR.  Defers to the
 * platform's override when one is installed, otherwise falls back to
 * the generic per-VF resource size.
 */
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	if (ppc_md.pcibios_iov_resource_alignment)
		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

	return pci_iov_resource_size(pdev, resno);
}
#endif /* CONFIG_PCI_IOV */
146 | |||
133 | static resource_size_t pcibios_io_size(const struct pci_controller *hose) | 147 | static resource_size_t pcibios_io_size(const struct pci_controller *hose) |
134 | { | 148 | { |
135 | #ifdef CONFIG_PPC64 | 149 | #ifdef CONFIG_PPC64 |
@@ -788,6 +802,10 @@ static void pcibios_fixup_resources(struct pci_dev *dev) | |||
788 | pci_name(dev)); | 802 | pci_name(dev)); |
789 | return; | 803 | return; |
790 | } | 804 | } |
805 | |||
806 | if (dev->is_virtfn) | ||
807 | return; | ||
808 | |||
791 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 809 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
792 | struct resource *res = dev->resource + i; | 810 | struct resource *res = dev->resource + i; |
793 | struct pci_bus_region reg; | 811 | struct pci_bus_region reg; |
@@ -942,6 +960,8 @@ static void pcibios_fixup_bridge(struct pci_bus *bus) | |||
942 | 960 | ||
943 | void pcibios_setup_bus_self(struct pci_bus *bus) | 961 | void pcibios_setup_bus_self(struct pci_bus *bus) |
944 | { | 962 | { |
963 | struct pci_controller *phb; | ||
964 | |||
945 | /* Fix up the bus resources for P2P bridges */ | 965 | /* Fix up the bus resources for P2P bridges */ |
946 | if (bus->self != NULL) | 966 | if (bus->self != NULL) |
947 | pcibios_fixup_bridge(bus); | 967 | pcibios_fixup_bridge(bus); |
@@ -953,12 +973,14 @@ void pcibios_setup_bus_self(struct pci_bus *bus) | |||
953 | ppc_md.pcibios_fixup_bus(bus); | 973 | ppc_md.pcibios_fixup_bus(bus); |
954 | 974 | ||
955 | /* Setup bus DMA mappings */ | 975 | /* Setup bus DMA mappings */ |
956 | if (ppc_md.pci_dma_bus_setup) | 976 | phb = pci_bus_to_host(bus); |
957 | ppc_md.pci_dma_bus_setup(bus); | 977 | if (phb->controller_ops.dma_bus_setup) |
978 | phb->controller_ops.dma_bus_setup(bus); | ||
958 | } | 979 | } |
959 | 980 | ||
960 | static void pcibios_setup_device(struct pci_dev *dev) | 981 | static void pcibios_setup_device(struct pci_dev *dev) |
961 | { | 982 | { |
983 | struct pci_controller *phb; | ||
962 | /* Fixup NUMA node as it may not be setup yet by the generic | 984 | /* Fixup NUMA node as it may not be setup yet by the generic |
963 | * code and is needed by the DMA init | 985 | * code and is needed by the DMA init |
964 | */ | 986 | */ |
@@ -969,8 +991,9 @@ static void pcibios_setup_device(struct pci_dev *dev) | |||
969 | set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); | 991 | set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); |
970 | 992 | ||
971 | /* Additional platform DMA/iommu setup */ | 993 | /* Additional platform DMA/iommu setup */ |
972 | if (ppc_md.pci_dma_dev_setup) | 994 | phb = pci_bus_to_host(dev->bus); |
973 | ppc_md.pci_dma_dev_setup(dev); | 995 | if (phb->controller_ops.dma_dev_setup) |
996 | phb->controller_ops.dma_dev_setup(dev); | ||
974 | 997 | ||
975 | /* Read default IRQs and fixup if necessary */ | 998 | /* Read default IRQs and fixup if necessary */ |
976 | pci_read_irq_line(dev); | 999 | pci_read_irq_line(dev); |
@@ -986,6 +1009,12 @@ int pcibios_add_device(struct pci_dev *dev) | |||
986 | */ | 1009 | */ |
987 | if (dev->bus->is_added) | 1010 | if (dev->bus->is_added) |
988 | pcibios_setup_device(dev); | 1011 | pcibios_setup_device(dev); |
1012 | |||
1013 | #ifdef CONFIG_PCI_IOV | ||
1014 | if (ppc_md.pcibios_fixup_sriov) | ||
1015 | ppc_md.pcibios_fixup_sriov(dev); | ||
1016 | #endif /* CONFIG_PCI_IOV */ | ||
1017 | |||
989 | return 0; | 1018 | return 0; |
990 | } | 1019 | } |
991 | 1020 | ||
@@ -1450,8 +1479,10 @@ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); | |||
1450 | 1479 | ||
1451 | int pcibios_enable_device(struct pci_dev *dev, int mask) | 1480 | int pcibios_enable_device(struct pci_dev *dev, int mask) |
1452 | { | 1481 | { |
1453 | if (ppc_md.pcibios_enable_device_hook) | 1482 | struct pci_controller *phb = pci_bus_to_host(dev->bus); |
1454 | if (ppc_md.pcibios_enable_device_hook(dev)) | 1483 | |
1484 | if (phb->controller_ops.enable_device_hook) | ||
1485 | if (!phb->controller_ops.enable_device_hook(dev)) | ||
1455 | return -EINVAL; | 1486 | return -EINVAL; |
1456 | 1487 | ||
1457 | return pci_enable_resources(dev, mask); | 1488 | return pci_enable_resources(dev, mask); |
@@ -1624,8 +1655,8 @@ void pcibios_scan_phb(struct pci_controller *hose) | |||
1624 | 1655 | ||
1625 | /* Get probe mode and perform scan */ | 1656 | /* Get probe mode and perform scan */ |
1626 | mode = PCI_PROBE_NORMAL; | 1657 | mode = PCI_PROBE_NORMAL; |
1627 | if (node && ppc_md.pci_probe_mode) | 1658 | if (node && hose->controller_ops.probe_mode) |
1628 | mode = ppc_md.pci_probe_mode(bus); | 1659 | mode = hose->controller_ops.probe_mode(bus); |
1629 | pr_debug(" probe mode: %d\n", mode); | 1660 | pr_debug(" probe mode: %d\n", mode); |
1630 | if (mode == PCI_PROBE_DEVTREE) | 1661 | if (mode == PCI_PROBE_DEVTREE) |
1631 | of_scan_bus(node, bus); | 1662 | of_scan_bus(node, bus); |
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c index 5b789177aa29..7ed85a69a9c2 100644 --- a/arch/powerpc/kernel/pci-hotplug.c +++ b/arch/powerpc/kernel/pci-hotplug.c | |||
@@ -73,13 +73,16 @@ void pcibios_add_pci_devices(struct pci_bus * bus) | |||
73 | { | 73 | { |
74 | int slotno, mode, pass, max; | 74 | int slotno, mode, pass, max; |
75 | struct pci_dev *dev; | 75 | struct pci_dev *dev; |
76 | struct pci_controller *phb; | ||
76 | struct device_node *dn = pci_bus_to_OF_node(bus); | 77 | struct device_node *dn = pci_bus_to_OF_node(bus); |
77 | 78 | ||
78 | eeh_add_device_tree_early(dn); | 79 | eeh_add_device_tree_early(PCI_DN(dn)); |
80 | |||
81 | phb = pci_bus_to_host(bus); | ||
79 | 82 | ||
80 | mode = PCI_PROBE_NORMAL; | 83 | mode = PCI_PROBE_NORMAL; |
81 | if (ppc_md.pci_probe_mode) | 84 | if (phb->controller_ops.probe_mode) |
82 | mode = ppc_md.pci_probe_mode(bus); | 85 | mode = phb->controller_ops.probe_mode(bus); |
83 | 86 | ||
84 | if (mode == PCI_PROBE_DEVTREE) { | 87 | if (mode == PCI_PROBE_DEVTREE) { |
85 | /* use ofdt-based probe */ | 88 | /* use ofdt-based probe */ |
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 83df3075d3df..b3b4df91b792 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c | |||
@@ -32,12 +32,237 @@ | |||
32 | #include <asm/ppc-pci.h> | 32 | #include <asm/ppc-pci.h> |
33 | #include <asm/firmware.h> | 33 | #include <asm/firmware.h> |
34 | 34 | ||
35 | /* | ||
36 | * The function is used to find the firmware data of one | ||
37 | * specific PCI device, which is attached to the indicated | ||
38 | * PCI bus. For VFs, their firmware data is linked to that | ||
39 | * one of PF's bridge. For other devices, their firmware | ||
40 | * data is linked to that of their bridge. | ||
41 | */ | ||
42 | static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus) | ||
43 | { | ||
44 | struct pci_bus *pbus; | ||
45 | struct device_node *dn; | ||
46 | struct pci_dn *pdn; | ||
47 | |||
48 | /* | ||
49 | * We probably have virtual bus which doesn't | ||
50 | * have associated bridge. | ||
51 | */ | ||
52 | pbus = bus; | ||
53 | while (pbus) { | ||
54 | if (pci_is_root_bus(pbus) || pbus->self) | ||
55 | break; | ||
56 | |||
57 | pbus = pbus->parent; | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Except virtual bus, all PCI buses should | ||
62 | * have device nodes. | ||
63 | */ | ||
64 | dn = pci_bus_to_OF_node(pbus); | ||
65 | pdn = dn ? PCI_DN(dn) : NULL; | ||
66 | |||
67 | return pdn; | ||
68 | } | ||
69 | |||
70 | struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus, | ||
71 | int devfn) | ||
72 | { | ||
73 | struct device_node *dn = NULL; | ||
74 | struct pci_dn *parent, *pdn; | ||
75 | struct pci_dev *pdev = NULL; | ||
76 | |||
77 | /* Fast path: fetch from PCI device */ | ||
78 | list_for_each_entry(pdev, &bus->devices, bus_list) { | ||
79 | if (pdev->devfn == devfn) { | ||
80 | if (pdev->dev.archdata.pci_data) | ||
81 | return pdev->dev.archdata.pci_data; | ||
82 | |||
83 | dn = pci_device_to_OF_node(pdev); | ||
84 | break; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* Fast path: fetch from device node */ | ||
89 | pdn = dn ? PCI_DN(dn) : NULL; | ||
90 | if (pdn) | ||
91 | return pdn; | ||
92 | |||
93 | /* Slow path: fetch from firmware data hierarchy */ | ||
94 | parent = pci_bus_to_pdn(bus); | ||
95 | if (!parent) | ||
96 | return NULL; | ||
97 | |||
98 | list_for_each_entry(pdn, &parent->child_list, list) { | ||
99 | if (pdn->busno == bus->number && | ||
100 | pdn->devfn == devfn) | ||
101 | return pdn; | ||
102 | } | ||
103 | |||
104 | return NULL; | ||
105 | } | ||
106 | |||
35 | struct pci_dn *pci_get_pdn(struct pci_dev *pdev) | 107 | struct pci_dn *pci_get_pdn(struct pci_dev *pdev) |
36 | { | 108 | { |
37 | struct device_node *dn = pci_device_to_OF_node(pdev); | 109 | struct device_node *dn; |
38 | if (!dn) | 110 | struct pci_dn *parent, *pdn; |
111 | |||
112 | /* Search device directly */ | ||
113 | if (pdev->dev.archdata.pci_data) | ||
114 | return pdev->dev.archdata.pci_data; | ||
115 | |||
116 | /* Check device node */ | ||
117 | dn = pci_device_to_OF_node(pdev); | ||
118 | pdn = dn ? PCI_DN(dn) : NULL; | ||
119 | if (pdn) | ||
120 | return pdn; | ||
121 | |||
122 | /* | ||
123 | * VFs don't have device nodes. We hook their | ||
124 | * firmware data to PF's bridge. | ||
125 | */ | ||
126 | parent = pci_bus_to_pdn(pdev->bus); | ||
127 | if (!parent) | ||
128 | return NULL; | ||
129 | |||
130 | list_for_each_entry(pdn, &parent->child_list, list) { | ||
131 | if (pdn->busno == pdev->bus->number && | ||
132 | pdn->devfn == pdev->devfn) | ||
133 | return pdn; | ||
134 | } | ||
135 | |||
136 | return NULL; | ||
137 | } | ||
138 | |||
139 | #ifdef CONFIG_PCI_IOV | ||
140 | static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent, | ||
141 | struct pci_dev *pdev, | ||
142 | int busno, int devfn) | ||
143 | { | ||
144 | struct pci_dn *pdn; | ||
145 | |||
146 | /* Except PHB, we always have the parent */ | ||
147 | if (!parent) | ||
148 | return NULL; | ||
149 | |||
150 | pdn = kzalloc(sizeof(*pdn), GFP_KERNEL); | ||
151 | if (!pdn) { | ||
152 | dev_warn(&pdev->dev, "%s: Out of memory!\n", __func__); | ||
39 | return NULL; | 153 | return NULL; |
40 | return PCI_DN(dn); | 154 | } |
155 | |||
156 | pdn->phb = parent->phb; | ||
157 | pdn->parent = parent; | ||
158 | pdn->busno = busno; | ||
159 | pdn->devfn = devfn; | ||
160 | #ifdef CONFIG_PPC_POWERNV | ||
161 | pdn->pe_number = IODA_INVALID_PE; | ||
162 | #endif | ||
163 | INIT_LIST_HEAD(&pdn->child_list); | ||
164 | INIT_LIST_HEAD(&pdn->list); | ||
165 | list_add_tail(&pdn->list, &parent->child_list); | ||
166 | |||
167 | /* | ||
168 | * If we already have PCI device instance, lets | ||
169 | * bind them. | ||
170 | */ | ||
171 | if (pdev) | ||
172 | pdev->dev.archdata.pci_data = pdn; | ||
173 | |||
174 | return pdn; | ||
175 | } | ||
176 | #endif | ||
177 | |||
178 | struct pci_dn *add_dev_pci_data(struct pci_dev *pdev) | ||
179 | { | ||
180 | #ifdef CONFIG_PCI_IOV | ||
181 | struct pci_dn *parent, *pdn; | ||
182 | int i; | ||
183 | |||
184 | /* Only support IOV for now */ | ||
185 | if (!pdev->is_physfn) | ||
186 | return pci_get_pdn(pdev); | ||
187 | |||
188 | /* Check if VFs have been populated */ | ||
189 | pdn = pci_get_pdn(pdev); | ||
190 | if (!pdn || (pdn->flags & PCI_DN_FLAG_IOV_VF)) | ||
191 | return NULL; | ||
192 | |||
193 | pdn->flags |= PCI_DN_FLAG_IOV_VF; | ||
194 | parent = pci_bus_to_pdn(pdev->bus); | ||
195 | if (!parent) | ||
196 | return NULL; | ||
197 | |||
198 | for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) { | ||
199 | pdn = add_one_dev_pci_data(parent, NULL, | ||
200 | pci_iov_virtfn_bus(pdev, i), | ||
201 | pci_iov_virtfn_devfn(pdev, i)); | ||
202 | if (!pdn) { | ||
203 | dev_warn(&pdev->dev, "%s: Cannot create firmware data for VF#%d\n", | ||
204 | __func__, i); | ||
205 | return NULL; | ||
206 | } | ||
207 | } | ||
208 | #endif /* CONFIG_PCI_IOV */ | ||
209 | |||
210 | return pci_get_pdn(pdev); | ||
211 | } | ||
212 | |||
213 | void remove_dev_pci_data(struct pci_dev *pdev) | ||
214 | { | ||
215 | #ifdef CONFIG_PCI_IOV | ||
216 | struct pci_dn *parent; | ||
217 | struct pci_dn *pdn, *tmp; | ||
218 | int i; | ||
219 | |||
220 | /* | ||
221 | * VF and VF PE are created/released dynamically, so we need to | ||
222 | * bind/unbind them. Otherwise the VF and VF PE would be mismatched | ||
223 | * when re-enabling SR-IOV. | ||
224 | */ | ||
225 | if (pdev->is_virtfn) { | ||
226 | pdn = pci_get_pdn(pdev); | ||
227 | #ifdef CONFIG_PPC_POWERNV | ||
228 | pdn->pe_number = IODA_INVALID_PE; | ||
229 | #endif | ||
230 | return; | ||
231 | } | ||
232 | |||
233 | /* Only support IOV PF for now */ | ||
234 | if (!pdev->is_physfn) | ||
235 | return; | ||
236 | |||
237 | /* Check if VFs have been populated */ | ||
238 | pdn = pci_get_pdn(pdev); | ||
239 | if (!pdn || !(pdn->flags & PCI_DN_FLAG_IOV_VF)) | ||
240 | return; | ||
241 | |||
242 | pdn->flags &= ~PCI_DN_FLAG_IOV_VF; | ||
243 | parent = pci_bus_to_pdn(pdev->bus); | ||
244 | if (!parent) | ||
245 | return; | ||
246 | |||
247 | /* | ||
248 | * We might introduce flag to pci_dn in future | ||
249 | * so that we can release VF's firmware data in | ||
250 | * a batch mode. | ||
251 | */ | ||
252 | for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) { | ||
253 | list_for_each_entry_safe(pdn, tmp, | ||
254 | &parent->child_list, list) { | ||
255 | if (pdn->busno != pci_iov_virtfn_bus(pdev, i) || | ||
256 | pdn->devfn != pci_iov_virtfn_devfn(pdev, i)) | ||
257 | continue; | ||
258 | |||
259 | if (!list_empty(&pdn->list)) | ||
260 | list_del(&pdn->list); | ||
261 | |||
262 | kfree(pdn); | ||
263 | } | ||
264 | } | ||
265 | #endif /* CONFIG_PCI_IOV */ | ||
41 | } | 266 | } |
42 | 267 | ||
43 | /* | 268 | /* |
@@ -49,6 +274,7 @@ void *update_dn_pci_info(struct device_node *dn, void *data) | |||
49 | struct pci_controller *phb = data; | 274 | struct pci_controller *phb = data; |
50 | const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL); | 275 | const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL); |
51 | const __be32 *regs; | 276 | const __be32 *regs; |
277 | struct device_node *parent; | ||
52 | struct pci_dn *pdn; | 278 | struct pci_dn *pdn; |
53 | 279 | ||
54 | pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); | 280 | pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); |
@@ -69,7 +295,25 @@ void *update_dn_pci_info(struct device_node *dn, void *data) | |||
69 | pdn->devfn = (addr >> 8) & 0xff; | 295 | pdn->devfn = (addr >> 8) & 0xff; |
70 | } | 296 | } |
71 | 297 | ||
298 | /* vendor/device IDs and class code */ | ||
299 | regs = of_get_property(dn, "vendor-id", NULL); | ||
300 | pdn->vendor_id = regs ? of_read_number(regs, 1) : 0; | ||
301 | regs = of_get_property(dn, "device-id", NULL); | ||
302 | pdn->device_id = regs ? of_read_number(regs, 1) : 0; | ||
303 | regs = of_get_property(dn, "class-code", NULL); | ||
304 | pdn->class_code = regs ? of_read_number(regs, 1) : 0; | ||
305 | |||
306 | /* Extended config space */ | ||
72 | pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1); | 307 | pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1); |
308 | |||
309 | /* Attach to parent node */ | ||
310 | INIT_LIST_HEAD(&pdn->child_list); | ||
311 | INIT_LIST_HEAD(&pdn->list); | ||
312 | parent = of_get_parent(dn); | ||
313 | pdn->parent = parent ? PCI_DN(parent) : NULL; | ||
314 | if (pdn->parent) | ||
315 | list_add_tail(&pdn->list, &pdn->parent->child_list); | ||
316 | |||
73 | return NULL; | 317 | return NULL; |
74 | } | 318 | } |
75 | 319 | ||
@@ -131,6 +375,46 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, | |||
131 | return NULL; | 375 | return NULL; |
132 | } | 376 | } |
133 | 377 | ||
378 | static struct pci_dn *pci_dn_next_one(struct pci_dn *root, | ||
379 | struct pci_dn *pdn) | ||
380 | { | ||
381 | struct list_head *next = pdn->child_list.next; | ||
382 | |||
383 | if (next != &pdn->child_list) | ||
384 | return list_entry(next, struct pci_dn, list); | ||
385 | |||
386 | while (1) { | ||
387 | if (pdn == root) | ||
388 | return NULL; | ||
389 | |||
390 | next = pdn->list.next; | ||
391 | if (next != &pdn->parent->child_list) | ||
392 | break; | ||
393 | |||
394 | pdn = pdn->parent; | ||
395 | } | ||
396 | |||
397 | return list_entry(next, struct pci_dn, list); | ||
398 | } | ||
399 | |||
400 | void *traverse_pci_dn(struct pci_dn *root, | ||
401 | void *(*fn)(struct pci_dn *, void *), | ||
402 | void *data) | ||
403 | { | ||
404 | struct pci_dn *pdn = root; | ||
405 | void *ret; | ||
406 | |||
407 | /* Only scan the child nodes */ | ||
408 | for (pdn = pci_dn_next_one(root, pdn); pdn; | ||
409 | pdn = pci_dn_next_one(root, pdn)) { | ||
410 | ret = fn(pdn, data); | ||
411 | if (ret) | ||
412 | return ret; | ||
413 | } | ||
414 | |||
415 | return NULL; | ||
416 | } | ||
417 | |||
134 | /** | 418 | /** |
135 | * pci_devs_phb_init_dynamic - setup pci devices under this PHB | 419 | * pci_devs_phb_init_dynamic - setup pci devices under this PHB |
136 | * phb: pci-to-host bridge (top-level bridge connecting to cpu) | 420 | * phb: pci-to-host bridge (top-level bridge connecting to cpu) |
@@ -147,8 +431,12 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb) | |||
147 | /* PHB nodes themselves must not match */ | 431 | /* PHB nodes themselves must not match */ |
148 | update_dn_pci_info(dn, phb); | 432 | update_dn_pci_info(dn, phb); |
149 | pdn = dn->data; | 433 | pdn = dn->data; |
150 | if (pdn) | 434 | if (pdn) { |
151 | pdn->devfn = pdn->busno = -1; | 435 | pdn->devfn = pdn->busno = -1; |
436 | pdn->vendor_id = pdn->device_id = pdn->class_code = 0; | ||
437 | pdn->phb = phb; | ||
438 | phb->pci_data = pdn; | ||
439 | } | ||
152 | 440 | ||
153 | /* Update dn->phb ptrs for new phb and children devices */ | 441 | /* Update dn->phb ptrs for new phb and children devices */ |
154 | traverse_pci_devices(dn, update_dn_pci_info, phb); | 442 | traverse_pci_devices(dn, update_dn_pci_info, phb); |
@@ -171,3 +459,16 @@ void __init pci_devs_phb_init(void) | |||
171 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) | 459 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) |
172 | pci_devs_phb_init_dynamic(phb); | 460 | pci_devs_phb_init_dynamic(phb); |
173 | } | 461 | } |
462 | |||
463 | static void pci_dev_pdn_setup(struct pci_dev *pdev) | ||
464 | { | ||
465 | struct pci_dn *pdn; | ||
466 | |||
467 | if (pdev->dev.archdata.pci_data) | ||
468 | return; | ||
469 | |||
470 | /* Setup the fast path */ | ||
471 | pdn = pci_get_pdn(pdev); | ||
472 | pdev->dev.archdata.pci_data = pdn; | ||
473 | } | ||
474 | DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup); | ||
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index e6245e9c7d8d..42e02a2d570b 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -207,6 +207,7 @@ void of_scan_pci_bridge(struct pci_dev *dev) | |||
207 | { | 207 | { |
208 | struct device_node *node = dev->dev.of_node; | 208 | struct device_node *node = dev->dev.of_node; |
209 | struct pci_bus *bus; | 209 | struct pci_bus *bus; |
210 | struct pci_controller *phb; | ||
210 | const __be32 *busrange, *ranges; | 211 | const __be32 *busrange, *ranges; |
211 | int len, i, mode; | 212 | int len, i, mode; |
212 | struct pci_bus_region region; | 213 | struct pci_bus_region region; |
@@ -286,9 +287,11 @@ void of_scan_pci_bridge(struct pci_dev *dev) | |||
286 | bus->number); | 287 | bus->number); |
287 | pr_debug(" bus name: %s\n", bus->name); | 288 | pr_debug(" bus name: %s\n", bus->name); |
288 | 289 | ||
290 | phb = pci_bus_to_host(bus); | ||
291 | |||
289 | mode = PCI_PROBE_NORMAL; | 292 | mode = PCI_PROBE_NORMAL; |
290 | if (ppc_md.pci_probe_mode) | 293 | if (phb->controller_ops.probe_mode) |
291 | mode = ppc_md.pci_probe_mode(bus); | 294 | mode = phb->controller_ops.probe_mode(bus); |
292 | pr_debug(" probe mode: %d\n", mode); | 295 | pr_debug(" probe mode: %d\n", mode); |
293 | 296 | ||
294 | if (mode == PCI_PROBE_DEVTREE) | 297 | if (mode == PCI_PROBE_DEVTREE) |
@@ -305,7 +308,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, | |||
305 | const __be32 *reg; | 308 | const __be32 *reg; |
306 | int reglen, devfn; | 309 | int reglen, devfn; |
307 | #ifdef CONFIG_EEH | 310 | #ifdef CONFIG_EEH |
308 | struct eeh_dev *edev = of_node_to_eeh_dev(dn); | 311 | struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn)); |
309 | #endif | 312 | #endif |
310 | 313 | ||
311 | pr_debug(" * %s\n", dn->full_name); | 314 | pr_debug(" * %s\n", dn->full_name); |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index b4cc7bef6b16..febb50dd5328 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -1114,8 +1114,11 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp) | |||
1114 | */ | 1114 | */ |
1115 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ | 1115 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ |
1116 | 1116 | ||
1117 | /* | ||
1118 | * Copy architecture-specific thread state | ||
1119 | */ | ||
1117 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 1120 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
1118 | unsigned long arg, struct task_struct *p) | 1121 | unsigned long kthread_arg, struct task_struct *p) |
1119 | { | 1122 | { |
1120 | struct pt_regs *childregs, *kregs; | 1123 | struct pt_regs *childregs, *kregs; |
1121 | extern void ret_from_fork(void); | 1124 | extern void ret_from_fork(void); |
@@ -1127,6 +1130,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
1127 | sp -= sizeof(struct pt_regs); | 1130 | sp -= sizeof(struct pt_regs); |
1128 | childregs = (struct pt_regs *) sp; | 1131 | childregs = (struct pt_regs *) sp; |
1129 | if (unlikely(p->flags & PF_KTHREAD)) { | 1132 | if (unlikely(p->flags & PF_KTHREAD)) { |
1133 | /* kernel thread */ | ||
1130 | struct thread_info *ti = (void *)task_stack_page(p); | 1134 | struct thread_info *ti = (void *)task_stack_page(p); |
1131 | memset(childregs, 0, sizeof(struct pt_regs)); | 1135 | memset(childregs, 0, sizeof(struct pt_regs)); |
1132 | childregs->gpr[1] = sp + sizeof(struct pt_regs); | 1136 | childregs->gpr[1] = sp + sizeof(struct pt_regs); |
@@ -1137,11 +1141,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
1137 | clear_tsk_thread_flag(p, TIF_32BIT); | 1141 | clear_tsk_thread_flag(p, TIF_32BIT); |
1138 | childregs->softe = 1; | 1142 | childregs->softe = 1; |
1139 | #endif | 1143 | #endif |
1140 | childregs->gpr[15] = arg; | 1144 | childregs->gpr[15] = kthread_arg; |
1141 | p->thread.regs = NULL; /* no user register state */ | 1145 | p->thread.regs = NULL; /* no user register state */ |
1142 | ti->flags |= _TIF_RESTOREALL; | 1146 | ti->flags |= _TIF_RESTOREALL; |
1143 | f = ret_from_kernel_thread; | 1147 | f = ret_from_kernel_thread; |
1144 | } else { | 1148 | } else { |
1149 | /* user thread */ | ||
1145 | struct pt_regs *regs = current_pt_regs(); | 1150 | struct pt_regs *regs = current_pt_regs(); |
1146 | CHECK_FULL_REGS(regs); | 1151 | CHECK_FULL_REGS(regs); |
1147 | *childregs = *regs; | 1152 | *childregs = *regs; |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 1a85d8f96739..fd1fe4c37599 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -2898,7 +2898,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, | |||
2898 | * Call OF "quiesce" method to shut down pending DMA's from | 2898 | * Call OF "quiesce" method to shut down pending DMA's from |
2899 | * devices etc... | 2899 | * devices etc... |
2900 | */ | 2900 | */ |
2901 | prom_printf("Calling quiesce...\n"); | 2901 | prom_printf("Quiescing Open Firmware ...\n"); |
2902 | call_prom("quiesce", 0, 0); | 2902 | call_prom("quiesce", 0, 0); |
2903 | 2903 | ||
2904 | /* | 2904 | /* |
@@ -2910,7 +2910,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, | |||
2910 | 2910 | ||
2911 | /* Don't print anything after quiesce under OPAL, it crashes OFW */ | 2911 | /* Don't print anything after quiesce under OPAL, it crashes OFW */ |
2912 | if (of_platform != PLATFORM_OPAL) { | 2912 | if (of_platform != PLATFORM_OPAL) { |
2913 | prom_printf("returning from prom_init\n"); | 2913 | prom_printf("Booting Linux via __start() ...\n"); |
2914 | prom_debug("->dt_header_start=0x%x\n", hdr); | 2914 | prom_debug("->dt_header_start=0x%x\n", hdr); |
2915 | } | 2915 | } |
2916 | 2916 | ||
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 21c45a2d0706..7a488c108410 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -401,7 +401,7 @@ static char *__fetch_rtas_last_error(char *altbuf) | |||
401 | buf = altbuf; | 401 | buf = altbuf; |
402 | } else { | 402 | } else { |
403 | buf = rtas_err_buf; | 403 | buf = rtas_err_buf; |
404 | if (mem_init_done) | 404 | if (slab_is_available()) |
405 | buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC); | 405 | buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC); |
406 | } | 406 | } |
407 | if (buf) | 407 | if (buf) |
@@ -461,7 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) | |||
461 | 461 | ||
462 | if (buff_copy) { | 462 | if (buff_copy) { |
463 | log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); | 463 | log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); |
464 | if (mem_init_done) | 464 | if (slab_is_available()) |
465 | kfree(buff_copy); | 465 | kfree(buff_copy); |
466 | } | 466 | } |
467 | return ret; | 467 | return ret; |
@@ -897,7 +897,7 @@ int rtas_offline_cpus_mask(cpumask_var_t cpus) | |||
897 | } | 897 | } |
898 | EXPORT_SYMBOL(rtas_offline_cpus_mask); | 898 | EXPORT_SYMBOL(rtas_offline_cpus_mask); |
899 | 899 | ||
900 | int rtas_ibm_suspend_me(u64 handle, int *vasi_return) | 900 | int rtas_ibm_suspend_me(u64 handle) |
901 | { | 901 | { |
902 | long state; | 902 | long state; |
903 | long rc; | 903 | long rc; |
@@ -919,13 +919,11 @@ int rtas_ibm_suspend_me(u64 handle, int *vasi_return) | |||
919 | printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc); | 919 | printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc); |
920 | return rc; | 920 | return rc; |
921 | } else if (state == H_VASI_ENABLED) { | 921 | } else if (state == H_VASI_ENABLED) { |
922 | *vasi_return = RTAS_NOT_SUSPENDABLE; | 922 | return -EAGAIN; |
923 | return 0; | ||
924 | } else if (state != H_VASI_SUSPENDING) { | 923 | } else if (state != H_VASI_SUSPENDING) { |
925 | printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n", | 924 | printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n", |
926 | state); | 925 | state); |
927 | *vasi_return = -1; | 926 | return -EIO; |
928 | return 0; | ||
929 | } | 927 | } |
930 | 928 | ||
931 | if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) | 929 | if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) |
@@ -972,7 +970,7 @@ out: | |||
972 | return atomic_read(&data.error); | 970 | return atomic_read(&data.error); |
973 | } | 971 | } |
974 | #else /* CONFIG_PPC_PSERIES */ | 972 | #else /* CONFIG_PPC_PSERIES */ |
975 | int rtas_ibm_suspend_me(u64 handle, int *vasi_return) | 973 | int rtas_ibm_suspend_me(u64 handle) |
976 | { | 974 | { |
977 | return -ENOSYS; | 975 | return -ENOSYS; |
978 | } | 976 | } |
@@ -1022,7 +1020,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) | |||
1022 | unsigned long flags; | 1020 | unsigned long flags; |
1023 | char *buff_copy, *errbuf = NULL; | 1021 | char *buff_copy, *errbuf = NULL; |
1024 | int nargs, nret, token; | 1022 | int nargs, nret, token; |
1025 | int rc; | ||
1026 | 1023 | ||
1027 | if (!capable(CAP_SYS_ADMIN)) | 1024 | if (!capable(CAP_SYS_ADMIN)) |
1028 | return -EPERM; | 1025 | return -EPERM; |
@@ -1054,15 +1051,18 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) | |||
1054 | if (token == ibm_suspend_me_token) { | 1051 | if (token == ibm_suspend_me_token) { |
1055 | 1052 | ||
1056 | /* | 1053 | /* |
1057 | * rtas_ibm_suspend_me assumes args are in cpu endian, or at least the | 1054 | * rtas_ibm_suspend_me assumes the streamid handle is in cpu |
1058 | * hcall within it requires it. | 1055 | * endian, or at least the hcall within it requires it. |
1059 | */ | 1056 | */ |
1060 | int vasi_rc = 0; | 1057 | int rc = 0; |
1061 | u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32) | 1058 | u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32) |
1062 | | be32_to_cpu(args.args[1]); | 1059 | | be32_to_cpu(args.args[1]); |
1063 | rc = rtas_ibm_suspend_me(handle, &vasi_rc); | 1060 | rc = rtas_ibm_suspend_me(handle); |
1064 | args.rets[0] = cpu_to_be32(vasi_rc); | 1061 | if (rc == -EAGAIN) |
1065 | if (rc) | 1062 | args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE); |
1063 | else if (rc == -EIO) | ||
1064 | args.rets[0] = cpu_to_be32(-1); | ||
1065 | else if (rc) | ||
1066 | return rc; | 1066 | return rc; |
1067 | goto copy_return; | 1067 | goto copy_return; |
1068 | } | 1068 | } |
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index ce230da2c015..73f1934582c2 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c | |||
@@ -113,7 +113,7 @@ static int rtas_pci_read_config(struct pci_bus *bus, | |||
113 | 113 | ||
114 | ret = rtas_read_config(pdn, where, size, val); | 114 | ret = rtas_read_config(pdn, where, size, val); |
115 | if (*val == EEH_IO_ERROR_VALUE(size) && | 115 | if (*val == EEH_IO_ERROR_VALUE(size) && |
116 | eeh_dev_check_failure(of_node_to_eeh_dev(dn))) | 116 | eeh_dev_check_failure(pdn_to_eeh_dev(pdn))) |
117 | return PCIBIOS_DEVICE_NOT_FOUND; | 117 | return PCIBIOS_DEVICE_NOT_FOUND; |
118 | 118 | ||
119 | return ret; | 119 | return ret; |
@@ -277,50 +277,3 @@ int rtas_setup_phb(struct pci_controller *phb) | |||
277 | 277 | ||
278 | return 0; | 278 | return 0; |
279 | } | 279 | } |
280 | |||
281 | void __init find_and_init_phbs(void) | ||
282 | { | ||
283 | struct device_node *node; | ||
284 | struct pci_controller *phb; | ||
285 | struct device_node *root = of_find_node_by_path("/"); | ||
286 | |||
287 | for_each_child_of_node(root, node) { | ||
288 | if (node->type == NULL || (strcmp(node->type, "pci") != 0 && | ||
289 | strcmp(node->type, "pciex") != 0)) | ||
290 | continue; | ||
291 | |||
292 | phb = pcibios_alloc_controller(node); | ||
293 | if (!phb) | ||
294 | continue; | ||
295 | rtas_setup_phb(phb); | ||
296 | pci_process_bridge_OF_ranges(phb, node, 0); | ||
297 | isa_bridge_find_early(phb); | ||
298 | } | ||
299 | |||
300 | of_node_put(root); | ||
301 | pci_devs_phb_init(); | ||
302 | |||
303 | /* | ||
304 | * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties | ||
305 | * in chosen. | ||
306 | */ | ||
307 | if (of_chosen) { | ||
308 | const int *prop; | ||
309 | |||
310 | prop = of_get_property(of_chosen, | ||
311 | "linux,pci-probe-only", NULL); | ||
312 | if (prop) { | ||
313 | if (*prop) | ||
314 | pci_add_flags(PCI_PROBE_ONLY); | ||
315 | else | ||
316 | pci_clear_flags(PCI_PROBE_ONLY); | ||
317 | } | ||
318 | |||
319 | #ifdef CONFIG_PPC32 /* Will be made generic soon */ | ||
320 | prop = of_get_property(of_chosen, | ||
321 | "linux,pci-assign-all-buses", NULL); | ||
322 | if (prop && *prop) | ||
323 | pci_add_flags(PCI_REASSIGN_ALL_BUS); | ||
324 | #endif /* CONFIG_PPC32 */ | ||
325 | } | ||
326 | } | ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 49f553bbb360..c69671c03c3b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/memblock.h> | 37 | #include <linux/memblock.h> |
38 | #include <linux/hugetlb.h> | 38 | #include <linux/hugetlb.h> |
39 | #include <linux/memory.h> | 39 | #include <linux/memory.h> |
40 | #include <linux/nmi.h> | ||
40 | 41 | ||
41 | #include <asm/io.h> | 42 | #include <asm/io.h> |
42 | #include <asm/kdump.h> | 43 | #include <asm/kdump.h> |
@@ -779,3 +780,22 @@ unsigned long memory_block_size_bytes(void) | |||
779 | struct ppc_pci_io ppc_pci_io; | 780 | struct ppc_pci_io ppc_pci_io; |
780 | EXPORT_SYMBOL(ppc_pci_io); | 781 | EXPORT_SYMBOL(ppc_pci_io); |
781 | #endif | 782 | #endif |
783 | |||
784 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
785 | u64 hw_nmi_get_sample_period(int watchdog_thresh) | ||
786 | { | ||
787 | return ppc_proc_freq * watchdog_thresh; | ||
788 | } | ||
789 | |||
790 | /* | ||
791 | * The hardlockup detector breaks PMU event based branches and is likely | ||
792 | * to get false positives in KVM guests, so disable it by default. | ||
793 | */ | ||
794 | static int __init disable_hardlockup_detector(void) | ||
795 | { | ||
796 | hardlockup_detector_disable(); | ||
797 | |||
798 | return 0; | ||
799 | } | ||
800 | early_initcall(disable_hardlockup_detector); | ||
801 | #endif | ||
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index b2702e87db0d..5fa92706444b 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c | |||
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, | |||
121 | return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, | 121 | return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, |
122 | (u64)len_high << 32 | len_low, advice); | 122 | (u64)len_high << 32 | len_low, advice); |
123 | } | 123 | } |
124 | |||
125 | long sys_switch_endian(void) | ||
126 | { | ||
127 | struct thread_info *ti; | ||
128 | |||
129 | current->thread.regs->msr ^= MSR_LE; | ||
130 | |||
131 | /* | ||
132 | * Set TIF_RESTOREALL so that r3 isn't clobbered on return to | ||
133 | * userspace. That also has the effect of restoring the non-volatile | ||
134 | * GPRs, so we saved them on the way in here. | ||
135 | */ | ||
136 | ti = current_thread_info(); | ||
137 | ti->flags |= _TIF_RESTOREALL; | ||
138 | |||
139 | return 0; | ||
140 | } | ||
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 7ab5d434e2ee..4d6b1d3a747f 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S | |||
@@ -22,6 +22,7 @@ | |||
22 | #define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) | 22 | #define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) |
23 | #define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) | 23 | #define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) |
24 | #define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) | 24 | #define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) |
25 | #define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall) | ||
25 | #define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) | 26 | #define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) |
26 | #else | 27 | #else |
27 | #define SYSCALL(func) .long sys_##func | 28 | #define SYSCALL(func) .long sys_##func |
@@ -29,6 +30,7 @@ | |||
29 | #define PPC_SYS(func) .long ppc_##func | 30 | #define PPC_SYS(func) .long ppc_##func |
30 | #define OLDSYS(func) .long sys_##func | 31 | #define OLDSYS(func) .long sys_##func |
31 | #define SYS32ONLY(func) .long sys_##func | 32 | #define SYS32ONLY(func) .long sys_##func |
33 | #define PPC64ONLY(func) .long sys_ni_syscall | ||
32 | #define SYSX(f, f3264, f32) .long f32 | 34 | #define SYSX(f, f3264, f32) .long f32 |
33 | #endif | 35 | #endif |
34 | #define SYSCALL_SPU(func) SYSCALL(func) | 36 | #define SYSCALL_SPU(func) SYSCALL(func) |
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c index 238aa63ced8f..2384129f5893 100644 --- a/arch/powerpc/kernel/systbl_chk.c +++ b/arch/powerpc/kernel/systbl_chk.c | |||
@@ -21,9 +21,11 @@ | |||
21 | #ifdef CONFIG_PPC64 | 21 | #ifdef CONFIG_PPC64 |
22 | #define OLDSYS(func) -1 | 22 | #define OLDSYS(func) -1 |
23 | #define SYS32ONLY(func) -1 | 23 | #define SYS32ONLY(func) -1 |
24 | #define PPC64ONLY(func) __NR_##func | ||
24 | #else | 25 | #else |
25 | #define OLDSYS(func) __NR_old##func | 26 | #define OLDSYS(func) __NR_old##func |
26 | #define SYS32ONLY(func) __NR_##func | 27 | #define SYS32ONLY(func) __NR_##func |
28 | #define PPC64ONLY(func) -1 | ||
27 | #endif | 29 | #endif |
28 | #define SYSX(f, f3264, f32) -1 | 30 | #define SYSX(f, f3264, f32) -1 |
29 | 31 | ||
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 2a324f4cb1b9..5754b226da7e 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S | |||
@@ -152,9 +152,9 @@ _GLOBAL(tm_reclaim) | |||
152 | 152 | ||
153 | addi r7, r3, THREAD_TRANSACT_VRSTATE | 153 | addi r7, r3, THREAD_TRANSACT_VRSTATE |
154 | SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */ | 154 | SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */ |
155 | mfvscr vr0 | 155 | mfvscr v0 |
156 | li r6, VRSTATE_VSCR | 156 | li r6, VRSTATE_VSCR |
157 | stvx vr0, r7, r6 | 157 | stvx v0, r7, r6 |
158 | dont_backup_vec: | 158 | dont_backup_vec: |
159 | mfspr r0, SPRN_VRSAVE | 159 | mfspr r0, SPRN_VRSAVE |
160 | std r0, THREAD_TRANSACT_VRSAVE(r3) | 160 | std r0, THREAD_TRANSACT_VRSAVE(r3) |
@@ -359,8 +359,8 @@ _GLOBAL(__tm_recheckpoint) | |||
359 | 359 | ||
360 | addi r8, r3, THREAD_VRSTATE | 360 | addi r8, r3, THREAD_VRSTATE |
361 | li r5, VRSTATE_VSCR | 361 | li r5, VRSTATE_VSCR |
362 | lvx vr0, r8, r5 | 362 | lvx v0, r8, r5 |
363 | mtvscr vr0 | 363 | mtvscr v0 |
364 | REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */ | 364 | REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */ |
365 | dont_restore_vec: | 365 | dont_restore_vec: |
366 | ld r5, THREAD_VRSAVE(r3) | 366 | ld r5, THREAD_VRSAVE(r3) |
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index b7aa07279a63..7cc38b5b58bc 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c | |||
@@ -46,8 +46,6 @@ void __init udbg_early_init(void) | |||
46 | #elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) | 46 | #elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) |
47 | /* Maple real mode debug */ | 47 | /* Maple real mode debug */ |
48 | udbg_init_maple_realmode(); | 48 | udbg_init_maple_realmode(); |
49 | #elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT) | ||
50 | udbg_init_debug_beat(); | ||
51 | #elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) | 49 | #elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) |
52 | udbg_init_pas_realmode(); | 50 | udbg_init_pas_realmode(); |
53 | #elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) | 51 | #elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) |
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index 74f8050518d6..f5c80d567d8d 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S | |||
@@ -24,8 +24,8 @@ _GLOBAL(do_load_up_transact_altivec) | |||
24 | stw r4,THREAD_USED_VR(r3) | 24 | stw r4,THREAD_USED_VR(r3) |
25 | 25 | ||
26 | li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR | 26 | li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR |
27 | lvx vr0,r10,r3 | 27 | lvx v0,r10,r3 |
28 | mtvscr vr0 | 28 | mtvscr v0 |
29 | addi r10,r3,THREAD_TRANSACT_VRSTATE | 29 | addi r10,r3,THREAD_TRANSACT_VRSTATE |
30 | REST_32VRS(0,r4,r10) | 30 | REST_32VRS(0,r4,r10) |
31 | 31 | ||
@@ -52,8 +52,8 @@ _GLOBAL(vec_enable) | |||
52 | */ | 52 | */ |
53 | _GLOBAL(load_vr_state) | 53 | _GLOBAL(load_vr_state) |
54 | li r4,VRSTATE_VSCR | 54 | li r4,VRSTATE_VSCR |
55 | lvx vr0,r4,r3 | 55 | lvx v0,r4,r3 |
56 | mtvscr vr0 | 56 | mtvscr v0 |
57 | REST_32VRS(0,r4,r3) | 57 | REST_32VRS(0,r4,r3) |
58 | blr | 58 | blr |
59 | 59 | ||
@@ -63,9 +63,9 @@ _GLOBAL(load_vr_state) | |||
63 | */ | 63 | */ |
64 | _GLOBAL(store_vr_state) | 64 | _GLOBAL(store_vr_state) |
65 | SAVE_32VRS(0, r4, r3) | 65 | SAVE_32VRS(0, r4, r3) |
66 | mfvscr vr0 | 66 | mfvscr v0 |
67 | li r4, VRSTATE_VSCR | 67 | li r4, VRSTATE_VSCR |
68 | stvx vr0, r4, r3 | 68 | stvx v0, r4, r3 |
69 | blr | 69 | blr |
70 | 70 | ||
71 | /* | 71 | /* |
@@ -104,9 +104,9 @@ _GLOBAL(load_up_altivec) | |||
104 | addi r4,r4,THREAD | 104 | addi r4,r4,THREAD |
105 | addi r6,r4,THREAD_VRSTATE | 105 | addi r6,r4,THREAD_VRSTATE |
106 | SAVE_32VRS(0,r5,r6) | 106 | SAVE_32VRS(0,r5,r6) |
107 | mfvscr vr0 | 107 | mfvscr v0 |
108 | li r10,VRSTATE_VSCR | 108 | li r10,VRSTATE_VSCR |
109 | stvx vr0,r10,r6 | 109 | stvx v0,r10,r6 |
110 | /* Disable VMX for last_task_used_altivec */ | 110 | /* Disable VMX for last_task_used_altivec */ |
111 | PPC_LL r5,PT_REGS(r4) | 111 | PPC_LL r5,PT_REGS(r4) |
112 | toreal(r5) | 112 | toreal(r5) |
@@ -142,8 +142,8 @@ _GLOBAL(load_up_altivec) | |||
142 | li r4,1 | 142 | li r4,1 |
143 | li r10,VRSTATE_VSCR | 143 | li r10,VRSTATE_VSCR |
144 | stw r4,THREAD_USED_VR(r5) | 144 | stw r4,THREAD_USED_VR(r5) |
145 | lvx vr0,r10,r6 | 145 | lvx v0,r10,r6 |
146 | mtvscr vr0 | 146 | mtvscr v0 |
147 | REST_32VRS(0,r4,r6) | 147 | REST_32VRS(0,r4,r6) |
148 | #ifndef CONFIG_SMP | 148 | #ifndef CONFIG_SMP |
149 | /* Update last_task_used_altivec to 'current' */ | 149 | /* Update last_task_used_altivec to 'current' */ |
@@ -186,9 +186,9 @@ _GLOBAL(giveup_altivec) | |||
186 | addi r7,r3,THREAD_VRSTATE | 186 | addi r7,r3,THREAD_VRSTATE |
187 | 2: PPC_LCMPI 0,r5,0 | 187 | 2: PPC_LCMPI 0,r5,0 |
188 | SAVE_32VRS(0,r4,r7) | 188 | SAVE_32VRS(0,r4,r7) |
189 | mfvscr vr0 | 189 | mfvscr v0 |
190 | li r4,VRSTATE_VSCR | 190 | li r4,VRSTATE_VSCR |
191 | stvx vr0,r4,r7 | 191 | stvx v0,r4,r7 |
192 | beq 1f | 192 | beq 1f |
193 | PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) | 193 | PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
194 | #ifdef CONFIG_VSX | 194 | #ifdef CONFIG_VSX |