author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-30 11:10:12 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-04-30 11:10:12 -0400
commit     24a77daf3d80bddcece044e6dc3675e427eef3f3
tree       2c5e0b0bea394d6fe62c5d5857c252e83e48ac48 /include
parent     e389f9aec689209724105ae80a6c91fd2e747bc9
parent     f900e9777fc9b65140cb9570438597bc8fae56ab
Merge branch 'for-2.6.22' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'for-2.6.22' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (255 commits)
[POWERPC] Remove dev_dbg redefinition in drivers/ps3/vuart.c
[POWERPC] remove kernel module option for booke wdt
[POWERPC] Avoid putting cpu node twice
[POWERPC] Spinlock initializer cleanup
[POWERPC] ppc4xx_sgdma needs dma-mapping.h
[POWERPC] arch/powerpc/sysdev/timer.c build fix
[POWERPC] get_property cleanups
[POWERPC] Remove the unused HTDMSOUND driver
[POWERPC] cell: cbe_cpufreq cleanup and crash fix
[POWERPC] Declare enable_kernel_spe in a header
[POWERPC] Add dt_xlate_addr() to bootwrapper
[POWERPC] bootwrapper: CONFIG_ -> CONFIG_DEVICE_TREE
[POWERPC] Don't define a custom bd_t for Xilixn Virtex based boards.
[POWERPC] Add sane defaults for Xilinx EDK generated xparameters files
[POWERPC] Add uartlite boot console driver for the zImage wrapper
[POWERPC] Stop using ppc_sys for Xilinx Virtex boards
[POWERPC] New registration for common Xilinx Virtex ppc405 platform devices
[POWERPC] Merge common virtex header files
[POWERPC] Rework Kconfig dependancies for Xilinx Virtex ppc405 platform
[POWERPC] Clean up cpufreq Kconfig dependencies
...
Diffstat (limited to 'include')
38 files changed, 812 insertions, 682 deletions
diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
index c89bd58ee283..c19e7367fce6 100644
--- a/include/asm-powerpc/asm-compat.h
+++ b/include/asm-powerpc/asm-compat.h
@@ -78,6 +78,15 @@
78 | #define PPC_STLCX stringify_in_c(stdcx.) | 78 | #define PPC_STLCX stringify_in_c(stdcx.) |
79 | #define PPC_CNTLZL stringify_in_c(cntlzd) | 79 | #define PPC_CNTLZL stringify_in_c(cntlzd) |
80 | 80 | ||
81 | /* Move to CR, single-entry optimized version. Only available | ||
82 | * on POWER4 and later. | ||
83 | */ | ||
84 | #ifdef CONFIG_POWER4_ONLY | ||
85 | #define PPC_MTOCRF stringify_in_c(mtocrf) | ||
86 | #else | ||
87 | #define PPC_MTOCRF stringify_in_c(mtcrf) | ||
88 | #endif | ||
89 | |||
81 | #else /* 32-bit */ | 90 | #else /* 32-bit */ |
82 | 91 | ||
83 | /* operations for longs and pointers */ | 92 | /* operations for longs and pointers */ |
@@ -89,6 +98,7 @@
89 | #define PPC_LLARX stringify_in_c(lwarx) | 98 | #define PPC_LLARX stringify_in_c(lwarx) |
90 | #define PPC_STLCX stringify_in_c(stwcx.) | 99 | #define PPC_STLCX stringify_in_c(stwcx.) |
91 | #define PPC_CNTLZL stringify_in_c(cntlzw) | 100 | #define PPC_CNTLZL stringify_in_c(cntlzw) |
101 | #define PPC_MTOCRF stringify_in_c(mtcrf) | ||
92 | 102 | ||
93 | #endif | 103 | #endif |
94 | 104 | ||
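The hunks above add a PPC_MTOCRF mnemonic that resolves to mtocrf on POWER4-only builds and to mtcrf everywhere else. As a rough, hedged illustration of how such stringify_in_c() mnemonic macros get consumed from C (in the kernel they are mostly used from .S files), here is a sketch; the helper name and the CR field mask are invented for illustration and are not part of the patch:

```c
/* Hypothetical illustration, not from the patch: asm-compat.h mnemonic
 * macros expand to instruction strings that can be pasted into inline
 * assembly, so one source line works for both mtocrf and mtcrf. */
#include <asm/asm-compat.h>

static inline void set_cr0_from(unsigned long bits)
{
	/* FXM mask 0x80 selects CR field 0; the macro supplies the mnemonic */
	asm volatile(PPC_MTOCRF " 0x80,%0" : : "r" (bits) : "cr0");
}
```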
diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h
index 08e93e789219..ba667a383b8c 100644
--- a/include/asm-powerpc/cacheflush.h
+++ b/include/asm-powerpc/cacheflush.h
@@ -64,6 +64,12 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
64 | memcpy(dst, src, len) | 64 | memcpy(dst, src, len) |
65 | 65 | ||
66 | 66 | ||
67 | |||
68 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
69 | /* internal debugging function */ | ||
70 | void kernel_map_pages(struct page *page, int numpages, int enable); | ||
71 | #endif | ||
72 | |||
67 | #endif /* __KERNEL__ */ | 73 | #endif /* __KERNEL__ */ |
68 | 74 | ||
69 | #endif /* _ASM_POWERPC_CACHEFLUSH_H */ | 75 | #endif /* _ASM_POWERPC_CACHEFLUSH_H */ |
diff --git a/include/asm-powerpc/cell-pmu.h b/include/asm-powerpc/cell-pmu.h
index 35b95773746c..8066eede3a0c 100644
--- a/include/asm-powerpc/cell-pmu.h
+++ b/include/asm-powerpc/cell-pmu.h
@@ -97,11 +97,6 @@ extern void cbe_disable_pm_interrupts(u32 cpu);
97 | extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu); | 97 | extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu); |
98 | extern void cbe_sync_irq(int node); | 98 | extern void cbe_sync_irq(int node); |
99 | 99 | ||
100 | /* Utility functions, macros */ | ||
101 | extern u32 cbe_get_hw_thread_id(int cpu); | ||
102 | |||
103 | #define cbe_cpu_to_node(cpu) ((cpu) >> 1) | ||
104 | |||
105 | #define CBE_COUNT_SUPERVISOR_MODE 0 | 100 | #define CBE_COUNT_SUPERVISOR_MODE 0 |
106 | #define CBE_COUNT_HYPERVISOR_MODE 1 | 101 | #define CBE_COUNT_HYPERVISOR_MODE 1 |
107 | #define CBE_COUNT_PROBLEM_MODE 2 | 102 | #define CBE_COUNT_PROBLEM_MODE 2 |
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index e870b5393175..434524931ef3 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -48,6 +48,7 @@ enum powerpc_oprofile_type {
48 | PPC_OPROFILE_G4 = 3, | 48 | PPC_OPROFILE_G4 = 3, |
49 | PPC_OPROFILE_BOOKE = 4, | 49 | PPC_OPROFILE_BOOKE = 4, |
50 | PPC_OPROFILE_CELL = 5, | 50 | PPC_OPROFILE_CELL = 5, |
51 | PPC_OPROFILE_PA6T = 6, | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | enum powerpc_pmc_type { | 54 | enum powerpc_pmc_type { |
@@ -223,6 +224,10 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
223 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ | 224 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ |
224 | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ | 225 | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ |
225 | CPU_FTR_PPC_LE) | 226 | CPU_FTR_PPC_LE) |
227 | #define CPU_FTRS_750CL (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ | ||
228 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ | ||
229 | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ | ||
230 | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) | ||
226 | #define CPU_FTRS_750FX1 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ | 231 | #define CPU_FTRS_750FX1 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ |
227 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ | 232 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ |
228 | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ | 233 | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ |
@@ -235,9 +240,9 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
235 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ | 240 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ |
236 | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ | 241 | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ |
237 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) | 242 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) |
238 | #define CPU_FTRS_750GX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \ | 243 | #define CPU_FTRS_750GX (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ |
239 | CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | \ | 244 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ |
240 | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ | 245 | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ |
241 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) | 246 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) |
242 | #define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ | 247 | #define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ |
243 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ | 248 | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ |
diff --git a/include/asm-powerpc/current.h b/include/asm-powerpc/current.h
index b8708aedf925..e2c7f06931e7 100644
--- a/include/asm-powerpc/current.h
+++ b/include/asm-powerpc/current.h
@@ -12,6 +12,7 @@
12 | struct task_struct; | 12 | struct task_struct; |
13 | 13 | ||
14 | #ifdef __powerpc64__ | 14 | #ifdef __powerpc64__ |
15 | #include <linux/stddef.h> | ||
15 | #include <asm/paca.h> | 16 | #include <asm/paca.h> |
16 | 17 | ||
17 | static inline struct task_struct *get_current(void) | 18 | static inline struct task_struct *get_current(void) |
diff --git a/include/asm-powerpc/edac.h b/include/asm-powerpc/edac.h
new file mode 100644
index 000000000000..6ead88bbfbb8
--- /dev/null
+++ b/include/asm-powerpc/edac.h
@@ -0,0 +1,40 @@
1 | /* | ||
2 | * PPC EDAC common defs | ||
3 | * | ||
4 | * Author: Dave Jiang <djiang@mvista.com> | ||
5 | * | ||
6 | * 2007 (c) MontaVista Software, Inc. This file is licensed under | ||
7 | * the terms of the GNU General Public License version 2. This program | ||
8 | * is licensed "as is" without any warranty of any kind, whether express | ||
9 | * or implied. | ||
10 | */ | ||
11 | #ifndef ASM_EDAC_H | ||
12 | #define ASM_EDAC_H | ||
13 | /* | ||
14 | * ECC atomic, DMA, SMP and interrupt safe scrub function. | ||
15 | * Implements the per arch atomic_scrub() that EDAC use for software | ||
16 | * ECC scrubbing. It reads memory and then writes back the original | ||
17 | * value, allowing the hardware to detect and correct memory errors. | ||
18 | */ | ||
19 | static __inline__ void atomic_scrub(void *va, u32 size) | ||
20 | { | ||
21 | unsigned int *virt_addr = va; | ||
22 | unsigned int temp; | ||
23 | unsigned int i; | ||
24 | |||
25 | for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) { | ||
26 | /* Very carefully read and write to memory atomically | ||
27 | * so we are interrupt, DMA and SMP safe. | ||
28 | */ | ||
29 | __asm__ __volatile__ ("\n\ | ||
30 | 1: lwarx %0,0,%1\n\ | ||
31 | stwcx. %0,0,%1\n\ | ||
32 | bne- 1b\n\ | ||
33 | isync" | ||
34 | : "=&r"(temp) | ||
35 | : "r"(virt_addr) | ||
36 | : "cr0", "memory"); | ||
37 | } | ||
38 | } | ||
39 | |||
40 | #endif | ||
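For context, a minimal sketch of how an EDAC driver would call the new helper; the caller below is an assumption for illustration, not taken from the patch:

```c
/* Hypothetical caller, not part of the patch: EDAC's software scrub path
 * hands atomic_scrub() a kernel virtual address and a byte count, and the
 * lwarx/stwcx. loop above rewrites each word in place so the memory
 * controller re-checks (and corrects) the ECC for that range. */
#include <asm/edac.h>
#include <asm/page.h>

static void scrub_one_page(void *vaddr)
{
	atomic_scrub(vaddr, PAGE_SIZE);	/* size in bytes, word-aligned */
}
```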
diff --git a/include/asm-powerpc/eeh_event.h b/include/asm-powerpc/eeh_event.h
index dc6bf0ffb796..cc3cb04539ac 100644
--- a/include/asm-powerpc/eeh_event.h
+++ b/include/asm-powerpc/eeh_event.h
@@ -30,8 +30,6 @@ struct eeh_event {
30 | struct list_head list; | 30 | struct list_head list; |
31 | struct device_node *dn; /* struct device node */ | 31 | struct device_node *dn; /* struct device node */ |
32 | struct pci_dev *dev; /* affected device */ | 32 | struct pci_dev *dev; /* affected device */ |
33 | enum pci_channel_state state; /* PCI bus state for the affected device */ | ||
34 | int time_unavail; /* milliseconds until device might be available */ | ||
35 | }; | 33 | }; |
36 | 34 | ||
37 | /** | 35 | /** |
@@ -46,9 +44,7 @@
46 | * (from a workqueue). | 44 | * (from a workqueue). |
47 | */ | 45 | */ |
48 | int eeh_send_failure_event (struct device_node *dn, | 46 | int eeh_send_failure_event (struct device_node *dn, |
49 | struct pci_dev *dev, | 47 | struct pci_dev *dev); |
50 | enum pci_channel_state state, | ||
51 | int time_unavail); | ||
52 | 48 | ||
53 | /* Main recovery function */ | 49 | /* Main recovery function */ |
54 | struct pci_dn * handle_eeh_events (struct eeh_event *); | 50 | struct pci_dn * handle_eeh_events (struct eeh_event *); |
diff --git a/include/asm-powerpc/ibmebus.h b/include/asm-powerpc/ibmebus.h
index 66112114b8c5..87d396e28db2 100644
--- a/include/asm-powerpc/ibmebus.h
+++ b/include/asm-powerpc/ibmebus.h
@@ -2,36 +2,37 @@
2 | * IBM PowerPC eBus Infrastructure Support. | 2 | * IBM PowerPC eBus Infrastructure Support. |
3 | * | 3 | * |
4 | * Copyright (c) 2005 IBM Corporation | 4 | * Copyright (c) 2005 IBM Corporation |
5 | * Joachim Fenkes <fenkes@de.ibm.com> | ||
5 | * Heiko J Schick <schickhj@de.ibm.com> | 6 | * Heiko J Schick <schickhj@de.ibm.com> |
6 | * | 7 | * |
7 | * All rights reserved. | 8 | * All rights reserved. |
8 | * | 9 | * |
9 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | 10 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB |
10 | * BSD. | 11 | * BSD. |
11 | * | 12 | * |
12 | * OpenIB BSD License | 13 | * OpenIB BSD License |
13 | * | 14 | * |
14 | * Redistribution and use in source and binary forms, with or without | 15 | * Redistribution and use in source and binary forms, with or without |
15 | * modification, are permitted provided that the following conditions are met: | 16 | * modification, are permitted provided that the following conditions are met: |
16 | * | 17 | * |
17 | * Redistributions of source code must retain the above copyright notice, this | 18 | * Redistributions of source code must retain the above copyright notice, this |
18 | * list of conditions and the following disclaimer. | 19 | * list of conditions and the following disclaimer. |
19 | * | 20 | * |
20 | * Redistributions in binary form must reproduce the above copyright notice, | 21 | * Redistributions in binary form must reproduce the above copyright notice, |
21 | * this list of conditions and the following disclaimer in the documentation | 22 | * this list of conditions and the following disclaimer in the documentation |
22 | * and/or other materials | 23 | * and/or other materials |
23 | * provided with the distribution. | 24 | * provided with the distribution. |
24 | * | 25 | * |
25 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | 26 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
26 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 27 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
27 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | 28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
28 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | 29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
29 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 30 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 31 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | 32 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR |
32 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | 33 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER |
33 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 34 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 35 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | 36 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | 37 | */ |
37 | 38 | ||
@@ -46,12 +47,11 @@
46 | 47 | ||
47 | extern struct bus_type ibmebus_bus_type; | 48 | extern struct bus_type ibmebus_bus_type; |
48 | 49 | ||
49 | struct ibmebus_dev { | 50 | struct ibmebus_dev { |
50 | const char *name; | ||
51 | struct of_device ofdev; | 51 | struct of_device ofdev; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct ibmebus_driver { | 54 | struct ibmebus_driver { |
55 | char *name; | 55 | char *name; |
56 | struct of_device_id *id_table; | 56 | struct of_device_id *id_table; |
57 | int (*probe) (struct ibmebus_dev *dev, const struct of_device_id *id); | 57 | int (*probe) (struct ibmebus_dev *dev, const struct of_device_id *id); |
@@ -63,7 +63,7 @@ int ibmebus_register_driver(struct ibmebus_driver *drv);
63 | void ibmebus_unregister_driver(struct ibmebus_driver *drv); | 63 | void ibmebus_unregister_driver(struct ibmebus_driver *drv); |
64 | 64 | ||
65 | int ibmebus_request_irq(struct ibmebus_dev *dev, | 65 | int ibmebus_request_irq(struct ibmebus_dev *dev, |
66 | u32 ist, | 66 | u32 ist, |
67 | irq_handler_t handler, | 67 | irq_handler_t handler, |
68 | unsigned long irq_flags, const char * devname, | 68 | unsigned long irq_flags, const char * devname, |
69 | void *dev_id); | 69 | void *dev_id); |
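Since the hunks above also reshape struct ibmebus_dev and struct ibmebus_driver, here is a hedged sketch of what a client driver built on these declarations might look like; every "example" identifier is invented, and the remove() callback is assumed from the surrounding header rather than shown in the hunk:

```c
/* Hypothetical driver skeleton, not part of the patch. */
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <asm/ibmebus.h>

static struct of_device_id example_ebus_ids[] = {
	{ .compatible = "example,ebus-dev" },	/* made-up binding */
	{}
};

static int example_probe(struct ibmebus_dev *dev,
			 const struct of_device_id *id)
{
	/* map resources, hook the interrupt via ibmebus_request_irq() */
	return 0;
}

static int example_remove(struct ibmebus_dev *dev)
{
	return 0;
}

static struct ibmebus_driver example_driver = {
	.name     = "example-ebus",
	.id_table = example_ebus_ids,
	.probe    = example_probe,
	.remove   = example_remove,
};

static int __init example_init(void)
{
	return ibmebus_register_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	ibmebus_unregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
```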
diff --git a/include/asm-powerpc/immap_86xx.h b/include/asm-powerpc/immap_86xx.h
index d905b6622268..59b9e07b8e99 100644
--- a/include/asm-powerpc/immap_86xx.h
+++ b/include/asm-powerpc/immap_86xx.h
@@ -85,81 +85,6 @@ typedef struct ccsr_pci {
85 | char res19[472]; | 85 | char res19[472]; |
86 | } ccsr_pci_t; | 86 | } ccsr_pci_t; |
87 | 87 | ||
88 | /* PCI Express Registers */ | ||
89 | typedef struct ccsr_pex { | ||
90 | uint pex_config_addr; /* 0x.000 - PCI Express Configuration Address Register */ | ||
91 | uint pex_config_data; /* 0x.004 - PCI Express Configuration Data Register */ | ||
92 | char res1[4]; | ||
93 | uint pex_otb_cpl_tor; /* 0x.00c - PCI Express Outbound completion timeout register */ | ||
94 | uint pex_conf_tor; /* 0x.010 - PCI Express configuration timeout register */ | ||
95 | char res2[12]; | ||
96 | uint pex_pme_mes_dr; /* 0x.020 - PCI Express PME and message detect register */ | ||
97 | uint pex_pme_mes_disr; /* 0x.024 - PCI Express PME and message disable register */ | ||
98 | uint pex_pme_mes_ier; /* 0x.028 - PCI Express PME and message interrupt enable register */ | ||
99 | uint pex_pmcr; /* 0x.02c - PCI Express power management command register */ | ||
100 | char res3[3024]; | ||
101 | uint pexotar0; /* 0x.c00 - PCI Express outbound translation address register 0 */ | ||
102 | uint pexotear0; /* 0x.c04 - PCI Express outbound translation extended address register 0*/ | ||
103 | char res4[8]; | ||
104 | uint pexowar0; /* 0x.c10 - PCI Express outbound window attributes register 0*/ | ||
105 | char res5[12]; | ||
106 | uint pexotar1; /* 0x.c20 - PCI Express outbound translation address register 1 */ | ||
107 | uint pexotear1; /* 0x.c24 - PCI Express outbound translation extended address register 1*/ | ||
108 | uint pexowbar1; /* 0x.c28 - PCI Express outbound window base address register 1*/ | ||
109 | char res6[4]; | ||
110 | uint pexowar1; /* 0x.c30 - PCI Express outbound window attributes register 1*/ | ||
111 | char res7[12]; | ||
112 | uint pexotar2; /* 0x.c40 - PCI Express outbound translation address register 2 */ | ||
113 | uint pexotear2; /* 0x.c44 - PCI Express outbound translation extended address register 2*/ | ||
114 | uint pexowbar2; /* 0x.c48 - PCI Express outbound window base address register 2*/ | ||
115 | char res8[4]; | ||
116 | uint pexowar2; /* 0x.c50 - PCI Express outbound window attributes register 2*/ | ||
117 | char res9[12]; | ||
118 | uint pexotar3; /* 0x.c60 - PCI Express outbound translation address register 3 */ | ||
119 | uint pexotear3; /* 0x.c64 - PCI Express outbound translation extended address register 3*/ | ||
120 | uint pexowbar3; /* 0x.c68 - PCI Express outbound window base address register 3*/ | ||
121 | char res10[4]; | ||
122 | uint pexowar3; /* 0x.c70 - PCI Express outbound window attributes register 3*/ | ||
123 | char res11[12]; | ||
124 | uint pexotar4; /* 0x.c80 - PCI Express outbound translation address register 4 */ | ||
125 | uint pexotear4; /* 0x.c84 - PCI Express outbound translation extended address register 4*/ | ||
126 | uint pexowbar4; /* 0x.c88 - PCI Express outbound window base address register 4*/ | ||
127 | char res12[4]; | ||
128 | uint pexowar4; /* 0x.c90 - PCI Express outbound window attributes register 4*/ | ||
129 | char res13[12]; | ||
130 | char res14[256]; | ||
131 | uint pexitar3; /* 0x.da0 - PCI Express inbound translation address register 3 */ | ||
132 | char res15[4]; | ||
133 | uint pexiwbar3; /* 0x.da8 - PCI Express inbound window base address register 3 */ | ||
134 | uint pexiwbear3; /* 0x.dac - PCI Express inbound window base extended address register 3 */ | ||
135 | uint pexiwar3; /* 0x.db0 - PCI Express inbound window attributes register 3 */ | ||
136 | char res16[12]; | ||
137 | uint pexitar2; /* 0x.dc0 - PCI Express inbound translation address register 2 */ | ||
138 | char res17[4]; | ||
139 | uint pexiwbar2; /* 0x.dc8 - PCI Express inbound window base address register 2 */ | ||
140 | uint pexiwbear2; /* 0x.dcc - PCI Express inbound window base extended address register 2 */ | ||
141 | uint pexiwar2; /* 0x.dd0 - PCI Express inbound window attributes register 2 */ | ||
142 | char res18[12]; | ||
143 | uint pexitar1; /* 0x.de0 - PCI Express inbound translation address register 2 */ | ||
144 | char res19[4]; | ||
145 | uint pexiwbar1; /* 0x.de8 - PCI Express inbound window base address register 2 */ | ||
146 | uint pexiwbear1; /* 0x.dec - PCI Express inbound window base extended address register 2 */ | ||
147 | uint pexiwar1; /* 0x.df0 - PCI Express inbound window attributes register 2 */ | ||
148 | char res20[12]; | ||
149 | uint pex_err_dr; /* 0x.e00 - PCI Express error detect register */ | ||
150 | char res21[4]; | ||
151 | uint pex_err_en; /* 0x.e08 - PCI Express error interrupt enable register */ | ||
152 | char res22[4]; | ||
153 | uint pex_err_disr; /* 0x.e10 - PCI Express error disable register */ | ||
154 | char res23[12]; | ||
155 | uint pex_err_cap_stat; /* 0x.e20 - PCI Express error capture status register */ | ||
156 | char res24[4]; | ||
157 | uint pex_err_cap_r0; /* 0x.e28 - PCI Express error capture register 0 */ | ||
158 | uint pex_err_cap_r1; /* 0x.e2c - PCI Express error capture register 0 */ | ||
159 | uint pex_err_cap_r2; /* 0x.e30 - PCI Express error capture register 0 */ | ||
160 | uint pex_err_cap_r3; /* 0x.e34 - PCI Express error capture register 0 */ | ||
161 | } ccsr_pex_t; | ||
162 | |||
163 | /* Global Utility Registers */ | 88 | /* Global Utility Registers */ |
164 | typedef struct ccsr_guts { | 89 | typedef struct ccsr_guts { |
165 | uint porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ | 90 | uint porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ |
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 301c9bb308b1..350c9bdb31dc 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -11,7 +11,12 @@
11 | 11 | ||
12 | /* Check of existence of legacy devices */ | 12 | /* Check of existence of legacy devices */ |
13 | extern int check_legacy_ioport(unsigned long base_port); | 13 | extern int check_legacy_ioport(unsigned long base_port); |
14 | #define PNPBIOS_BASE 0xf000 /* only relevant for PReP */ | 14 | #define I8042_DATA_REG 0x60 |
15 | #define FDC_BASE 0x3f0 | ||
16 | /* only relevant for PReP */ | ||
17 | #define _PIDXR 0x279 | ||
18 | #define _PNPWRP 0xa79 | ||
19 | #define PNPBIOS_BASE 0xf000 | ||
15 | 20 | ||
16 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
17 | #include <asm/page.h> | 22 | #include <asm/page.h> |
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index 3a5dd492588f..f850ca7020ed 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -87,6 +87,11 @@ extern void arch_remove_kprobe(struct kprobe *p);
87 | struct arch_specific_insn { | 87 | struct arch_specific_insn { |
88 | /* copy of original instruction */ | 88 | /* copy of original instruction */ |
89 | kprobe_opcode_t *insn; | 89 | kprobe_opcode_t *insn; |
90 | /* | ||
91 | * Set in kprobes code, initially to 0. If the instruction can be | ||
92 | * eumulated, this is set to 1, if not, to -1. | ||
93 | */ | ||
94 | int boostable; | ||
90 | }; | 95 | }; |
91 | 96 | ||
92 | struct prev_kprobe { | 97 | struct prev_kprobe { |
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 1b04e5723548..b204926ce913 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -153,9 +153,6 @@ struct machdep_calls {
153 | */ | 153 | */ |
154 | long (*feature_call)(unsigned int feature, ...); | 154 | long (*feature_call)(unsigned int feature, ...); |
155 | 155 | ||
156 | /* Check availability of legacy devices like i8042 */ | ||
157 | int (*check_legacy_ioport)(unsigned int baseport); | ||
158 | |||
159 | /* Get legacy PCI/IDE interrupt mapping */ | 156 | /* Get legacy PCI/IDE interrupt mapping */ |
160 | int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel); | 157 | int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel); |
161 | 158 | ||
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
new file mode 100644
index 000000000000..6739457d8bc0
--- /dev/null
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -0,0 +1,400 @@
1 | #ifndef _ASM_POWERPC_MMU_HASH64_H_ | ||
2 | #define _ASM_POWERPC_MMU_HASH64_H_ | ||
3 | /* | ||
4 | * PowerPC64 memory management structures | ||
5 | * | ||
6 | * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com> | ||
7 | * PPC64 rework. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version | ||
12 | * 2 of the License, or (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <asm/asm-compat.h> | ||
16 | #include <asm/page.h> | ||
17 | |||
18 | /* | ||
19 | * Segment table | ||
20 | */ | ||
21 | |||
22 | #define STE_ESID_V 0x80 | ||
23 | #define STE_ESID_KS 0x20 | ||
24 | #define STE_ESID_KP 0x10 | ||
25 | #define STE_ESID_N 0x08 | ||
26 | |||
27 | #define STE_VSID_SHIFT 12 | ||
28 | |||
29 | /* Location of cpu0's segment table */ | ||
30 | #define STAB0_PAGE 0x6 | ||
31 | #define STAB0_OFFSET (STAB0_PAGE << 12) | ||
32 | #define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START) | ||
33 | |||
34 | #ifndef __ASSEMBLY__ | ||
35 | extern char initial_stab[]; | ||
36 | #endif /* ! __ASSEMBLY */ | ||
37 | |||
38 | /* | ||
39 | * SLB | ||
40 | */ | ||
41 | |||
42 | #define SLB_NUM_BOLTED 3 | ||
43 | #define SLB_CACHE_ENTRIES 8 | ||
44 | |||
45 | /* Bits in the SLB ESID word */ | ||
46 | #define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */ | ||
47 | |||
48 | /* Bits in the SLB VSID word */ | ||
49 | #define SLB_VSID_SHIFT 12 | ||
50 | #define SLB_VSID_B ASM_CONST(0xc000000000000000) | ||
51 | #define SLB_VSID_B_256M ASM_CONST(0x0000000000000000) | ||
52 | #define SLB_VSID_B_1T ASM_CONST(0x4000000000000000) | ||
53 | #define SLB_VSID_KS ASM_CONST(0x0000000000000800) | ||
54 | #define SLB_VSID_KP ASM_CONST(0x0000000000000400) | ||
55 | #define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */ | ||
56 | #define SLB_VSID_L ASM_CONST(0x0000000000000100) | ||
57 | #define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */ | ||
58 | #define SLB_VSID_LP ASM_CONST(0x0000000000000030) | ||
59 | #define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000) | ||
60 | #define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010) | ||
61 | #define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020) | ||
62 | #define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030) | ||
63 | #define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP) | ||
64 | |||
65 | #define SLB_VSID_KERNEL (SLB_VSID_KP) | ||
66 | #define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C) | ||
67 | |||
68 | #define SLBIE_C (0x08000000) | ||
69 | |||
70 | /* | ||
71 | * Hash table | ||
72 | */ | ||
73 | |||
74 | #define HPTES_PER_GROUP 8 | ||
75 | |||
76 | #define HPTE_V_AVPN_SHIFT 7 | ||
77 | #define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80) | ||
78 | #define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT) | ||
79 | #define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN)) | ||
80 | #define HPTE_V_BOLTED ASM_CONST(0x0000000000000010) | ||
81 | #define HPTE_V_LOCK ASM_CONST(0x0000000000000008) | ||
82 | #define HPTE_V_LARGE ASM_CONST(0x0000000000000004) | ||
83 | #define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002) | ||
84 | #define HPTE_V_VALID ASM_CONST(0x0000000000000001) | ||
85 | |||
86 | #define HPTE_R_PP0 ASM_CONST(0x8000000000000000) | ||
87 | #define HPTE_R_TS ASM_CONST(0x4000000000000000) | ||
88 | #define HPTE_R_RPN_SHIFT 12 | ||
89 | #define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) | ||
90 | #define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff) | ||
91 | #define HPTE_R_PP ASM_CONST(0x0000000000000003) | ||
92 | #define HPTE_R_N ASM_CONST(0x0000000000000004) | ||
93 | #define HPTE_R_C ASM_CONST(0x0000000000000080) | ||
94 | #define HPTE_R_R ASM_CONST(0x0000000000000100) | ||
95 | |||
96 | /* Values for PP (assumes Ks=0, Kp=1) */ | ||
97 | /* pp0 will always be 0 for linux */ | ||
98 | #define PP_RWXX 0 /* Supervisor read/write, User none */ | ||
99 | #define PP_RWRX 1 /* Supervisor read/write, User read */ | ||
100 | #define PP_RWRW 2 /* Supervisor read/write, User read/write */ | ||
101 | #define PP_RXRX 3 /* Supervisor read, User read */ | ||
102 | |||
103 | #ifndef __ASSEMBLY__ | ||
104 | |||
105 | typedef struct { | ||
106 | unsigned long v; | ||
107 | unsigned long r; | ||
108 | } hpte_t; | ||
109 | |||
110 | extern hpte_t *htab_address; | ||
111 | extern unsigned long htab_size_bytes; | ||
112 | extern unsigned long htab_hash_mask; | ||
113 | |||
114 | /* | ||
115 | * Page size definition | ||
116 | * | ||
117 | * shift : is the "PAGE_SHIFT" value for that page size | ||
118 | * sllp : is a bit mask with the value of SLB L || LP to be or'ed | ||
119 | * directly to a slbmte "vsid" value | ||
120 | * penc : is the HPTE encoding mask for the "LP" field: | ||
121 | * | ||
122 | */ | ||
123 | struct mmu_psize_def | ||
124 | { | ||
125 | unsigned int shift; /* number of bits */ | ||
126 | unsigned int penc; /* HPTE encoding */ | ||
127 | unsigned int tlbiel; /* tlbiel supported for that page size */ | ||
128 | unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */ | ||
129 | unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */ | ||
130 | }; | ||
131 | |||
132 | #endif /* __ASSEMBLY__ */ | ||
133 | |||
134 | /* | ||
135 | * The kernel use the constants below to index in the page sizes array. | ||
136 | * The use of fixed constants for this purpose is better for performances | ||
137 | * of the low level hash refill handlers. | ||
138 | * | ||
139 | * A non supported page size has a "shift" field set to 0 | ||
140 | * | ||
141 | * Any new page size being implemented can get a new entry in here. Whether | ||
142 | * the kernel will use it or not is a different matter though. The actual page | ||
143 | * size used by hugetlbfs is not defined here and may be made variable | ||
144 | */ | ||
145 | |||
146 | #define MMU_PAGE_4K 0 /* 4K */ | ||
147 | #define MMU_PAGE_64K 1 /* 64K */ | ||
148 | #define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */ | ||
149 | #define MMU_PAGE_1M 3 /* 1M */ | ||
150 | #define MMU_PAGE_16M 4 /* 16M */ | ||
151 | #define MMU_PAGE_16G 5 /* 16G */ | ||
152 | #define MMU_PAGE_COUNT 6 | ||
153 | |||
154 | #ifndef __ASSEMBLY__ | ||
155 | |||
156 | /* | ||
157 | * The current system page sizes | ||
158 | */ | ||
159 | extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; | ||
160 | extern int mmu_linear_psize; | ||
161 | extern int mmu_virtual_psize; | ||
162 | extern int mmu_vmalloc_psize; | ||
163 | extern int mmu_io_psize; | ||
164 | |||
165 | /* | ||
166 | * If the processor supports 64k normal pages but not 64k cache | ||
167 | * inhibited pages, we have to be prepared to switch processes | ||
168 | * to use 4k pages when they create cache-inhibited mappings. | ||
169 | * If this is the case, mmu_ci_restrictions will be set to 1. | ||
170 | */ | ||
171 | extern int mmu_ci_restrictions; | ||
172 | |||
173 | #ifdef CONFIG_HUGETLB_PAGE | ||
174 | /* | ||
175 | * The page size index of the huge pages for use by hugetlbfs | ||
176 | */ | ||
177 | extern int mmu_huge_psize; | ||
178 | |||
179 | #endif /* CONFIG_HUGETLB_PAGE */ | ||
180 | |||
181 | /* | ||
182 | * This function sets the AVPN and L fields of the HPTE appropriately | ||
183 | * for the page size | ||
184 | */ | ||
185 | static inline unsigned long hpte_encode_v(unsigned long va, int psize) | ||
186 | { | ||
187 | unsigned long v = | ||
188 | v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); | ||
189 | v <<= HPTE_V_AVPN_SHIFT; | ||
190 | if (psize != MMU_PAGE_4K) | ||
191 | v |= HPTE_V_LARGE; | ||
192 | return v; | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * This function sets the ARPN, and LP fields of the HPTE appropriately | ||
197 | * for the page size. We assume the pa is already "clean" that is properly | ||
198 | * aligned for the requested page size | ||
199 | */ | ||
200 | static inline unsigned long hpte_encode_r(unsigned long pa, int psize) | ||
201 | { | ||
202 | unsigned long r; | ||
203 | |||
204 | /* A 4K page needs no special encoding */ | ||
205 | if (psize == MMU_PAGE_4K) | ||
206 | return pa & HPTE_R_RPN; | ||
207 | else { | ||
208 | unsigned int penc = mmu_psize_defs[psize].penc; | ||
209 | unsigned int shift = mmu_psize_defs[psize].shift; | ||
210 | return (pa & ~((1ul << shift) - 1)) | (penc << 12); | ||
211 | } | ||
212 | return r; | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * This hashes a virtual address for a 256Mb segment only for now | ||
217 | */ | ||
218 | |||
219 | static inline unsigned long hpt_hash(unsigned long va, unsigned int shift) | ||
220 | { | ||
221 | return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift); | ||
222 | } | ||
223 | |||
224 | extern int __hash_page_4K(unsigned long ea, unsigned long access, | ||
225 | unsigned long vsid, pte_t *ptep, unsigned long trap, | ||
226 | unsigned int local); | ||
227 | extern int __hash_page_64K(unsigned long ea, unsigned long access, | ||
228 | unsigned long vsid, pte_t *ptep, unsigned long trap, | ||
229 | unsigned int local); | ||
230 | struct mm_struct; | ||
231 | extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); | ||
232 | extern int hash_huge_page(struct mm_struct *mm, unsigned long access, | ||
233 | unsigned long ea, unsigned long vsid, int local, | ||
234 | unsigned long trap); | ||
235 | |||
236 | extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | ||
237 | unsigned long pstart, unsigned long mode, | ||
238 | int psize); | ||
239 | |||
240 | extern void htab_initialize(void); | ||
241 | extern void htab_initialize_secondary(void); | ||
242 | extern void hpte_init_native(void); | ||
243 | extern void hpte_init_lpar(void); | ||
244 | extern void hpte_init_iSeries(void); | ||
245 | extern void hpte_init_beat(void); | ||
246 | |||
247 | extern void stabs_alloc(void); | ||
248 | extern void slb_initialize(void); | ||
249 | extern void slb_flush_and_rebolt(void); | ||
250 | extern void stab_initialize(unsigned long stab); | ||
251 | |||
252 | #endif /* __ASSEMBLY__ */ | ||
253 | |||
254 | /* | ||
255 | * VSID allocation | ||
256 | * | ||
257 | * We first generate a 36-bit "proto-VSID". For kernel addresses this | ||
258 | * is equal to the ESID, for user addresses it is: | ||
259 | * (context << 15) | (esid & 0x7fff) | ||
260 | * | ||
261 | * The two forms are distinguishable because the top bit is 0 for user | ||
262 | * addresses, whereas the top two bits are 1 for kernel addresses. | ||
263 | * Proto-VSIDs with the top two bits equal to 0b10 are reserved for | ||
264 | * now. | ||
265 | * | ||
266 | * The proto-VSIDs are then scrambled into real VSIDs with the | ||
267 | * multiplicative hash: | ||
268 | * | ||
269 | * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS | ||
270 | * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7 | ||
271 | * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF | ||
272 | * | ||
273 | * This scramble is only well defined for proto-VSIDs below | ||
274 | * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are | ||
275 | * reserved. VSID_MULTIPLIER is prime, so in particular it is | ||
276 | * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. | ||
277 | * Because the modulus is 2^n-1 we can compute it efficiently without | ||
278 | * a divide or extra multiply (see below). | ||
279 | * | ||
280 | * This scheme has several advantages over older methods: | ||
281 | * | ||
282 | * - We have VSIDs allocated for every kernel address | ||
283 | * (i.e. everything above 0xC000000000000000), except the very top | ||
284 | * segment, which simplifies several things. | ||
285 | * | ||
286 | * - We allow for 15 significant bits of ESID and 20 bits of | ||
287 | * context for user addresses. i.e. 8T (43 bits) of address space for | ||
288 | * up to 1M contexts (although the page table structure and context | ||
289 | * allocation will need changes to take advantage of this). | ||
290 | * | ||
291 | * - The scramble function gives robust scattering in the hash | ||
292 | * table (at least based on some initial results). The previous | ||
293 | * method was more susceptible to pathological cases giving excessive | ||
294 | * hash collisions. | ||
295 | */ | ||
296 | /* | ||
297 | * WARNING - If you change these you must make sure the asm | ||
298 | * implementations in slb_allocate (slb_low.S), do_stab_bolted | ||
299 | * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly. | ||
300 | * | ||
301 | * You'll also need to change the precomputed VSID values in head.S | ||
302 | * which are used by the iSeries firmware. | ||
303 | */ | ||
304 | |||
305 | #define VSID_MULTIPLIER ASM_CONST(200730139) /* 28-bit prime */ | ||
306 | #define VSID_BITS 36 | ||
307 | #define VSID_MODULUS ((1UL<<VSID_BITS)-1) | ||
308 | |||
309 | #define CONTEXT_BITS 19 | ||
310 | #define USER_ESID_BITS 16 | ||
311 | |||
312 | #define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) | ||
313 | |||
314 | /* | ||
315 | * This macro generates asm code to compute the VSID scramble | ||
316 | * function. Used in slb_allocate() and do_stab_bolted. The function | ||
317 | * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS | ||
318 | * | ||
319 | * rt = register continaing the proto-VSID and into which the | ||
320 | * VSID will be stored | ||
321 | * rx = scratch register (clobbered) | ||
322 | * | ||
323 | * - rt and rx must be different registers | ||
324 | * - The answer will end up in the low 36 bits of rt. The higher | ||
325 | * bits may contain other garbage, so you may need to mask the | ||
326 | * result. | ||
327 | */ | ||
328 | #define ASM_VSID_SCRAMBLE(rt, rx) \ | ||
329 | lis rx,VSID_MULTIPLIER@h; \ | ||
330 | ori rx,rx,VSID_MULTIPLIER@l; \ | ||
331 | mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \ | ||
332 | \ | ||
333 | srdi rx,rt,VSID_BITS; \ | ||
334 | clrldi rt,rt,(64-VSID_BITS); \ | ||
335 | add rt,rt,rx; /* add high and low bits */ \ | ||
336 | /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ | ||
337 | * 2^36-1+2^28-1. That in particular means that if r3 >= \ | ||
338 | * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ | ||
339 | * the bit clear, r3 already has the answer we want, if it \ | ||
340 | * doesn't, the answer is the low 36 bits of r3+1. So in all \ | ||
341 | * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\ | ||
342 | addi rx,rt,1; \ | ||
343 | srdi rx,rx,VSID_BITS; /* extract 2^36 bit */ \ | ||
344 | add rt,rt,rx | ||
345 | |||
346 | |||
347 | #ifndef __ASSEMBLY__ | ||
348 | |||
349 | typedef unsigned long mm_context_id_t; | ||
350 | |||
351 | typedef struct { | ||
352 | mm_context_id_t id; | ||
353 | u16 user_psize; /* page size index */ | ||
354 | u16 sllp; /* SLB entry page size encoding */ | ||
355 | #ifdef CONFIG_HUGETLB_PAGE | ||
356 | u16 low_htlb_areas, high_htlb_areas; | ||
357 | #endif | ||
358 | unsigned long vdso_base; | ||
359 | } mm_context_t; | ||
360 | |||
361 | |||
362 | static inline unsigned long vsid_scramble(unsigned long protovsid) | ||
363 | { | ||
364 | #if 0 | ||
365 | /* The code below is equivalent to this function for arguments | ||
366 | * < 2^VSID_BITS, which is all this should ever be called | ||
367 | * with. However gcc is not clever enough to compute the | ||
368 | * modulus (2^n-1) without a second multiply. */ | ||
369 | return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS); | ||
370 | #else /* 1 */ | ||
371 | unsigned long x; | ||
372 | |||
373 | x = protovsid * VSID_MULTIPLIER; | ||
374 | x = (x >> VSID_BITS) + (x & VSID_MODULUS); | ||
375 | return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS; | ||
376 | #endif /* 1 */ | ||
377 | } | ||
378 | |||
379 | /* This is only valid for addresses >= KERNELBASE */ | ||
380 | static inline unsigned long get_kernel_vsid(unsigned long ea) | ||
381 | { | ||
382 | return vsid_scramble(ea >> SID_SHIFT); | ||
383 | } | ||
384 | |||
385 | /* This is only valid for user addresses (which are below 2^41) */ | ||
386 | static inline unsigned long get_vsid(unsigned long context, unsigned long ea) | ||
387 | { | ||
388 | return vsid_scramble((context << USER_ESID_BITS) | ||
389 | | (ea >> SID_SHIFT)); | ||
390 | } | ||
391 | |||
392 | #define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS) | ||
393 | #define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea)) | ||
394 | |||
395 | /* Physical address used by some IO functions */ | ||
396 | typedef unsigned long phys_addr_t; | ||
397 | |||
398 | #endif /* __ASSEMBLY__ */ | ||
399 | |||
400 | #endif /* _ASM_POWERPC_MMU_HASH64_H_ */ | ||
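The VSID comment block above relies on the identity 2^36 == 1 (mod 2^36 - 1) to replace the modulo with a shift-and-add fold. As an illustrative userspace check only (not kernel code; the constants are copied from the header, the program itself is an assumption), the fold and the plain modulo agree for in-range proto-VSIDs without ever dividing:

```c
/* Illustrative check, not part of the patch: verify that the
 * vsid_scramble() fold matches (protovsid * MULTIPLIER) % (2^36 - 1)
 * for proto-VSIDs below 2^36. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VSID_MULTIPLIER	200730139ULL		/* 28-bit prime, as in the header */
#define VSID_BITS	36
#define VSID_MODULUS	((1ULL << VSID_BITS) - 1)

static uint64_t fold_scramble(uint64_t protovsid)
{
	uint64_t x = protovsid * VSID_MULTIPLIER;	/* < 2^64, no overflow */

	x = (x >> VSID_BITS) + (x & VSID_MODULUS);	/* 2^36 == 1 (mod 2^36-1) */
	return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}

int main(void)
{
	const uint64_t samples[] = { 1, 0x7fff, 0xc0000000ULL, 0x123456789ULL };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t p = samples[i];

		assert(fold_scramble(p) == (p * VSID_MULTIPLIER) % VSID_MODULUS);
		printf("proto-VSID 0x%09llx -> VSID 0x%09llx\n",
		       (unsigned long long)p,
		       (unsigned long long)fold_scramble(p));
	}
	return 0;
}
```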
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 200055a4b82b..06b3e6d336cb 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -2,407 +2,14 @@
2 | #define _ASM_POWERPC_MMU_H_ | 2 | #define _ASM_POWERPC_MMU_H_ |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #ifndef CONFIG_PPC64 | 5 | #ifdef CONFIG_PPC64 |
6 | #include <asm-ppc/mmu.h> | 6 | /* 64-bit classic hash table MMU */ |
7 | # include <asm/mmu-hash64.h> | ||
7 | #else | 8 | #else |
8 | 9 | /* 32-bit. FIXME: split up the 32-bit MMU types, and revise for | |
9 | /* | 10 | * arch/powerpc */ |
10 | * PowerPC memory management structures | 11 | # include <asm-ppc/mmu.h> |
11 | * | ||
12 | * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com> | ||
13 | * PPC64 rework. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | */ | ||
20 | |||
21 | #include <asm/asm-compat.h> | ||
22 | #include <asm/page.h> | ||
23 | |||
24 | /* | ||
25 | * Segment table | ||
26 | */ | ||
27 | |||
28 | #define STE_ESID_V 0x80 | ||
29 | #define STE_ESID_KS 0x20 | ||
30 | #define STE_ESID_KP 0x10 | ||
31 | #define STE_ESID_N 0x08 | ||
32 | |||
33 | #define STE_VSID_SHIFT 12 | ||
34 | |||
35 | /* Location of cpu0's segment table */ | ||
36 | #define STAB0_PAGE 0x6 | ||
37 | #define STAB0_OFFSET (STAB0_PAGE << 12) | ||
38 | #define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START) | ||
39 | |||
40 | #ifndef __ASSEMBLY__ | ||
41 | extern char initial_stab[]; | ||
42 | #endif /* ! __ASSEMBLY */ | ||
43 | |||
44 | /* | ||
45 | * SLB | ||
46 | */ | ||
47 | |||
48 | #define SLB_NUM_BOLTED 3 | ||
49 | #define SLB_CACHE_ENTRIES 8 | ||
50 | |||
51 | /* Bits in the SLB ESID word */ | ||
52 | #define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */ | ||
53 | |||
54 | /* Bits in the SLB VSID word */ | ||
55 | #define SLB_VSID_SHIFT 12 | ||
56 | #define SLB_VSID_B ASM_CONST(0xc000000000000000) | ||
57 | #define SLB_VSID_B_256M ASM_CONST(0x0000000000000000) | ||
58 | #define SLB_VSID_B_1T ASM_CONST(0x4000000000000000) | ||
59 | #define SLB_VSID_KS ASM_CONST(0x0000000000000800) | ||
60 | #define SLB_VSID_KP ASM_CONST(0x0000000000000400) | ||
61 | #define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */ | ||
62 | #define SLB_VSID_L ASM_CONST(0x0000000000000100) | ||
63 | #define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */ | ||
64 | #define SLB_VSID_LP ASM_CONST(0x0000000000000030) | ||
65 | #define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000) | ||
66 | #define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010) | ||
67 | #define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020) | ||
68 | #define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030) | ||
69 | #define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP) | ||
70 | |||
71 | #define SLB_VSID_KERNEL (SLB_VSID_KP) | ||
72 | #define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C) | ||
73 | |||
74 | #define SLBIE_C (0x08000000) | ||
75 | |||
76 | /* | ||
77 | * Hash table | ||
78 | */ | ||
79 | |||
80 | #define HPTES_PER_GROUP 8 | ||
81 | |||
82 | #define HPTE_V_AVPN_SHIFT 7 | ||
83 | #define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80) | ||
84 | #define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT) | ||
85 | #define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN)) | ||
86 | #define HPTE_V_BOLTED ASM_CONST(0x0000000000000010) | ||
87 | #define HPTE_V_LOCK ASM_CONST(0x0000000000000008) | ||
88 | #define HPTE_V_LARGE ASM_CONST(0x0000000000000004) | ||
89 | #define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002) | ||
90 | #define HPTE_V_VALID ASM_CONST(0x0000000000000001) | ||
91 | |||
92 | #define HPTE_R_PP0 ASM_CONST(0x8000000000000000) | ||
93 | #define HPTE_R_TS ASM_CONST(0x4000000000000000) | ||
94 | #define HPTE_R_RPN_SHIFT 12 | ||
95 | #define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) | ||
96 | #define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff) | ||
97 | #define HPTE_R_PP ASM_CONST(0x0000000000000003) | ||
98 | #define HPTE_R_N ASM_CONST(0x0000000000000004) | ||
99 | #define HPTE_R_C ASM_CONST(0x0000000000000080) | ||
100 | #define HPTE_R_R ASM_CONST(0x0000000000000100) | ||
101 | |||
102 | /* Values for PP (assumes Ks=0, Kp=1) */ | ||
103 | /* pp0 will always be 0 for linux */ | ||
104 | #define PP_RWXX 0 /* Supervisor read/write, User none */ | ||
105 | #define PP_RWRX 1 /* Supervisor read/write, User read */ | ||
106 | #define PP_RWRW 2 /* Supervisor read/write, User read/write */ | ||
107 | #define PP_RXRX 3 /* Supervisor read, User read */ | ||
108 | |||
109 | #ifndef __ASSEMBLY__ | ||
110 | |||
111 | typedef struct { | ||
112 | unsigned long v; | ||
113 | unsigned long r; | ||
114 | } hpte_t; | ||
115 | |||
116 | extern hpte_t *htab_address; | ||
117 | extern unsigned long htab_size_bytes; | ||
118 | extern unsigned long htab_hash_mask; | ||
119 | |||
120 | /* | ||
121 | * Page size definition | ||
122 | * | ||
123 | * shift : is the "PAGE_SHIFT" value for that page size | ||
124 | * sllp : is a bit mask with the value of SLB L || LP to be or'ed | ||
125 | * directly to a slbmte "vsid" value | ||
126 | * penc : is the HPTE encoding mask for the "LP" field: | ||
127 | * | ||
128 | */ | ||
129 | struct mmu_psize_def | ||
130 | { | ||
131 | unsigned int shift; /* number of bits */ | ||
132 | unsigned int penc; /* HPTE encoding */ | ||
133 | unsigned int tlbiel; /* tlbiel supported for that page size */ | ||
134 | unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */ | ||
135 | unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */ | ||
136 | }; | ||
137 | |||
138 | #endif /* __ASSEMBLY__ */ | ||
139 | |||
140 | /* | ||
141 | * The kernel use the constants below to index in the page sizes array. | ||
142 | * The use of fixed constants for this purpose is better for performances | ||
143 | * of the low level hash refill handlers. | ||
144 | * | ||
145 | * A non supported page size has a "shift" field set to 0 | ||
146 | * | ||
147 | * Any new page size being implemented can get a new entry in here. Whether | ||
148 | * the kernel will use it or not is a different matter though. The actual page | ||
149 | * size used by hugetlbfs is not defined here and may be made variable | ||
150 | */ | ||
151 | |||
152 | #define MMU_PAGE_4K 0 /* 4K */ | ||
153 | #define MMU_PAGE_64K 1 /* 64K */ | ||
154 | #define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */ | ||
155 | #define MMU_PAGE_1M 3 /* 1M */ | ||
156 | #define MMU_PAGE_16M 4 /* 16M */ | ||
157 | #define MMU_PAGE_16G 5 /* 16G */ | ||
158 | #define MMU_PAGE_COUNT 6 | ||
159 | |||
160 | #ifndef __ASSEMBLY__ | ||
161 | |||
162 | /* | ||
163 | * The current system page sizes | ||
164 | */ | ||
165 | extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; | ||
166 | extern int mmu_linear_psize; | ||
167 | extern int mmu_virtual_psize; | ||
168 | extern int mmu_vmalloc_psize; | ||
169 | extern int mmu_io_psize; | ||
170 | |||
171 | /* | ||
172 | * If the processor supports 64k normal pages but not 64k cache | ||
173 | * inhibited pages, we have to be prepared to switch processes | ||
174 | * to use 4k pages when they create cache-inhibited mappings. | ||
175 | * If this is the case, mmu_ci_restrictions will be set to 1. | ||
176 | */ | ||
177 | extern int mmu_ci_restrictions; | ||
178 | |||
179 | #ifdef CONFIG_HUGETLB_PAGE | ||
180 | /* | ||
181 | * The page size index of the huge pages for use by hugetlbfs | ||
182 | */ | ||
183 | extern int mmu_huge_psize; | ||
184 | |||
185 | #endif /* CONFIG_HUGETLB_PAGE */ | ||
186 | |||
187 | /* | ||
188 | * This function sets the AVPN and L fields of the HPTE appropriately | ||
189 | * for the page size | ||
190 | */ | ||
191 | static inline unsigned long hpte_encode_v(unsigned long va, int psize) | ||
192 | { | ||
193 | unsigned long v = | ||
194 | v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); | ||
195 | v <<= HPTE_V_AVPN_SHIFT; | ||
196 | if (psize != MMU_PAGE_4K) | ||
197 | v |= HPTE_V_LARGE; | ||
198 | return v; | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * This function sets the ARPN, and LP fields of the HPTE appropriately | ||
203 | * for the page size. We assume the pa is already "clean" that is properly | ||
204 | * aligned for the requested page size | ||
205 | */ | ||
206 | static inline unsigned long hpte_encode_r(unsigned long pa, int psize) | ||
207 | { | ||
208 | unsigned long r; | ||
209 | |||
210 | /* A 4K page needs no special encoding */ | ||
211 | if (psize == MMU_PAGE_4K) | ||
212 | return pa & HPTE_R_RPN; | ||
213 | else { | ||
214 | unsigned int penc = mmu_psize_defs[psize].penc; | ||
215 | unsigned int shift = mmu_psize_defs[psize].shift; | ||
216 | return (pa & ~((1ul << shift) - 1)) | (penc << 12); | ||
217 | } | ||
218 | return r; | ||
219 | } | ||
220 | |||
221 | /* | ||
222 | * This hashes a virtual address for a 256Mb segment only for now | ||
223 | */ | ||
224 | |||
225 | static inline unsigned long hpt_hash(unsigned long va, unsigned int shift) | ||
226 | { | ||
227 | return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift); | ||
228 | } | ||
229 | |||
230 | extern int __hash_page_4K(unsigned long ea, unsigned long access, | ||
231 | unsigned long vsid, pte_t *ptep, unsigned long trap, | ||
232 | unsigned int local); | ||
233 | extern int __hash_page_64K(unsigned long ea, unsigned long access, | ||
234 | unsigned long vsid, pte_t *ptep, unsigned long trap, | ||
235 | unsigned int local); | ||
236 | struct mm_struct; | ||
237 | extern int hash_huge_page(struct mm_struct *mm, unsigned long access, | ||
238 | unsigned long ea, unsigned long vsid, int local, | ||
239 | unsigned long trap); | ||
240 | |||
241 | extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | ||
242 | unsigned long pstart, unsigned long mode, | ||
243 | int psize); | ||
244 | |||
245 | extern void htab_initialize(void); | ||
246 | extern void htab_initialize_secondary(void); | ||
247 | extern void hpte_init_native(void); | ||
248 | extern void hpte_init_lpar(void); | ||
249 | extern void hpte_init_iSeries(void); | ||
250 | extern void hpte_init_beat(void); | ||
251 | |||
252 | extern void stabs_alloc(void); | ||
253 | extern void slb_initialize(void); | ||
254 | extern void slb_flush_and_rebolt(void); | ||
255 | extern void stab_initialize(unsigned long stab); | ||
256 | |||
257 | #endif /* __ASSEMBLY__ */ | ||
258 | |||
259 | /* | ||
260 | * VSID allocation | ||
261 | * | ||
262 | * We first generate a 36-bit "proto-VSID". For kernel addresses this | ||
263 | * is equal to the ESID, for user addresses it is: | ||
264 | * (context << 15) | (esid & 0x7fff) | ||
265 | * | ||
266 | * The two forms are distinguishable because the top bit is 0 for user | ||
267 | * addresses, whereas the top two bits are 1 for kernel addresses. | ||
268 | * Proto-VSIDs with the top two bits equal to 0b10 are reserved for | ||
269 | * now. | ||
270 | * | ||
271 | * The proto-VSIDs are then scrambled into real VSIDs with the | ||
272 | * multiplicative hash: | ||
273 | * | ||
274 | * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS | ||
275 | * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7 | ||
276 | * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF | ||
277 | * | ||
278 | * This scramble is only well defined for proto-VSIDs below | ||
279 | * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are | ||
280 | * reserved. VSID_MULTIPLIER is prime, so in particular it is | ||
281 | * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. | ||
282 | * Because the modulus is 2^n-1 we can compute it efficiently without | ||
283 | * a divide or extra multiply (see below). | ||
284 | * | ||
285 | * This scheme has several advantages over older methods: | ||
286 | * | ||
287 | * - We have VSIDs allocated for every kernel address | ||
288 | * (i.e. everything above 0xC000000000000000), except the very top | ||
289 | * segment, which simplifies several things. | ||
290 | * | ||
291 | * - We allow for 15 significant bits of ESID and 20 bits of | ||
292 | * context for user addresses. i.e. 8T (43 bits) of address space for | ||
293 | * up to 1M contexts (although the page table structure and context | ||
294 | * allocation will need changes to take advantage of this). | ||
295 | * | ||
296 | * - The scramble function gives robust scattering in the hash | ||
297 | * table (at least based on some initial results). The previous | ||
298 | * method was more susceptible to pathological cases giving excessive | ||
299 | * hash collisions. | ||
300 | */ | ||
301 | /* | ||
302 | * WARNING - If you change these you must make sure the asm | ||
303 | * implementations in slb_allocate (slb_low.S), do_stab_bolted | ||
304 | * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly. | ||
305 | * | ||
306 | * You'll also need to change the precomputed VSID values in head.S | ||
307 | * which are used by the iSeries firmware. | ||
308 | */ | ||
309 | |||
310 | #define VSID_MULTIPLIER ASM_CONST(200730139) /* 28-bit prime */ | ||
311 | #define VSID_BITS 36 | ||
312 | #define VSID_MODULUS ((1UL<<VSID_BITS)-1) | ||
313 | |||
314 | #define CONTEXT_BITS 19 | ||
315 | #define USER_ESID_BITS 16 | ||
316 | |||
317 | #define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) | ||
318 | |||
319 | /* | ||
320 | * This macro generates asm code to compute the VSID scramble | ||
321 | * function. Used in slb_allocate() and do_stab_bolted. The function | ||
322 | * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS | ||
323 | * | ||
324 | * rt = register continaing the proto-VSID and into which the | ||
325 | * VSID will be stored | ||
326 | * rx = scratch register (clobbered) | ||
327 | * | ||
328 | * - rt and rx must be different registers | ||
329 | * - The answer will end up in the low 36 bits of rt. The higher | ||
330 | * bits may contain other garbage, so you may need to mask the | ||
331 | * result. | ||
332 | */ | ||
333 | #define ASM_VSID_SCRAMBLE(rt, rx) \ | ||
334 | lis rx,VSID_MULTIPLIER@h; \ | ||
335 | ori rx,rx,VSID_MULTIPLIER@l; \ | ||
336 | mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \ | ||
337 | \ | ||
338 | srdi rx,rt,VSID_BITS; \ | ||
339 | clrldi rt,rt,(64-VSID_BITS); \ | ||
340 | add rt,rt,rx; /* add high and low bits */ \ | ||
341 | /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ | ||
342 | * 2^36-1+2^28-1. That in particular means that if r3 >= \ | ||
343 | * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ | ||
344 | * the bit clear, r3 already has the answer we want, if it \ | ||
345 | * doesn't, the answer is the low 36 bits of r3+1. So in all \ | ||
346 | * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\ | ||
347 | addi rx,rt,1; \ | ||
348 | srdi rx,rx,VSID_BITS; /* extract 2^36 bit */ \ | ||
349 | add rt,rt,rx | ||
350 | |||
351 | |||
352 | #ifndef __ASSEMBLY__ | ||
353 | |||
354 | typedef unsigned long mm_context_id_t; | ||
355 | |||
356 | typedef struct { | ||
357 | mm_context_id_t id; | ||
358 | u16 user_psize; /* page size index */ | ||
359 | u16 sllp; /* SLB entry page size encoding */ | ||
360 | #ifdef CONFIG_HUGETLB_PAGE | ||
361 | u16 low_htlb_areas, high_htlb_areas; | ||
362 | #endif | 12 | #endif |
363 | unsigned long vdso_base; | ||
364 | } mm_context_t; | ||
365 | |||
366 | |||
367 | static inline unsigned long vsid_scramble(unsigned long protovsid) | ||
368 | { | ||
369 | #if 0 | ||
370 | /* The code below is equivalent to this function for arguments | ||
371 | * < 2^VSID_BITS, which is all this should ever be called | ||
372 | * with. However gcc is not clever enough to compute the | ||
373 | * modulus (2^n-1) without a second multiply. */ | ||
374 | return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS); | ||
375 | #else /* 1 */ | ||
376 | unsigned long x; | ||
377 | |||
378 | x = protovsid * VSID_MULTIPLIER; | ||
379 | x = (x >> VSID_BITS) + (x & VSID_MODULUS); | ||
380 | return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS; | ||
381 | #endif /* 1 */ | ||
382 | } | ||
383 | |||
384 | /* This is only valid for addresses >= KERNELBASE */ | ||
385 | static inline unsigned long get_kernel_vsid(unsigned long ea) | ||
386 | { | ||
387 | return vsid_scramble(ea >> SID_SHIFT); | ||
388 | } | ||
389 | |||
390 | /* This is only valid for user addresses (which are below 2^41) */ | ||
391 | static inline unsigned long get_vsid(unsigned long context, unsigned long ea) | ||
392 | { | ||
393 | return vsid_scramble((context << USER_ESID_BITS) | ||
394 | | (ea >> SID_SHIFT)); | ||
395 | } | ||
396 | |||
397 | #define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS) | ||
398 | #define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea)) | ||
399 | |||
400 | /* Physical address used by some IO functions */ | ||
401 | typedef unsigned long phys_addr_t; | ||
402 | |||
403 | |||
404 | #endif /* __ASSEMBLY__ */ | ||
405 | 13 | ||
406 | #endif /* CONFIG_PPC64 */ | ||
407 | #endif /* __KERNEL__ */ | 14 | #endif /* __KERNEL__ */ |
408 | #endif /* _ASM_POWERPC_MMU_H_ */ | 15 | #endif /* _ASM_POWERPC_MMU_H_ */ |
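Not part of the patch: the fold-based reduction used by ASM_VSID_SCRAMBLE and vsid_scramble() in the hunk above can be checked in isolation. The small user-space C program below (a sketch, reusing the VSID_MULTIPLIER and VSID_BITS values from the hunk) verifies that folding the bits above bit 36 back into the low word gives the same answer as a direct modulo by 2^36 - 1, without ever dividing.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VSID_MULTIPLIER UINT64_C(200730139)          /* 28-bit prime, as above */
#define VSID_BITS       36
#define VSID_MODULUS    ((UINT64_C(1) << VSID_BITS) - 1)

/* Fold the high bits back into the low bits instead of dividing, the same
 * operations as vsid_scramble() / ASM_VSID_SCRAMBLE in the hunk above. */
static uint64_t scramble(uint64_t protovsid)
{
        uint64_t x = protovsid * VSID_MULTIPLIER;

        x = (x >> VSID_BITS) + (x & VSID_MODULUS);
        return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}

int main(void)
{
        uint64_t p;

        for (p = 0; p < 1000000; p++)
                assert(scramble(p) == (p * VSID_MULTIPLIER) % VSID_MODULUS);
        printf("fold-based reduction matches the direct modulus\n");
        return 0;
}

The trick works because 2^36 is congruent to 1 modulo 2^36 - 1, so the high word can simply be added to the low word; the final +1/shift step handles the leftover case where the sum lands on exactly 2^36 - 1.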
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h index cb204a71e912..e4d5fc5362a0 100644 --- a/include/asm-powerpc/mpic.h +++ b/include/asm-powerpc/mpic.h | |||
@@ -199,7 +199,7 @@ enum { | |||
199 | }; | 199 | }; |
200 | 200 | ||
201 | 201 | ||
202 | #ifdef CONFIG_MPIC_BROKEN_U3 | 202 | #ifdef CONFIG_MPIC_U3_HT_IRQS |
203 | /* Fixup table entry */ | 203 | /* Fixup table entry */ |
204 | struct mpic_irq_fixup | 204 | struct mpic_irq_fixup |
205 | { | 205 | { |
@@ -208,7 +208,7 @@ struct mpic_irq_fixup | |||
208 | u32 data; | 208 | u32 data; |
209 | unsigned int index; | 209 | unsigned int index; |
210 | }; | 210 | }; |
211 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | 211 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ |
212 | 212 | ||
213 | 213 | ||
214 | enum mpic_reg_type { | 214 | enum mpic_reg_type { |
@@ -239,7 +239,7 @@ struct mpic | |||
239 | 239 | ||
240 | /* The "linux" controller struct */ | 240 | /* The "linux" controller struct */ |
241 | struct irq_chip hc_irq; | 241 | struct irq_chip hc_irq; |
242 | #ifdef CONFIG_MPIC_BROKEN_U3 | 242 | #ifdef CONFIG_MPIC_U3_HT_IRQS |
243 | struct irq_chip hc_ht_irq; | 243 | struct irq_chip hc_ht_irq; |
244 | #endif | 244 | #endif |
245 | #ifdef CONFIG_SMP | 245 | #ifdef CONFIG_SMP |
@@ -268,7 +268,7 @@ struct mpic | |||
268 | /* Spurious vector to program into unused sources */ | 268 | /* Spurious vector to program into unused sources */ |
269 | unsigned int spurious_vec; | 269 | unsigned int spurious_vec; |
270 | 270 | ||
271 | #ifdef CONFIG_MPIC_BROKEN_U3 | 271 | #ifdef CONFIG_MPIC_U3_HT_IRQS |
272 | /* The fixup table */ | 272 | /* The fixup table */ |
273 | struct mpic_irq_fixup *fixups; | 273 | struct mpic_irq_fixup *fixups; |
274 | spinlock_t fixup_lock; | 274 | spinlock_t fixup_lock; |
@@ -313,7 +313,7 @@ struct mpic | |||
313 | /* Set this for a big-endian MPIC */ | 313 | /* Set this for a big-endian MPIC */ |
314 | #define MPIC_BIG_ENDIAN 0x00000002 | 314 | #define MPIC_BIG_ENDIAN 0x00000002 |
315 | /* Broken U3 MPIC */ | 315 | /* Broken U3 MPIC */ |
316 | #define MPIC_BROKEN_U3 0x00000004 | 316 | #define MPIC_U3_HT_IRQS 0x00000004 |
317 | /* Broken IPI registers (autodetected) */ | 317 | /* Broken IPI registers (autodetected) */ |
318 | #define MPIC_BROKEN_IPI 0x00000008 | 318 | #define MPIC_BROKEN_IPI 0x00000008 |
319 | /* MPIC wants a reset */ | 319 | /* MPIC wants a reset */ |
@@ -352,7 +352,7 @@ struct mpic | |||
352 | * @senses_num: number of entries in the array | 352 | * @senses_num: number of entries in the array |
353 | * | 353 | * |
354 | * Note about the sense array. If none is passed, all interrupts are | 354 | * Note about the sense array. If none is passed, all interrupts are |
355 | * set up to be level negative unless MPIC_BROKEN_U3 is set in which | 355 | * set up to be level negative unless MPIC_U3_HT_IRQS is set in which |
356 | * case they are edge positive (and the array is ignored anyway). | 356 | * case they are edge positive (and the array is ignored anyway). |
357 | * The values in the array start at the first source of the MPIC, | 357 | * The values in the array start at the first source of the MPIC, |
358 | * that is senses[0] corresponds to linux irq "irq_offset". | 358 | * that is senses[0] corresponds to linux irq "irq_offset". |
diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h index a889b2005bf5..4f1aabe0ce73 100644 --- a/include/asm-powerpc/of_device.h +++ b/include/asm-powerpc/of_device.h | |||
@@ -32,5 +32,8 @@ extern int of_device_register(struct of_device *ofdev); | |||
32 | extern void of_device_unregister(struct of_device *ofdev); | 32 | extern void of_device_unregister(struct of_device *ofdev); |
33 | extern void of_release_dev(struct device *dev); | 33 | extern void of_release_dev(struct device *dev); |
34 | 34 | ||
35 | extern int of_device_uevent(struct device *dev, | ||
36 | char **envp, int num_envp, char *buffer, int buffer_size); | ||
37 | |||
35 | #endif /* __KERNEL__ */ | 38 | #endif /* __KERNEL__ */ |
36 | #endif /* _ASM_POWERPC_OF_DEVICE_H */ | 39 | #endif /* _ASM_POWERPC_OF_DEVICE_H */ |
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h index 94c0ad2bff96..8d6b47f7b300 100644 --- a/include/asm-powerpc/oprofile_impl.h +++ b/include/asm-powerpc/oprofile_impl.h | |||
@@ -57,6 +57,8 @@ extern struct op_powerpc_model op_model_rs64; | |||
57 | extern struct op_powerpc_model op_model_power4; | 57 | extern struct op_powerpc_model op_model_power4; |
58 | extern struct op_powerpc_model op_model_7450; | 58 | extern struct op_powerpc_model op_model_7450; |
59 | extern struct op_powerpc_model op_model_cell; | 59 | extern struct op_powerpc_model op_model_cell; |
60 | extern struct op_powerpc_model op_model_pa6t; | ||
61 | |||
60 | 62 | ||
61 | /* All the classic PPC parts use these */ | 63 | /* All the classic PPC parts use these */ |
62 | static inline unsigned int classic_ctr_read(unsigned int i) | 64 | static inline unsigned int classic_ctr_read(unsigned int i) |
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h index 0d3adc09c847..cf95274f735e 100644 --- a/include/asm-powerpc/paca.h +++ b/include/asm-powerpc/paca.h | |||
@@ -70,6 +70,7 @@ struct paca_struct { | |||
70 | s16 hw_cpu_id; /* Physical processor number */ | 70 | s16 hw_cpu_id; /* Physical processor number */ |
71 | u8 cpu_start; /* At startup, processor spins until */ | 71 | u8 cpu_start; /* At startup, processor spins until */ |
72 | /* this becomes non-zero. */ | 72 | /* this becomes non-zero. */ |
73 | struct slb_shadow *slb_shadow_ptr; | ||
73 | 74 | ||
74 | /* | 75 | /* |
75 | * Now, starting in cacheline 2, the exception save areas | 76 | * Now, starting in cacheline 2, the exception save areas |
@@ -93,6 +94,7 @@ struct paca_struct { | |||
93 | u64 stab_rr; /* stab/slb round-robin counter */ | 94 | u64 stab_rr; /* stab/slb round-robin counter */ |
94 | u64 saved_r1; /* r1 save for RTAS calls */ | 95 | u64 saved_r1; /* r1 save for RTAS calls */ |
95 | u64 saved_msr; /* MSR saved here by enter_rtas */ | 96 | u64 saved_msr; /* MSR saved here by enter_rtas */ |
97 | u16 trap_save; /* Used when bad stack is encountered */ | ||
96 | u8 soft_enabled; /* irq soft-enable flag */ | 98 | u8 soft_enabled; /* irq soft-enable flag */ |
97 | u8 hard_enabled; /* set if irqs are enabled in MSR */ | 99 | u8 hard_enabled; /* set if irqs are enabled in MSR */ |
98 | u8 io_sync; /* writel() needs spin_unlock sync */ | 100 | u8 io_sync; /* writel() needs spin_unlock sync */ |
@@ -101,8 +103,6 @@ struct paca_struct { | |||
101 | u64 user_time; /* accumulated usermode TB ticks */ | 103 | u64 user_time; /* accumulated usermode TB ticks */ |
102 | u64 system_time; /* accumulated system TB ticks */ | 104 | u64 system_time; /* accumulated system TB ticks */ |
103 | u64 startpurr; /* PURR/TB value snapshot */ | 105 | u64 startpurr; /* PURR/TB value snapshot */ |
104 | |||
105 | struct slb_shadow *slb_shadow_ptr; | ||
106 | }; | 106 | }; |
107 | 107 | ||
108 | extern struct paca_struct paca[]; | 108 | extern struct paca_struct paca[]; |
diff --git a/include/asm-powerpc/parport.h b/include/asm-powerpc/parport.h index 3fca21ddf546..b37b81e37278 100644 --- a/include/asm-powerpc/parport.h +++ b/include/asm-powerpc/parport.h | |||
@@ -20,18 +20,18 @@ extern struct parport *parport_pc_probe_port (unsigned long int base, | |||
20 | static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) | 20 | static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) |
21 | { | 21 | { |
22 | struct device_node *np; | 22 | struct device_node *np; |
23 | u32 *prop; | 23 | const u32 *prop; |
24 | u32 io1, io2; | 24 | u32 io1, io2; |
25 | int propsize; | 25 | int propsize; |
26 | int count = 0; | 26 | int count = 0; |
27 | for (np = NULL; (np = of_find_compatible_node(np, | 27 | for (np = NULL; (np = of_find_compatible_node(np, |
28 | "parallel", | 28 | "parallel", |
29 | "pnpPNP,400")) != NULL;) { | 29 | "pnpPNP,400")) != NULL;) { |
30 | prop = (u32 *)get_property(np, "reg", &propsize); | 30 | prop = of_get_property(np, "reg", &propsize); |
31 | if (!prop || propsize > 6*sizeof(u32)) | 31 | if (!prop || propsize > 6*sizeof(u32)) |
32 | continue; | 32 | continue; |
33 | io1 = prop[1]; io2 = prop[2]; | 33 | io1 = prop[1]; io2 = prop[2]; |
34 | prop = (u32 *)get_property(np, "interrupts", NULL); | 34 | prop = of_get_property(np, "interrupts", NULL); |
35 | if (!prop) | 35 | if (!prop) |
36 | continue; | 36 | continue; |
37 | if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL) | 37 | if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL) |
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h index ac656ee6bb19..ce0f13e8eb14 100644 --- a/include/asm-powerpc/pci.h +++ b/include/asm-powerpc/pci.h | |||
@@ -70,19 +70,22 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | |||
70 | */ | 70 | */ |
71 | #define PCI_DISABLE_MWI | 71 | #define PCI_DISABLE_MWI |
72 | 72 | ||
73 | extern struct dma_mapping_ops *pci_dma_ops; | 73 | #ifdef CONFIG_PCI |
74 | extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops); | ||
75 | extern struct dma_mapping_ops *get_pci_dma_ops(void); | ||
74 | 76 | ||
75 | /* For DAC DMA, we currently don't support it by default, but | 77 | /* For DAC DMA, we currently don't support it by default, but |
76 | * we let 64-bit platforms override this. | 78 | * we let 64-bit platforms override this. |
77 | */ | 79 | */ |
78 | static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask) | 80 | static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask) |
79 | { | 81 | { |
80 | if (pci_dma_ops && pci_dma_ops->dac_dma_supported) | 82 | struct dma_mapping_ops *d = get_pci_dma_ops(); |
81 | return pci_dma_ops->dac_dma_supported(&hwdev->dev, mask); | 83 | |
84 | if (d && d->dac_dma_supported) | ||
85 | return d->dac_dma_supported(&hwdev->dev, mask); | ||
82 | return 0; | 86 | return 0; |
83 | } | 87 | } |
84 | 88 | ||
85 | #ifdef CONFIG_PCI | ||
86 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | 89 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, |
87 | enum pci_dma_burst_strategy *strat, | 90 | enum pci_dma_burst_strategy *strat, |
88 | unsigned long *strategy_parameter) | 91 | unsigned long *strategy_parameter) |
@@ -99,6 +102,9 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
99 | *strat = PCI_DMA_BURST_MULTIPLE; | 102 | *strat = PCI_DMA_BURST_MULTIPLE; |
100 | *strategy_parameter = cacheline_size; | 103 | *strategy_parameter = cacheline_size; |
101 | } | 104 | } |
105 | #else /* CONFIG_PCI */ | ||
106 | #define set_pci_dma_ops(d) | ||
107 | #define get_pci_dma_ops() NULL | ||
102 | #endif | 108 | #endif |
103 | 109 | ||
104 | extern int pci_domain_nr(struct pci_bus *bus); | 110 | extern int pci_domain_nr(struct pci_bus *bus); |
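A hedged sketch of the caller side of the pci.h change above: platform code that used to assign pci_dma_ops directly now goes through set_pci_dma_ops()/get_pci_dma_ops(), which also gives the !CONFIG_PCI stubs something to compile against. The "my_platform" names are invented for illustration.

static struct dma_mapping_ops my_platform_dma_ops;     /* methods filled in elsewhere */

static void __init my_platform_pci_dma_setup(void)
{
        /* Previously: pci_dma_ops = &my_platform_dma_ops; */
        set_pci_dma_ops(&my_platform_dma_ops);
}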
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h index 345d9b07b3e2..a28fa8bc01da 100644 --- a/include/asm-powerpc/pgtable-4k.h +++ b/include/asm-powerpc/pgtable-4k.h | |||
@@ -97,3 +97,6 @@ | |||
97 | 97 | ||
98 | #define pud_ERROR(e) \ | 98 | #define pud_ERROR(e) \ |
99 | printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) | 99 | printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) |
100 | |||
101 | #define remap_4k_pfn(vma, addr, pfn, prot) \ | ||
102 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) | ||
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h index 4b7126c53f37..5e84f070eaf7 100644 --- a/include/asm-powerpc/pgtable-64k.h +++ b/include/asm-powerpc/pgtable-64k.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ | 35 | #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ |
36 | #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ | 36 | #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ |
37 | #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ | 37 | #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ |
38 | #define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ | ||
38 | #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ | 39 | #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ |
39 | #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ | 40 | #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ |
40 | 41 | ||
@@ -93,6 +94,10 @@ | |||
93 | #define pte_pagesize_index(pte) \ | 94 | #define pte_pagesize_index(pte) \ |
94 | (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) | 95 | (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) |
95 | 96 | ||
97 | #define remap_4k_pfn(vma, addr, pfn, prot) \ | ||
98 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ | ||
99 | __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) | ||
100 | |||
96 | #endif /* __ASSEMBLY__ */ | 101 | #endif /* __ASSEMBLY__ */ |
97 | #endif /* __KERNEL__ */ | 102 | #endif /* __KERNEL__ */ |
98 | #endif /* _ASM_POWERPC_PGTABLE_64K_H */ | 103 | #endif /* _ASM_POWERPC_PGTABLE_64K_H */ |
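The remap_4k_pfn() helper added to both pgtable-4k.h and pgtable-64k.h above lets a driver expose a single 4k page of registers even when the kernel runs with 64k pages (the 64k variant tags the PTE with _PAGE_4K_PFN). Below is a hedged sketch of a driver mmap() handler; everything except remap_4k_pfn() and pgprot_noncached() is made up for the example.

static unsigned long mydev_regs_phys;   /* hypothetical MMIO physical address */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn_4k = mydev_regs_phys >> 12;   /* 4k page frame number */

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return remap_4k_pfn(vma, vma->vm_start, pfn_4k, vma->vm_page_prot);
}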
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h index 10f52743f4ff..19edb6982b81 100644 --- a/include/asm-powerpc/pgtable.h +++ b/include/asm-powerpc/pgtable.h | |||
@@ -272,7 +272,10 @@ static inline pte_t pte_mkhuge(pte_t pte) { | |||
272 | return pte; } | 272 | return pte; } |
273 | 273 | ||
274 | /* Atomic PTE updates */ | 274 | /* Atomic PTE updates */ |
275 | static inline unsigned long pte_update(pte_t *p, unsigned long clr) | 275 | static inline unsigned long pte_update(struct mm_struct *mm, |
276 | unsigned long addr, | ||
277 | pte_t *ptep, unsigned long clr, | ||
278 | int huge) | ||
276 | { | 279 | { |
277 | unsigned long old, tmp; | 280 | unsigned long old, tmp; |
278 | 281 | ||
@@ -283,20 +286,15 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr) | |||
283 | andc %1,%0,%4 \n\ | 286 | andc %1,%0,%4 \n\ |
284 | stdcx. %1,0,%3 \n\ | 287 | stdcx. %1,0,%3 \n\ |
285 | bne- 1b" | 288 | bne- 1b" |
286 | : "=&r" (old), "=&r" (tmp), "=m" (*p) | 289 | : "=&r" (old), "=&r" (tmp), "=m" (*ptep) |
287 | : "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY) | 290 | : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) |
288 | : "cc" ); | 291 | : "cc" ); |
292 | |||
293 | if (old & _PAGE_HASHPTE) | ||
294 | hpte_need_flush(mm, addr, ptep, old, huge); | ||
289 | return old; | 295 | return old; |
290 | } | 296 | } |
291 | 297 | ||
292 | /* PTE updating functions, this function puts the PTE in the | ||
293 | * batch, doesn't actually triggers the hash flush immediately, | ||
294 | * you need to call flush_tlb_pending() to do that. | ||
295 | * Pass -1 for "normal" size (4K or 64K) | ||
296 | */ | ||
297 | extern void hpte_update(struct mm_struct *mm, unsigned long addr, | ||
298 | pte_t *ptep, unsigned long pte, int huge); | ||
299 | |||
300 | static inline int __ptep_test_and_clear_young(struct mm_struct *mm, | 298 | static inline int __ptep_test_and_clear_young(struct mm_struct *mm, |
301 | unsigned long addr, pte_t *ptep) | 299 | unsigned long addr, pte_t *ptep) |
302 | { | 300 | { |
@@ -304,11 +302,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, | |||
304 | 302 | ||
305 | if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) | 303 | if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) |
306 | return 0; | 304 | return 0; |
307 | old = pte_update(ptep, _PAGE_ACCESSED); | 305 | old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); |
308 | if (old & _PAGE_HASHPTE) { | ||
309 | hpte_update(mm, addr, ptep, old, 0); | ||
310 | flush_tlb_pending(); | ||
311 | } | ||
312 | return (old & _PAGE_ACCESSED) != 0; | 306 | return (old & _PAGE_ACCESSED) != 0; |
313 | } | 307 | } |
314 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 308 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
@@ -331,9 +325,7 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, | |||
331 | 325 | ||
332 | if ((pte_val(*ptep) & _PAGE_DIRTY) == 0) | 326 | if ((pte_val(*ptep) & _PAGE_DIRTY) == 0) |
333 | return 0; | 327 | return 0; |
334 | old = pte_update(ptep, _PAGE_DIRTY); | 328 | old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0); |
335 | if (old & _PAGE_HASHPTE) | ||
336 | hpte_update(mm, addr, ptep, old, 0); | ||
337 | return (old & _PAGE_DIRTY) != 0; | 329 | return (old & _PAGE_DIRTY) != 0; |
338 | } | 330 | } |
339 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | 331 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY |
@@ -352,9 +344,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
352 | 344 | ||
353 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | 345 | if ((pte_val(*ptep) & _PAGE_RW) == 0) |
354 | return; | 346 | return; |
355 | old = pte_update(ptep, _PAGE_RW); | 347 | old = pte_update(mm, addr, ptep, _PAGE_RW, 0); |
356 | if (old & _PAGE_HASHPTE) | ||
357 | hpte_update(mm, addr, ptep, old, 0); | ||
358 | } | 348 | } |
359 | 349 | ||
360 | /* | 350 | /* |
@@ -378,7 +368,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
378 | ({ \ | 368 | ({ \ |
379 | int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ | 369 | int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ |
380 | __ptep); \ | 370 | __ptep); \ |
381 | flush_tlb_page(__vma, __address); \ | ||
382 | __dirty; \ | 371 | __dirty; \ |
383 | }) | 372 | }) |
384 | 373 | ||
@@ -386,20 +375,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
386 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | 375 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, |
387 | unsigned long addr, pte_t *ptep) | 376 | unsigned long addr, pte_t *ptep) |
388 | { | 377 | { |
389 | unsigned long old = pte_update(ptep, ~0UL); | 378 | unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); |
390 | |||
391 | if (old & _PAGE_HASHPTE) | ||
392 | hpte_update(mm, addr, ptep, old, 0); | ||
393 | return __pte(old); | 379 | return __pte(old); |
394 | } | 380 | } |
395 | 381 | ||
396 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | 382 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, |
397 | pte_t * ptep) | 383 | pte_t * ptep) |
398 | { | 384 | { |
399 | unsigned long old = pte_update(ptep, ~0UL); | 385 | pte_update(mm, addr, ptep, ~0UL, 0); |
400 | |||
401 | if (old & _PAGE_HASHPTE) | ||
402 | hpte_update(mm, addr, ptep, old, 0); | ||
403 | } | 386 | } |
404 | 387 | ||
405 | /* | 388 | /* |
@@ -408,10 +391,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | |||
408 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | 391 | static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, |
409 | pte_t *ptep, pte_t pte) | 392 | pte_t *ptep, pte_t pte) |
410 | { | 393 | { |
411 | if (pte_present(*ptep)) { | 394 | if (pte_present(*ptep)) |
412 | pte_clear(mm, addr, ptep); | 395 | pte_clear(mm, addr, ptep); |
413 | flush_tlb_pending(); | ||
414 | } | ||
415 | pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); | 396 | pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); |
416 | *ptep = pte; | 397 | *ptep = pte; |
417 | } | 398 | } |
@@ -467,16 +448,6 @@ extern pgd_t swapper_pg_dir[]; | |||
467 | 448 | ||
468 | extern void paging_init(void); | 449 | extern void paging_init(void); |
469 | 450 | ||
470 | /* | ||
471 | * This gets called at the end of handling a page fault, when | ||
472 | * the kernel has put a new PTE into the page table for the process. | ||
473 | * We use it to put a corresponding HPTE into the hash table | ||
474 | * ahead of time, instead of waiting for the inevitable extra | ||
475 | * hash-table miss exception. | ||
476 | */ | ||
477 | struct vm_area_struct; | ||
478 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | ||
479 | |||
480 | /* Encode and de-code a swap entry */ | 451 | /* Encode and de-code a swap entry */ |
481 | #define __swp_type(entry) (((entry).val >> 1) & 0x3f) | 452 | #define __swp_type(entry) (((entry).val >> 1) & 0x3f) |
482 | #define __swp_offset(entry) ((entry).val >> 8) | 453 | #define __swp_offset(entry) ((entry).val >> 8) |
@@ -522,6 +493,7 @@ void pgtable_cache_init(void); | |||
522 | return pt; | 493 | return pt; |
523 | } | 494 | } |
524 | 495 | ||
496 | |||
525 | #include <asm-generic/pgtable.h> | 497 | #include <asm-generic/pgtable.h> |
526 | 498 | ||
527 | #endif /* __ASSEMBLY__ */ | 499 | #endif /* __ASSEMBLY__ */ |
diff --git a/include/asm-powerpc/pmc.h b/include/asm-powerpc/pmc.h index 8588be68e0ad..d6a616a1b3ea 100644 --- a/include/asm-powerpc/pmc.h +++ b/include/asm-powerpc/pmc.h | |||
@@ -30,6 +30,7 @@ void release_pmc_hardware(void); | |||
30 | 30 | ||
31 | #ifdef CONFIG_PPC64 | 31 | #ifdef CONFIG_PPC64 |
32 | void power4_enable_pmcs(void); | 32 | void power4_enable_pmcs(void); |
33 | void pasemi_enable_pmcs(void); | ||
33 | #endif | 34 | #endif |
34 | 35 | ||
35 | #endif /* __KERNEL__ */ | 36 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h index ab6eddb518c7..d74b2965bb82 100644 --- a/include/asm-powerpc/ppc-pci.h +++ b/include/asm-powerpc/ppc-pci.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #define _ASM_POWERPC_PPC_PCI_H | 10 | #define _ASM_POWERPC_PPC_PCI_H |
11 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
12 | 12 | ||
13 | #ifdef CONFIG_PCI | ||
14 | |||
13 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
14 | #include <asm/pci-bridge.h> | 16 | #include <asm/pci-bridge.h> |
15 | 17 | ||
@@ -22,7 +24,7 @@ extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary); | |||
22 | extern struct list_head hose_list; | 24 | extern struct list_head hose_list; |
23 | extern int global_phb_number; | 25 | extern int global_phb_number; |
24 | 26 | ||
25 | extern unsigned long find_and_init_phbs(void); | 27 | extern void find_and_init_phbs(void); |
26 | 28 | ||
27 | extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */ | 29 | extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */ |
28 | 30 | ||
@@ -68,7 +70,7 @@ struct pci_dev *pci_get_device_by_addr(unsigned long addr); | |||
68 | void eeh_slot_error_detail (struct pci_dn *pdn, int severity); | 70 | void eeh_slot_error_detail (struct pci_dn *pdn, int severity); |
69 | 71 | ||
70 | /** | 72 | /** |
71 | * rtas_pci_enableo - enable IO transfers for this slot | 73 | * rtas_pci_enable - enable IO transfers for this slot |
72 | * @pdn: pci device node | 74 | * @pdn: pci device node |
73 | * @function: either EEH_THAW_MMIO or EEH_THAW_DMA | 75 | * @function: either EEH_THAW_MMIO or EEH_THAW_DMA |
74 | * | 76 | * |
@@ -89,6 +91,7 @@ int rtas_pci_enable(struct pci_dn *pdn, int function); | |||
89 | * Returns a non-zero value if the reset failed. | 91 | * Returns a non-zero value if the reset failed. |
90 | */ | 92 | */ |
91 | int rtas_set_slot_reset (struct pci_dn *); | 93 | int rtas_set_slot_reset (struct pci_dn *); |
94 | int eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs); | ||
92 | 95 | ||
93 | /** | 96 | /** |
94 | * eeh_restore_bars - Restore device configuration info. | 97 | * eeh_restore_bars - Restore device configuration info. |
@@ -126,5 +129,10 @@ struct device_node * find_device_pe(struct device_node *dn); | |||
126 | 129 | ||
127 | #endif | 130 | #endif |
128 | 131 | ||
132 | #else /* CONFIG_PCI */ | ||
133 | static inline void find_and_init_phbs(void) { } | ||
134 | static inline void init_pci_config_tokens(void) { } | ||
135 | #endif /* !CONFIG_PCI */ | ||
136 | |||
129 | #endif /* __KERNEL__ */ | 137 | #endif /* __KERNEL__ */ |
130 | #endif /* _ASM_POWERPC_PPC_PCI_H */ | 138 | #endif /* _ASM_POWERPC_PPC_PCI_H */ |
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h index a26c32ee5527..d947b1609491 100644 --- a/include/asm-powerpc/processor.h +++ b/include/asm-powerpc/processor.h | |||
@@ -133,7 +133,6 @@ struct thread_struct { | |||
133 | mm_segment_t fs; /* for get_fs() validation */ | 133 | mm_segment_t fs; /* for get_fs() validation */ |
134 | #ifdef CONFIG_PPC32 | 134 | #ifdef CONFIG_PPC32 |
135 | void *pgdir; /* root of page-table tree */ | 135 | void *pgdir; /* root of page-table tree */ |
136 | signed long last_syscall; | ||
137 | #endif | 136 | #endif |
138 | #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE) | 137 | #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE) |
139 | unsigned long dbcr0; /* debug control register values */ | 138 | unsigned long dbcr0; /* debug control register values */ |
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 020ed015a94b..ec400f608e16 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h | |||
@@ -18,7 +18,9 @@ | |||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <asm/irq.h> | ||
21 | #include <asm/atomic.h> | 22 | #include <asm/atomic.h> |
23 | #include <asm/io.h> | ||
22 | 24 | ||
23 | /* Definitions used by the flattened device tree */ | 25 | /* Definitions used by the flattened device tree */ |
24 | #define OF_DT_HEADER 0xd00dfeed /* marker */ | 26 | #define OF_DT_HEADER 0xd00dfeed /* marker */ |
@@ -58,6 +60,8 @@ struct boot_param_header | |||
58 | u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ | 60 | u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ |
59 | /* version 3 fields below */ | 61 | /* version 3 fields below */ |
60 | u32 dt_strings_size; /* size of the DT strings block */ | 62 | u32 dt_strings_size; /* size of the DT strings block */ |
63 | /* version 17 fields below */ | ||
64 | u32 dt_struct_size; /* size of the DT structure block */ | ||
61 | }; | 65 | }; |
62 | 66 | ||
63 | 67 | ||
@@ -68,7 +72,7 @@ typedef u32 ihandle; | |||
68 | struct property { | 72 | struct property { |
69 | char *name; | 73 | char *name; |
70 | int length; | 74 | int length; |
71 | unsigned char *value; | 75 | void *value; |
72 | struct property *next; | 76 | struct property *next; |
73 | }; | 77 | }; |
74 | 78 | ||
@@ -108,14 +112,6 @@ static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_e | |||
108 | } | 112 | } |
109 | 113 | ||
110 | 114 | ||
111 | /* OBSOLETE: Old style node lookup */ | ||
112 | extern struct device_node *find_devices(const char *name); | ||
113 | extern struct device_node *find_type_devices(const char *type); | ||
114 | extern struct device_node *find_path_device(const char *path); | ||
115 | extern struct device_node *find_compatible_devices(const char *type, | ||
116 | const char *compat); | ||
117 | extern struct device_node *find_all_nodes(void); | ||
118 | |||
119 | /* New style node lookup */ | 115 | /* New style node lookup */ |
120 | extern struct device_node *of_find_node_by_name(struct device_node *from, | 116 | extern struct device_node *of_find_node_by_name(struct device_node *from, |
121 | const char *name); | 117 | const char *name); |
@@ -159,15 +155,17 @@ extern void of_detach_node(const struct device_node *); | |||
159 | extern void finish_device_tree(void); | 155 | extern void finish_device_tree(void); |
160 | extern void unflatten_device_tree(void); | 156 | extern void unflatten_device_tree(void); |
161 | extern void early_init_devtree(void *); | 157 | extern void early_init_devtree(void *); |
162 | extern int device_is_compatible(const struct device_node *device, | 158 | extern int of_device_is_compatible(const struct device_node *device, |
163 | const char *); | 159 | const char *); |
160 | #define device_is_compatible(d, c) of_device_is_compatible((d), (c)) | ||
164 | extern int machine_is_compatible(const char *compat); | 161 | extern int machine_is_compatible(const char *compat); |
165 | extern const void *get_property(const struct device_node *node, | 162 | extern const void *of_get_property(const struct device_node *node, |
166 | const char *name, | 163 | const char *name, |
167 | int *lenp); | 164 | int *lenp); |
165 | #define get_property(a, b, c) of_get_property((a), (b), (c)) | ||
168 | extern void print_properties(struct device_node *node); | 166 | extern void print_properties(struct device_node *node); |
169 | extern int prom_n_addr_cells(struct device_node* np); | 167 | extern int of_n_addr_cells(struct device_node* np); |
170 | extern int prom_n_size_cells(struct device_node* np); | 168 | extern int of_n_size_cells(struct device_node* np); |
171 | extern int prom_n_intr_cells(struct device_node* np); | 169 | extern int prom_n_intr_cells(struct device_node* np); |
172 | extern void prom_get_irq_senses(unsigned char *senses, int off, int max); | 170 | extern void prom_get_irq_senses(unsigned char *senses, int off, int max); |
173 | extern int prom_add_property(struct device_node* np, struct property* prop); | 171 | extern int prom_add_property(struct device_node* np, struct property* prop); |
@@ -350,6 +348,16 @@ static inline int of_irq_to_resource(struct device_node *dev, int index, struct | |||
350 | return irq; | 348 | return irq; |
351 | } | 349 | } |
352 | 350 | ||
351 | static inline void __iomem *of_iomap(struct device_node *np, int index) | ||
352 | { | ||
353 | struct resource res; | ||
354 | |||
355 | if (of_address_to_resource(np, index, &res)) | ||
356 | return NULL; | ||
357 | |||
358 | return ioremap(res.start, 1 + res.end - res.start); | ||
359 | } | ||
360 | |||
353 | 361 | ||
354 | #endif /* __KERNEL__ */ | 362 | #endif /* __KERNEL__ */ |
355 | #endif /* _POWERPC_PROM_H */ | 363 | #endif /* _POWERPC_PROM_H */ |
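The new of_iomap() inline above combines of_address_to_resource() with ioremap(). A minimal usage sketch follows; the "example,uart" compatible string is invented, the rest uses only helpers declared in this header.

static void __iomem *example_map_uart(void)
{
        struct device_node *np;
        void __iomem *regs;

        np = of_find_compatible_node(NULL, NULL, "example,uart");
        if (!np)
                return NULL;
        regs = of_iomap(np, 0);         /* maps the node's first "reg" entry */
        of_node_put(np);
        return regs;                    /* NULL if lookup or mapping failed */
}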
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index 0d7f0164ed81..749c7f953b58 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h | |||
@@ -469,12 +469,68 @@ | |||
469 | #define SPRN_SIAR 780 | 469 | #define SPRN_SIAR 780 |
470 | #define SPRN_SDAR 781 | 470 | #define SPRN_SDAR 781 |
471 | 471 | ||
472 | #define PA6T_SPRN_PMC0 787 | 472 | #define SPRN_PA6T_MMCR0 795 |
473 | #define PA6T_SPRN_PMC1 788 | 473 | #define PA6T_MMCR0_EN0 0x0000000000000001UL |
474 | #define PA6T_SPRN_PMC2 789 | 474 | #define PA6T_MMCR0_EN1 0x0000000000000002UL |
475 | #define PA6T_SPRN_PMC3 790 | 475 | #define PA6T_MMCR0_EN2 0x0000000000000004UL |
476 | #define PA6T_SPRN_PMC4 791 | 476 | #define PA6T_MMCR0_EN3 0x0000000000000008UL |
477 | #define PA6T_SPRN_PMC5 792 | 477 | #define PA6T_MMCR0_EN4 0x0000000000000010UL |
478 | #define PA6T_MMCR0_EN5 0x0000000000000020UL | ||
479 | #define PA6T_MMCR0_SUPEN 0x0000000000000040UL | ||
480 | #define PA6T_MMCR0_PREN 0x0000000000000080UL | ||
481 | #define PA6T_MMCR0_HYPEN 0x0000000000000100UL | ||
482 | #define PA6T_MMCR0_FCM0 0x0000000000000200UL | ||
483 | #define PA6T_MMCR0_FCM1 0x0000000000000400UL | ||
484 | #define PA6T_MMCR0_INTGEN 0x0000000000000800UL | ||
485 | #define PA6T_MMCR0_INTEN0 0x0000000000001000UL | ||
486 | #define PA6T_MMCR0_INTEN1 0x0000000000002000UL | ||
487 | #define PA6T_MMCR0_INTEN2 0x0000000000004000UL | ||
488 | #define PA6T_MMCR0_INTEN3 0x0000000000008000UL | ||
489 | #define PA6T_MMCR0_INTEN4 0x0000000000010000UL | ||
490 | #define PA6T_MMCR0_INTEN5 0x0000000000020000UL | ||
491 | #define PA6T_MMCR0_DISCNT 0x0000000000040000UL | ||
492 | #define PA6T_MMCR0_UOP 0x0000000000080000UL | ||
493 | #define PA6T_MMCR0_TRG 0x0000000000100000UL | ||
494 | #define PA6T_MMCR0_TRGEN 0x0000000000200000UL | ||
495 | #define PA6T_MMCR0_TRGREG 0x0000000001600000UL | ||
496 | #define PA6T_MMCR0_SIARLOG 0x0000000002000000UL | ||
497 | #define PA6T_MMCR0_SDARLOG 0x0000000004000000UL | ||
498 | #define PA6T_MMCR0_PROEN 0x0000000008000000UL | ||
499 | #define PA6T_MMCR0_PROLOG 0x0000000010000000UL | ||
500 | #define PA6T_MMCR0_DAMEN2 0x0000000020000000UL | ||
501 | #define PA6T_MMCR0_DAMEN3 0x0000000040000000UL | ||
502 | #define PA6T_MMCR0_DAMEN4 0x0000000080000000UL | ||
503 | #define PA6T_MMCR0_DAMEN5 0x0000000100000000UL | ||
504 | #define PA6T_MMCR0_DAMSEL2 0x0000000200000000UL | ||
505 | #define PA6T_MMCR0_DAMSEL3 0x0000000400000000UL | ||
506 | #define PA6T_MMCR0_DAMSEL4 0x0000000800000000UL | ||
507 | #define PA6T_MMCR0_DAMSEL5 0x0000001000000000UL | ||
508 | #define PA6T_MMCR0_HANDDIS 0x0000002000000000UL | ||
509 | #define PA6T_MMCR0_PCTEN 0x0000004000000000UL | ||
510 | #define PA6T_MMCR0_SOCEN 0x0000008000000000UL | ||
511 | #define PA6T_MMCR0_SOCMOD 0x0000010000000000UL | ||
512 | |||
513 | #define SPRN_PA6T_MMCR1 798 | ||
514 | #define PA6T_MMCR1_ES2 0x00000000000000ffUL | ||
515 | #define PA6T_MMCR1_ES3 0x000000000000ff00UL | ||
516 | #define PA6T_MMCR1_ES4 0x0000000000ff0000UL | ||
517 | #define PA6T_MMCR1_ES5 0x00000000ff000000UL | ||
518 | |||
519 | #define SPRN_PA6T_SIAR 780 | ||
520 | #define SPRN_PA6T_UPMC0 771 | ||
521 | #define SPRN_PA6T_UPMC1 772 | ||
522 | #define SPRN_PA6T_UPMC2 773 | ||
523 | #define SPRN_PA6T_UPMC3 774 | ||
524 | #define SPRN_PA6T_UPMC4 775 | ||
525 | #define SPRN_PA6T_UPMC5 776 | ||
526 | #define SPRN_PA6T_UMMCR0 779 | ||
527 | #define SPRN_PA6T_UMMCR1 782 | ||
528 | #define SPRN_PA6T_PMC0 787 | ||
529 | #define SPRN_PA6T_PMC1 788 | ||
530 | #define SPRN_PA6T_PMC2 789 | ||
531 | #define SPRN_PA6T_PMC3 790 | ||
532 | #define SPRN_PA6T_PMC4 791 | ||
533 | #define SPRN_PA6T_PMC5 792 | ||
478 | 534 | ||
479 | #else /* 32-bit */ | 535 | #else /* 32-bit */ |
480 | #define SPRN_MMCR0 952 /* Monitor Mode Control Register 0 */ | 536 | #define SPRN_MMCR0 952 /* Monitor Mode Control Register 0 */ |
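A hedged illustration of the PA6T definitions added above: reading a counter and enabling it in MMCR0 with the usual mfspr()/mtspr() accessors from this header. This only sketches how the new constants are meant to be combined; it is not code from the patch.

static inline unsigned long pa6t_read_pmc0(void)
{
        return mfspr(SPRN_PA6T_PMC0);
}

static inline void pa6t_enable_pmc0(void)
{
        mtspr(SPRN_PA6T_MMCR0, mfspr(SPRN_PA6T_MMCR0) | PA6T_MMCR0_EN0);
}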
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h index 8aad0619eb8e..02e56a6685a2 100644 --- a/include/asm-powerpc/spu_csa.h +++ b/include/asm-powerpc/spu_csa.h | |||
@@ -242,6 +242,7 @@ struct spu_state { | |||
242 | u64 spu_chnldata_RW[32]; | 242 | u64 spu_chnldata_RW[32]; |
243 | u32 spu_mailbox_data[4]; | 243 | u32 spu_mailbox_data[4]; |
244 | u32 pu_mailbox_data[1]; | 244 | u32 pu_mailbox_data[1]; |
245 | u64 dar, dsisr; | ||
245 | unsigned long suspend_time; | 246 | unsigned long suspend_time; |
246 | spinlock_t register_lock; | 247 | spinlock_t register_lock; |
247 | }; | 248 | }; |
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index f7b1227d6454..d3e0906ff2bc 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h | |||
@@ -131,6 +131,7 @@ extern void enable_kernel_altivec(void); | |||
131 | extern void giveup_altivec(struct task_struct *); | 131 | extern void giveup_altivec(struct task_struct *); |
132 | extern void load_up_altivec(struct task_struct *); | 132 | extern void load_up_altivec(struct task_struct *); |
133 | extern int emulate_altivec(struct pt_regs *); | 133 | extern int emulate_altivec(struct pt_regs *); |
134 | extern void enable_kernel_spe(void); | ||
134 | extern void giveup_spe(struct task_struct *); | 135 | extern void giveup_spe(struct task_struct *); |
135 | extern void load_up_spe(struct task_struct *); | 136 | extern void load_up_spe(struct task_struct *); |
136 | extern int fix_alignment(struct pt_regs *); | 137 | extern int fix_alignment(struct pt_regs *); |
diff --git a/include/asm-powerpc/tlb.h b/include/asm-powerpc/tlb.h index 4e2a834683fb..0a17682663d8 100644 --- a/include/asm-powerpc/tlb.h +++ b/include/asm-powerpc/tlb.h | |||
@@ -38,7 +38,6 @@ extern void pte_free_finish(void); | |||
38 | 38 | ||
39 | static inline void tlb_flush(struct mmu_gather *tlb) | 39 | static inline void tlb_flush(struct mmu_gather *tlb) |
40 | { | 40 | { |
41 | flush_tlb_pending(); | ||
42 | pte_free_finish(); | 41 | pte_free_finish(); |
43 | } | 42 | } |
44 | 43 | ||
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h index 93c7d0c7230f..86e6266a028b 100644 --- a/include/asm-powerpc/tlbflush.h +++ b/include/asm-powerpc/tlbflush.h | |||
@@ -17,10 +17,73 @@ | |||
17 | */ | 17 | */ |
18 | #ifdef __KERNEL__ | 18 | #ifdef __KERNEL__ |
19 | 19 | ||
20 | |||
21 | struct mm_struct; | 20 | struct mm_struct; |
21 | struct vm_area_struct; | ||
22 | |||
23 | #if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE) | ||
24 | /* | ||
25 | * TLB flushing for software loaded TLB chips | ||
26 | * | ||
27 | * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & | ||
28 | * flush_tlb_kernel_range are best implemented as tlbia vs | ||
29 | * specific tlbie's | ||
30 | */ | ||
31 | |||
32 | extern void _tlbie(unsigned long address); | ||
33 | |||
34 | #if defined(CONFIG_40x) || defined(CONFIG_8xx) | ||
35 | #define _tlbia() asm volatile ("tlbia; sync" : : : "memory") | ||
36 | #else /* CONFIG_44x || CONFIG_FSL_BOOKE */ | ||
37 | extern void _tlbia(void); | ||
38 | #endif | ||
39 | |||
40 | static inline void flush_tlb_mm(struct mm_struct *mm) | ||
41 | { | ||
42 | _tlbia(); | ||
43 | } | ||
44 | |||
45 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
46 | unsigned long vmaddr) | ||
47 | { | ||
48 | _tlbie(vmaddr); | ||
49 | } | ||
50 | |||
51 | static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, | ||
52 | unsigned long vmaddr) | ||
53 | { | ||
54 | _tlbie(vmaddr); | ||
55 | } | ||
56 | |||
57 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
58 | unsigned long start, unsigned long end) | ||
59 | { | ||
60 | _tlbia(); | ||
61 | } | ||
62 | |||
63 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
64 | unsigned long end) | ||
65 | { | ||
66 | _tlbia(); | ||
67 | } | ||
22 | 68 | ||
23 | #ifdef CONFIG_PPC64 | 69 | #elif defined(CONFIG_PPC32) |
70 | /* | ||
71 | * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx | ||
72 | */ | ||
73 | extern void _tlbie(unsigned long address); | ||
74 | extern void _tlbia(void); | ||
75 | |||
76 | extern void flush_tlb_mm(struct mm_struct *mm); | ||
77 | extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); | ||
78 | extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); | ||
79 | extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
80 | unsigned long end); | ||
81 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | ||
82 | |||
83 | #else | ||
84 | /* | ||
85 | * TLB flushing for 64-bit hash-MMU CPUs | ||
86 | */ | ||
24 | 87 | ||
25 | #include <linux/percpu.h> | 88 | #include <linux/percpu.h> |
26 | #include <asm/page.h> | 89 | #include <asm/page.h> |
@@ -28,117 +91,90 @@ struct mm_struct; | |||
28 | #define PPC64_TLB_BATCH_NR 192 | 91 | #define PPC64_TLB_BATCH_NR 192 |
29 | 92 | ||
30 | struct ppc64_tlb_batch { | 93 | struct ppc64_tlb_batch { |
31 | unsigned long index; | 94 | int active; |
32 | struct mm_struct *mm; | 95 | unsigned long index; |
33 | real_pte_t pte[PPC64_TLB_BATCH_NR]; | 96 | struct mm_struct *mm; |
34 | unsigned long vaddr[PPC64_TLB_BATCH_NR]; | 97 | real_pte_t pte[PPC64_TLB_BATCH_NR]; |
35 | unsigned int psize; | 98 | unsigned long vaddr[PPC64_TLB_BATCH_NR]; |
99 | unsigned int psize; | ||
36 | }; | 100 | }; |
37 | DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); | 101 | DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); |
38 | 102 | ||
39 | extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); | 103 | extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); |
40 | 104 | ||
41 | static inline void flush_tlb_pending(void) | 105 | extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, |
106 | pte_t *ptep, unsigned long pte, int huge); | ||
107 | |||
108 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE | ||
109 | |||
110 | static inline void arch_enter_lazy_mmu_mode(void) | ||
111 | { | ||
112 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); | ||
113 | |||
114 | batch->active = 1; | ||
115 | } | ||
116 | |||
117 | static inline void arch_leave_lazy_mmu_mode(void) | ||
42 | { | 118 | { |
43 | struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); | 119 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
44 | 120 | ||
45 | if (batch->index) | 121 | if (batch->index) |
46 | __flush_tlb_pending(batch); | 122 | __flush_tlb_pending(batch); |
47 | put_cpu_var(ppc64_tlb_batch); | 123 | batch->active = 0; |
48 | } | 124 | } |
49 | 125 | ||
126 | #define arch_flush_lazy_mmu_mode() do {} while (0) | ||
127 | |||
128 | |||
50 | extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, | 129 | extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, |
51 | int local); | 130 | int local); |
52 | extern void flush_hash_range(unsigned long number, int local); | 131 | extern void flush_hash_range(unsigned long number, int local); |
53 | 132 | ||
54 | #else /* CONFIG_PPC64 */ | ||
55 | |||
56 | #include <linux/mm.h> | ||
57 | |||
58 | extern void _tlbie(unsigned long address); | ||
59 | extern void _tlbia(void); | ||
60 | |||
61 | /* | ||
62 | * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & | ||
63 | * flush_tlb_kernel_range are best implemented as tlbia vs | ||
64 | * specific tlbie's | ||
65 | */ | ||
66 | |||
67 | #if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx) | ||
68 | #define flush_tlb_pending() asm volatile ("tlbia; sync" : : : "memory") | ||
69 | #elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE) | ||
70 | #define flush_tlb_pending() _tlbia() | ||
71 | #endif | ||
72 | |||
73 | /* | ||
74 | * This gets called at the end of handling a page fault, when | ||
75 | * the kernel has put a new PTE into the page table for the process. | ||
76 | * We use it to ensure coherency between the i-cache and d-cache | ||
77 | * for the page which has just been mapped in. | ||
78 | * On machines which use an MMU hash table, we use this to put a | ||
79 | * corresponding HPTE into the hash table ahead of time, instead of | ||
80 | * waiting for the inevitable extra hash-table miss exception. | ||
81 | */ | ||
82 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | ||
83 | |||
84 | #endif /* CONFIG_PPC64 */ | ||
85 | |||
86 | #if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \ | ||
87 | defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx) | ||
88 | 133 | ||
89 | static inline void flush_tlb_mm(struct mm_struct *mm) | 134 | static inline void flush_tlb_mm(struct mm_struct *mm) |
90 | { | 135 | { |
91 | flush_tlb_pending(); | ||
92 | } | 136 | } |
93 | 137 | ||
94 | static inline void flush_tlb_page(struct vm_area_struct *vma, | 138 | static inline void flush_tlb_page(struct vm_area_struct *vma, |
95 | unsigned long vmaddr) | 139 | unsigned long vmaddr) |
96 | { | 140 | { |
97 | #ifdef CONFIG_PPC64 | ||
98 | flush_tlb_pending(); | ||
99 | #else | ||
100 | _tlbie(vmaddr); | ||
101 | #endif | ||
102 | } | 141 | } |
103 | 142 | ||
104 | static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, | 143 | static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, |
105 | unsigned long vmaddr) | 144 | unsigned long vmaddr) |
106 | { | 145 | { |
107 | #ifndef CONFIG_PPC64 | ||
108 | _tlbie(vmaddr); | ||
109 | #endif | ||
110 | } | 146 | } |
111 | 147 | ||
112 | static inline void flush_tlb_range(struct vm_area_struct *vma, | 148 | static inline void flush_tlb_range(struct vm_area_struct *vma, |
113 | unsigned long start, unsigned long end) | 149 | unsigned long start, unsigned long end) |
114 | { | 150 | { |
115 | flush_tlb_pending(); | ||
116 | } | 151 | } |
117 | 152 | ||
118 | static inline void flush_tlb_kernel_range(unsigned long start, | 153 | static inline void flush_tlb_kernel_range(unsigned long start, |
119 | unsigned long end) | 154 | unsigned long end) |
120 | { | 155 | { |
121 | flush_tlb_pending(); | ||
122 | } | 156 | } |
123 | 157 | ||
124 | #else /* 6xx, 7xx, 7xxx cpus */ | ||
125 | |||
126 | extern void flush_tlb_mm(struct mm_struct *mm); | ||
127 | extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); | ||
128 | extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); | ||
129 | extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
130 | unsigned long end); | ||
131 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | ||
132 | |||
133 | #endif | 158 | #endif |
134 | 159 | ||
135 | /* | 160 | /* |
161 | * This gets called at the end of handling a page fault, when | ||
162 | * the kernel has put a new PTE into the page table for the process. | ||
163 | * We use it to ensure coherency between the i-cache and d-cache | ||
164 | * for the page which has just been mapped in. | ||
165 | * On machines which use an MMU hash table, we use this to put a | ||
166 | * corresponding HPTE into the hash table ahead of time, instead of | ||
167 | * waiting for the inevitable extra hash-table miss exception. | ||
168 | */ | ||
169 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | ||
170 | |||
171 | /* | ||
136 | * This is called in munmap when we have freed up some page-table | 172 | * This is called in munmap when we have freed up some page-table |
137 | * pages. We don't need to do anything here, there's nothing special | 173 | * pages. We don't need to do anything here, there's nothing special |
138 | * about our page-table pages. -- paulus | 174 | * about our page-table pages. -- paulus |
139 | */ | 175 | */ |
140 | static inline void flush_tlb_pgtables(struct mm_struct *mm, | 176 | static inline void flush_tlb_pgtables(struct mm_struct *mm, |
141 | unsigned long start, unsigned long end) | 177 | unsigned long start, unsigned long end) |
142 | { | 178 | { |
143 | } | 179 | } |
144 | 180 | ||
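How the tlbflush.h and pgtable.h pieces above fit together: hpte_need_flush() queues hash invalidations into the per-cpu ppc64_tlb_batch while batch->active is set, and the batch is drained when it fills or when the lazy-MMU section ends. In the real kernel the arch_enter/leave_lazy_mmu_mode() brackets are supplied by generic mm code; the helper below is a made-up sketch of that flow, using only functions from the hunks above.

static void clear_range_sketch(struct mm_struct *mm, pte_t *ptep,
                               unsigned long start, unsigned long end)
{
        unsigned long addr;

        arch_enter_lazy_mmu_mode();             /* batch->active = 1 */
        for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
                pte_update(mm, addr, ptep, ~0UL, 0);    /* hpte_need_flush() for hashed PTEs */
        arch_leave_lazy_mmu_mode();             /* __flush_tlb_pending() if anything queued */
}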
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h index adbf16b8cfbb..8e798e3758bc 100644 --- a/include/asm-powerpc/uaccess.h +++ b/include/asm-powerpc/uaccess.h | |||
@@ -110,12 +110,18 @@ struct exception_table_entry { | |||
110 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) | 110 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
111 | #define __put_user(x, ptr) \ | 111 | #define __put_user(x, ptr) \ |
112 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) | 112 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
113 | |||
113 | #ifndef __powerpc64__ | 114 | #ifndef __powerpc64__ |
114 | #define __get_user64(x, ptr) \ | 115 | #define __get_user64(x, ptr) \ |
115 | __get_user64_nocheck((x), (ptr), sizeof(*(ptr))) | 116 | __get_user64_nocheck((x), (ptr), sizeof(*(ptr))) |
116 | #define __put_user64(x, ptr) __put_user(x, ptr) | 117 | #define __put_user64(x, ptr) __put_user(x, ptr) |
117 | #endif | 118 | #endif |
118 | 119 | ||
120 | #define __get_user_inatomic(x, ptr) \ | ||
121 | __get_user_nosleep((x), (ptr), sizeof(*(ptr))) | ||
122 | #define __put_user_inatomic(x, ptr) \ | ||
123 | __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) | ||
124 | |||
119 | #define __get_user_unaligned __get_user | 125 | #define __get_user_unaligned __get_user |
120 | #define __put_user_unaligned __put_user | 126 | #define __put_user_unaligned __put_user |
121 | 127 | ||
@@ -198,6 +204,16 @@ do { \ | |||
198 | __pu_err; \ | 204 | __pu_err; \ |
199 | }) | 205 | }) |
200 | 206 | ||
207 | #define __put_user_nosleep(x, ptr, size) \ | ||
208 | ({ \ | ||
209 | long __pu_err; \ | ||
210 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
211 | __chk_user_ptr(ptr); \ | ||
212 | __put_user_size((x), __pu_addr, (size), __pu_err); \ | ||
213 | __pu_err; \ | ||
214 | }) | ||
215 | |||
216 | |||
201 | extern long __get_user_bad(void); | 217 | extern long __get_user_bad(void); |
202 | 218 | ||
203 | #define __get_user_asm(x, addr, err, op) \ | 219 | #define __get_user_asm(x, addr, err, op) \ |
@@ -297,6 +313,18 @@ do { \ | |||
297 | __gu_err; \ | 313 | __gu_err; \ |
298 | }) | 314 | }) |
299 | 315 | ||
316 | #define __get_user_nosleep(x, ptr, size) \ | ||
317 | ({ \ | ||
318 | long __gu_err; \ | ||
319 | unsigned long __gu_val; \ | ||
320 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
321 | __chk_user_ptr(ptr); \ | ||
322 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ | ||
323 | (x) = (__typeof__(*(ptr)))__gu_val; \ | ||
324 | __gu_err; \ | ||
325 | }) | ||
326 | |||
327 | |||
300 | /* more complex routines */ | 328 | /* more complex routines */ |
301 | 329 | ||
302 | extern unsigned long __copy_tofrom_user(void __user *to, | 330 | extern unsigned long __copy_tofrom_user(void __user *to, |
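The __get_user_inatomic()/__put_user_inatomic() variants added above are meant for paths that must not sleep, where the ordinary __get_user()/__put_user() forms are unsuitable. A hedged sketch; the wrapper function and its caller context are invented.

static int read_user_word_atomic(unsigned int __user *uaddr, unsigned int *val)
{
        /* Returns 0 on success, -EFAULT if the access faults. */
        return __get_user_inatomic(*val, uaddr);
}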
diff --git a/include/asm-powerpc/uic.h b/include/asm-powerpc/uic.h new file mode 100644 index 000000000000..970eb7e2186a --- /dev/null +++ b/include/asm-powerpc/uic.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * include/asm-powerpc/uic.h | ||
3 | * | ||
4 | * IBM PPC4xx UIC external definitions and structure. | ||
5 | * | ||
6 | * Maintainer: David Gibson <dwg@au1.ibm.com> | ||
7 | * Copyright 2007 IBM Corporation. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | */ | ||
14 | #ifndef _ASM_POWERPC_UIC_H | ||
15 | #define _ASM_POWERPC_UIC_H | ||
16 | |||
17 | #ifdef __KERNEL__ | ||
18 | |||
19 | extern void __init uic_init_tree(void); | ||
20 | extern unsigned int uic_get_irq(void); | ||
21 | |||
22 | #endif /* __KERNEL__ */ | ||
23 | #endif /* _ASM_POWERPC_UIC_H */ | ||
diff --git a/include/asm-ppc/ibm4xx.h b/include/asm-ppc/ibm4xx.h index 92fd02d7b177..ed6891af05d3 100644 --- a/include/asm-ppc/ibm4xx.h +++ b/include/asm-ppc/ibm4xx.h | |||
@@ -47,12 +47,8 @@ | |||
47 | #include <platforms/4xx/walnut.h> | 47 | #include <platforms/4xx/walnut.h> |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #if defined(CONFIG_XILINX_ML300) | 50 | #if defined(CONFIG_XILINX_VIRTEX) |
51 | #include <platforms/4xx/xilinx_ml300.h> | 51 | #include <platforms/4xx/virtex.h> |
52 | #endif | ||
53 | |||
54 | #if defined(CONFIG_XILINX_ML403) | ||
55 | #include <platforms/4xx/xilinx_ml403.h> | ||
56 | #endif | 52 | #endif |
57 | 53 | ||
58 | #ifndef __ASSEMBLY__ | 54 | #ifndef __ASSEMBLY__ |
diff --git a/include/asm-ppc/ppc_sys.h b/include/asm-ppc/ppc_sys.h index 40f197af6508..de99e92d627b 100644 --- a/include/asm-ppc/ppc_sys.h +++ b/include/asm-ppc/ppc_sys.h | |||
@@ -33,8 +33,6 @@ | |||
33 | #include <asm/mpc52xx.h> | 33 | #include <asm/mpc52xx.h> |
34 | #elif defined(CONFIG_MPC10X_BRIDGE) | 34 | #elif defined(CONFIG_MPC10X_BRIDGE) |
35 | #include <asm/mpc10x.h> | 35 | #include <asm/mpc10x.h> |
36 | #elif defined(CONFIG_XILINX_VIRTEX) | ||
37 | #include <platforms/4xx/virtex.h> | ||
38 | #else | 36 | #else |
39 | #error "need definition of ppc_sys_devices" | 37 | #error "need definition of ppc_sys_devices" |
40 | #endif | 38 | #endif |
diff --git a/include/asm-ppc/prom.h b/include/asm-ppc/prom.h index adc5ae784924..901f7fa8b2d7 100644 --- a/include/asm-ppc/prom.h +++ b/include/asm-ppc/prom.h | |||
@@ -34,7 +34,8 @@ extern unsigned long sub_reloc_offset(unsigned long); | |||
34 | */ | 34 | */ |
35 | #define machine_is_compatible(x) 0 | 35 | #define machine_is_compatible(x) 0 |
36 | #define of_find_compatible_node(f, t, c) NULL | 36 | #define of_find_compatible_node(f, t, c) NULL |
37 | #define get_property(p, n, l) NULL | 37 | #define of_get_property(p, n, l) NULL |
38 | #define get_property(a, b, c) of_get_property((a), (b), (c)) | ||
38 | 39 | ||
39 | #endif /* _PPC_PROM_H */ | 40 | #endif /* _PPC_PROM_H */ |
40 | #endif /* __KERNEL__ */ | 41 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/pmu.h b/include/linux/pmu.h index 783177387ac6..b0952e532ed5 100644 --- a/include/linux/pmu.h +++ b/include/linux/pmu.h | |||
@@ -168,24 +168,16 @@ extern int pmu_get_model(void); | |||
168 | 168 | ||
169 | struct pmu_sleep_notifier | 169 | struct pmu_sleep_notifier |
170 | { | 170 | { |
171 | int (*notifier_call)(struct pmu_sleep_notifier *self, int when); | 171 | void (*notifier_call)(struct pmu_sleep_notifier *self, int when); |
172 | int priority; | 172 | int priority; |
173 | struct list_head list; | 173 | struct list_head list; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | /* Code values for calling sleep/wakeup handlers | 176 | /* Code values for calling sleep/wakeup handlers |
177 | * | ||
178 | * Note: If a sleep request got cancelled, all drivers will get | ||
179 | * the PBOOK_SLEEP_REJECT, even those who didn't get the PBOOK_SLEEP_REQUEST. | ||
180 | */ | 177 | */ |
181 | #define PBOOK_SLEEP_REQUEST 1 | 178 | #define PBOOK_SLEEP_REQUEST 1 |
182 | #define PBOOK_SLEEP_NOW 2 | 179 | #define PBOOK_SLEEP_NOW 2 |
183 | #define PBOOK_SLEEP_REJECT 3 | 180 | #define PBOOK_WAKE 3 |
184 | #define PBOOK_WAKE 4 | ||
185 | |||
186 | /* Result codes returned by the notifiers */ | ||
187 | #define PBOOK_SLEEP_OK 0 | ||
188 | #define PBOOK_SLEEP_REFUSE -1 | ||
189 | 181 | ||
190 | /* priority levels in notifiers */ | 182 | /* priority levels in notifiers */ |
191 | #define SLEEP_LEVEL_VIDEO 100 /* Video driver (first wake) */ | 183 | #define SLEEP_LEVEL_VIDEO 100 /* Video driver (first wake) */ |