author    Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 01:11:30 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 01:11:30 -0400
commit    45c091bb2d453ce4a8b06cf19872ec7a77fc4799
tree      06fb2e05518ebfba163f8424e028e7faf5672d66  /include/asm-ppc
parent    d588fcbe5a7ba8bba2cebf7799ab2d573717a806
parent    2191fe3e39159e3375f4b7ec1420df149f154101
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (139 commits)
[POWERPC] re-enable OProfile for iSeries, using timer interrupt
[POWERPC] support ibm,extended-*-frequency properties
[POWERPC] Extra sanity check in EEH code
[POWERPC] Dont look for class-code in pci children
[POWERPC] Fix mdelay badness on shared processor partitions
[POWERPC] disable floating point exceptions for init
[POWERPC] Unify ppc syscall tables
[POWERPC] mpic: add support for serial mode interrupts
[POWERPC] pseries: Print PCI slot location code on failure
[POWERPC] spufs: one more fix for 64k pages
[POWERPC] spufs: fail spu_create with invalid flags
[POWERPC] spufs: clear class2 interrupt status before wakeup
[POWERPC] spufs: fix Makefile for "make clean"
[POWERPC] spufs: remove stop_code from struct spu
[POWERPC] spufs: fix spu irq affinity setting
[POWERPC] spufs: further abstract priv1 register access
[POWERPC] spufs: split the Cell BE support into generic and platform dependant parts
[POWERPC] spufs: dont try to access SPE channel 1 count
[POWERPC] spufs: use kzalloc in create_spu
[POWERPC] spufs: fix initial state of wbox file
...
Manually resolved conflicts in:
drivers/net/phy/Makefile
include/asm-powerpc/spu.h
Diffstat (limited to 'include/asm-ppc')
-rw-r--r--  include/asm-ppc/mmu.h          | 23
-rw-r--r--  include/asm-ppc/mmu_context.h  | 27
-rw-r--r--  include/asm-ppc/mpc85xx.h      |  3
-rw-r--r--  include/asm-ppc/pgtable.h      |  2
4 files changed, 24 insertions, 31 deletions
diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h
index 0a70b05b3afb..14584e505ed5 100644
--- a/include/asm-ppc/mmu.h
+++ b/include/asm-ppc/mmu.h
@@ -23,25 +23,18 @@ extern phys_addr_t fixup_bigphys_addr(phys_addr_t, phys_addr_t);
 #define PHYS_FMT	"%16Lx"
 #endif
 
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+	unsigned long id;
+	unsigned long vdso_base;
+} mm_context_t;
 
 /* Hardware Page Table Entry */
 typedef struct _PTE {
-#ifdef CONFIG_PPC64BRIDGE
-	unsigned long long vsid:52;
-	unsigned long api:5;
-	unsigned long :5;
-	unsigned long h:1;
-	unsigned long v:1;
-	unsigned long long rpn:52;
-#else /* CONFIG_PPC64BRIDGE */
 	unsigned long v:1;	/* Entry is valid */
 	unsigned long vsid:24;	/* Virtual segment identifier */
 	unsigned long h:1;	/* Hash algorithm indicator */
 	unsigned long api:6;	/* Abbreviated page index */
 	unsigned long rpn:20;	/* Real (physical) page number */
-#endif /* CONFIG_PPC64BRIDGE */
 	unsigned long :3;	/* Unused */
 	unsigned long r:1;	/* Referenced */
 	unsigned long c:1;	/* Changed */
@@ -82,11 +75,7 @@ typedef struct _P601_BATU {	/* Upper part of BAT for 601 processor */
 } P601_BATU;
 
 typedef struct _BATU {		/* Upper part of BAT (all except 601) */
-#ifdef CONFIG_PPC64BRIDGE
-	unsigned long long bepi:47;
-#else /* CONFIG_PPC64BRIDGE */
 	unsigned long bepi:15;	/* Effective page index (virtual address) */
-#endif /* CONFIG_PPC64BRIDGE */
 	unsigned long :4;	/* Unused */
 	unsigned long bl:11;	/* Block size mask */
 	unsigned long vs:1;	/* Supervisor valid */
@@ -101,11 +90,7 @@ typedef struct _P601_BATL {	/* Lower part of BAT for 601 processor */
 } P601_BATL;
 
 typedef struct _BATL {		/* Lower part of BAT (all except 601) */
-#ifdef CONFIG_PPC64BRIDGE
-	unsigned long long brpn:47;
-#else /* CONFIG_PPC64BRIDGE */
 	unsigned long brpn:15;	/* Real page index (physical address) */
-#endif /* CONFIG_PPC64BRIDGE */
 	unsigned long :10;	/* Unused */
 	unsigned long w:1;	/* Write-thru cache */
 	unsigned long i:1;	/* Cache inhibit */
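
With the CONFIG_PPC64BRIDGE branches removed, the remaining PTE bitfields are the classic 32-bit hashed-MMU layout, and the four fields at the top of the struct fill the first PTE word exactly. A quick arithmetic check of that claim, using only the bit widths visible in the hunk above (the stand-alone program is an illustration, not kernel code):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		/* Field widths copied from the hunk above: v:1, vsid:24, h:1, api:6 */
		const int v = 1, vsid = 24, h = 1, api = 6;

		/* On the classic 32-bit hashed MMU these four fields make up one PTE word */
		assert(v + vsid + h + api == 32);
		printf("upper PTE word: %d bits\n", v + vsid + h + api);
		return 0;
	}
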
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index 94f2bf71310d..2bc8589cc451 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -70,7 +70,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #else
 
 /* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT	((mm_context_t) -1)
+#define NO_CONTEXT	((unsigned long) -1)
 #define LAST_CONTEXT	32767
 #define FIRST_CONTEXT	1
 #endif
@@ -85,7 +85,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
  * can be used for debugging on all processors (if you happen to have
  * an Abatron).
  */
-extern void set_context(mm_context_t context, pgd_t *pgd);
+extern void set_context(unsigned long contextid, pgd_t *pgd);
 
 /*
  * Bitmap of contexts in use.
@@ -98,7 +98,7 @@ extern unsigned long context_map[];
  * Its use is an optimization only, we can't rely on this context
  * number to be free, but it usually will be.
  */
-extern mm_context_t next_mmu_context;
+extern unsigned long next_mmu_context;
 
 /*
  * If we don't have sufficient contexts to give one to every task
@@ -117,9 +117,9 @@ extern void steal_context(void);
  */
 static inline void get_mmu_context(struct mm_struct *mm)
 {
-	mm_context_t ctx;
+	unsigned long ctx;
 
-	if (mm->context != NO_CONTEXT)
+	if (mm->context.id != NO_CONTEXT)
 		return;
 #ifdef FEW_CONTEXTS
 	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
@@ -132,7 +132,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
 		ctx = 0;
 	}
 	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
-	mm->context = ctx;
+	mm->context.id = ctx;
 #ifdef FEW_CONTEXTS
 	context_mm[ctx] = mm;
 #endif
@@ -141,7 +141,12 @@ static inline void get_mmu_context(struct mm_struct *mm)
 /*
  * Set up the context for a new address space.
  */
-#define init_new_context(tsk,mm)	(((mm)->context = NO_CONTEXT), 0)
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+	mm->context.id = NO_CONTEXT;
+	mm->context.vdso_base = 0;
+	return 0;
+}
 
 /*
  * We're finished using the context for an address space.
@@ -149,9 +154,9 @@ static inline void get_mmu_context(struct mm_struct *mm)
 static inline void destroy_context(struct mm_struct *mm)
 {
 	preempt_disable();
-	if (mm->context != NO_CONTEXT) {
-		clear_bit(mm->context, context_map);
-		mm->context = NO_CONTEXT;
+	if (mm->context.id != NO_CONTEXT) {
+		clear_bit(mm->context.id, context_map);
+		mm->context.id = NO_CONTEXT;
 #ifdef FEW_CONTEXTS
 		atomic_inc(&nr_free_contexts);
 #endif
@@ -179,7 +184,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	/* Setup new userspace context */
 	get_mmu_context(next);
-	set_context(next->context, next->pgd);
+	set_context(next->context.id, next->pgd);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
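
The net effect of the mmu_context.h changes is that the 32-bit code now manipulates mm->context.id rather than a bare mm->context scalar, matching the struct introduced in mmu.h. A minimal stand-alone sketch of that access pattern (the struct, the NO_CONTEXT value and the init logic are taken from the hunks above; the toy mm_struct and main() driver are illustrative assumptions, not kernel code):

	#include <stdio.h>

	/* Mirrors the new 32-bit mm_context_t from include/asm-ppc/mmu.h */
	typedef struct {
		unsigned long id;
		unsigned long vdso_base;
	} mm_context_t;

	#define NO_CONTEXT	((unsigned long) -1)

	/* Toy stand-in for the kernel's struct mm_struct, for illustration only */
	struct mm_struct {
		mm_context_t context;
	};

	/* Same shape as the patched init_new_context(): mark the mm as having
	 * no MMU context assigned yet and no vDSO mapped. */
	static int init_new_context(struct mm_struct *mm)
	{
		mm->context.id = NO_CONTEXT;
		mm->context.vdso_base = 0;
		return 0;
	}

	int main(void)
	{
		struct mm_struct mm;

		init_new_context(&mm);
		/* Callers such as get_mmu_context() now test the id field */
		if (mm.context.id == NO_CONTEXT)
			printf("no hardware context assigned yet\n");
		return 0;
	}
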
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
index c25bdd9debf8..9b4851199c76 100644
--- a/include/asm-ppc/mpc85xx.h
+++ b/include/asm-ppc/mpc85xx.h
@@ -27,6 +27,9 @@
 #if defined(CONFIG_MPC8555_CDS) || defined(CONFIG_MPC8548_CDS)
 #include <platforms/85xx/mpc8555_cds.h>
 #endif
+#ifdef CONFIG_MPC85xx_CDS
+#include <platforms/85xx/mpc85xx_cds.h>
+#endif
 #ifdef CONFIG_MPC8560_ADS
 #include <platforms/85xx/mpc8560_ads.h>
 #endif
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 9cb83679836c..51fa7c662917 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -662,7 +662,7 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-	__ptep_test_and_clear_young((__vma)->vm_mm->context, __addr, __ptep)
+	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,