author     Chris Metcalf <cmetcalf@tilera.com>    2013-08-07 11:36:54 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>    2013-08-13 16:26:01 -0400
commit     bc1a298f4e04833db4c430df59b90039f0170515 (patch)
tree       802da739309efeab62317f62ec4f1989f3f7d8dd /arch/tile/lib
parent     1182b69cb24c4f7d7ee8c8afe41b5ab2bc05a15b (diff)
tile: support CONFIG_PREEMPT
This change adds support for CONFIG_PREEMPT (full kernel preemption).
In addition to the core support, this change includes a number
of places where we fix up uses of smp_processor_id() and per-cpu
variables. I also eliminate the PAGE_HOME_HERE and PAGE_HOME_UNKNOWN
values for page homing, as it turns out they weren't being used.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
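For context, the fixup pattern this change applies is the standard get_cpu()/put_cpu() pairing: with CONFIG_PREEMPT enabled a task can be migrated to another CPU at almost any point, so a bare smp_processor_id() is only meaningful while preemption (or interrupts) are disabled. Below is a minimal sketch of that pattern, not code from the commit; do_per_cpu_work() is a hypothetical placeholder for work that must stay tied to one CPU.

```c
#include <linux/smp.h>	/* get_cpu(), put_cpu(), smp_processor_id() */

/* Hypothetical helper standing in for work bound to a specific CPU. */
static void do_per_cpu_work(int cpu)
{
	(void)cpu;	/* placeholder */
}

static void preempt_safe_example(void)
{
	int cpu = get_cpu();	/* disables preemption and returns this CPU */

	/*
	 * Between get_cpu() and put_cpu() the task cannot migrate,
	 * so 'cpu' remains valid for every use in this region.
	 */
	do_per_cpu_work(cpu);

	put_cpu();		/* re-enables preemption */
}
```

In the diff below, fast_copy() pins the CPU once with get_cpu(), passes the pinned CPU number down (replacing the bare smp_processor_id() comparison), and drops the pin with put_cpu() just before returning; memcpy_multicache() runs with interrupts disabled, so it can use smp_processor_id() directly.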
Diffstat (limited to 'arch/tile/lib')
-rw-r--r--  arch/tile/lib/memcpy_tile64.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index 3bc4b4e40d93..0290c222847b 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -65,7 +65,7 @@ static void memcpy_multicache(void *dest, const void *source,
 	pmd_t *pmdp;
 	pte_t *ptep;
 	int type0, type1;
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 
 	/*
 	 * Disable interrupts so that we don't recurse into memcpy()
@@ -126,7 +126,6 @@ static void memcpy_multicache(void *dest, const void *source,
 	kmap_atomic_idx_pop();
 	sim_allow_multiple_caching(0);
 	local_irq_restore(flags);
-	put_cpu();
 }
 
 /*
@@ -137,6 +136,9 @@ static void memcpy_multicache(void *dest, const void *source,
 static unsigned long fast_copy(void *dest, const void *source, int len,
 			       memcpy_t func)
 {
+	int cpu = get_cpu();
+	unsigned long retval;
+
 	/*
 	 * Check if it's big enough to bother with. We may end up doing a
 	 * small copy via TLB manipulation if we're near a page boundary,
@@ -158,7 +160,7 @@ retry_source:
 		    !hv_pte_get_readable(src_pte) ||
 		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
 			break;
-		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
+		if (get_remote_cache_cpu(src_pte) == cpu)
 			break;
 		src_page = pfn_to_page(pte_pfn(src_pte));
 		get_page(src_page);
@@ -235,7 +237,9 @@ retry_dest:
 		len -= copy_size;
 	}
 
-	return func(dest, source, len);
+	retval = func(dest, source, len);
+	put_cpu();
+	return retval;
 }
 
 void *memcpy(void *to, const void *from, __kernel_size_t n)