author		Tejun Heo <tj@kernel.org>	2010-04-09 05:57:00 -0400
committer	Tejun Heo <tj@kernel.org>	2010-05-01 02:30:49 -0400
commit		020ec6537aa65c18e9084c568d7b94727f2026fd (patch)
tree		0466d590090ed9db214846887e7ea636fcd26169 /mm/percpu.c
parent		be1066bbcd443a65df312fdecea7e4959adedb45 (diff)
percpu: factor out pcpu_addr_in_first/reserved_chunk() and update per_cpu_ptr_to_phys()
Factor out pcpu_addr_in_first_chunk() and pcpu_addr_in_reserved_chunk()
from pcpu_chunk_addr_search() and use them to update
per_cpu_ptr_to_phys() so that it handles the first chunk differently
from the rest. This patch doesn't cause any functional change and
prepares for percpu nommu support.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Cc: Graff Yang <graff.yang@gmail.com>
Cc: Sonic Zhang <sonic.adi@gmail.com>
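For readers outside the kernel tree, below is a minimal userspace sketch of the range checks the two new helpers encode. The array and sizes (first_chunk_base, unit_size, reserved_limit) are hypothetical stand-ins for pcpu_first_chunk->base_addr, pcpu_unit_size, and pcpu_reserved_chunk_limit; the point is only that the reserved area is a prefix of the first chunk, so the reserved check implies the first-chunk check.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel globals used by the helpers. */
static char first_chunk_base[4096];			/* pcpu_first_chunk->base_addr */
static size_t unit_size = sizeof(first_chunk_base);	/* pcpu_unit_size */
static size_t reserved_limit = 1024;			/* pcpu_reserved_chunk_limit */

static bool addr_in_first_chunk(void *addr)
{
	char *first_start = first_chunk_base;

	return (char *)addr >= first_start &&
	       (char *)addr < first_start + unit_size;
}

static bool addr_in_reserved_chunk(void *addr)
{
	char *first_start = first_chunk_base;

	return (char *)addr >= first_start &&
	       (char *)addr < first_start + reserved_limit;
}

int main(void)
{
	void *in_reserved = first_chunk_base + 512;	/* below reserved_limit */
	void *in_dynamic  = first_chunk_base + 2048;	/* above reserved_limit */

	/* A reserved address passes both checks: prints "1 1". */
	printf("%d %d\n", addr_in_first_chunk(in_reserved),
	       addr_in_reserved_chunk(in_reserved));
	/* A dynamic-area address passes only the first-chunk check: "1 0". */
	printf("%d %d\n", addr_in_first_chunk(in_dynamic),
	       addr_in_reserved_chunk(in_dynamic));
	return 0;
}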
Diffstat (limited to 'mm/percpu.c')
 mm/percpu.c | 32 ++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 6e09741ddc62..1aeb081f30ec 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -177,6 +177,21 @@ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 static void pcpu_reclaim(struct work_struct *work);
 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
 
+static bool pcpu_addr_in_first_chunk(void *addr)
+{
+	void *first_start = pcpu_first_chunk->base_addr;
+
+	return addr >= first_start && addr < first_start + pcpu_unit_size;
+}
+
+static bool pcpu_addr_in_reserved_chunk(void *addr)
+{
+	void *first_start = pcpu_first_chunk->base_addr;
+
+	return addr >= first_start &&
+	       addr < first_start + pcpu_reserved_chunk_limit;
+}
+
 static int __pcpu_size_to_slot(int size)
 {
 	int highbit = fls(size);	/* size is in bytes */
@@ -334,12 +349,10 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  */
 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 {
-	void *first_start = pcpu_first_chunk->base_addr;
-
 	/* is it in the first chunk? */
-	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
+	if (pcpu_addr_in_first_chunk(addr)) {
 		/* is it in the reserved area? */
-		if (addr < first_start + pcpu_reserved_chunk_limit)
+		if (pcpu_addr_in_reserved_chunk(addr))
 			return pcpu_reserved_chunk;
 		return pcpu_first_chunk;
 	}
@@ -1343,10 +1356,13 @@ bool is_kernel_percpu_address(unsigned long addr)
  */
 phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
-	if ((unsigned long)addr < VMALLOC_START ||
-	    (unsigned long)addr >= VMALLOC_END)
-		return __pa(addr);
-	else
+	if (pcpu_addr_in_first_chunk(addr)) {
+		if ((unsigned long)addr < VMALLOC_START ||
+		    (unsigned long)addr >= VMALLOC_END)
+			return __pa(addr);
+		else
+			return page_to_phys(vmalloc_to_page(addr));
+	} else
 		return page_to_phys(vmalloc_to_page(addr));
 }
 
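As a usage illustration (not part of this patch), here is a minimal sketch of a caller of per_cpu_ptr_to_phys(); demo_counter and demo_show_phys() are hypothetical names invented for the example. For each possible CPU it resolves the per-CPU virtual address and prints the corresponding physical address: with a linearly mapped (embedded) first chunk the result comes from __pa(), while page-mapped units go through vmalloc_to_page().

#include <linux/kernel.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU variable used only for this illustration. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_show_phys(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Virtual address of this CPU's copy of demo_counter. */
		void *vaddr = per_cpu_ptr(&demo_counter, cpu);
		/* Physical address resolved by the function updated above. */
		phys_addr_t paddr = per_cpu_ptr_to_phys(vaddr);

		pr_info("cpu%d: %p -> %llx\n", cpu, vaddr,
			(unsigned long long)paddr);
	}
}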