author     akpm@linux-foundation.org <akpm@linux-foundation.org>    2007-07-24 22:44:55 -0400
committer  Tony Luck <tony.luck@intel.com>    2007-07-25 15:56:39 -0400
commit     3b74d18e54e20fc1d398eb391bea5b9aed22aca5 (patch)
tree       ab0c4f72ce87ea02978799c9e3fdc79c05bd1c9b /arch
parent     a07ee86205808d36973440e68c7277f9ed63b87f (diff)
[IA64] rename partial_page
Jens has added a partial_page thing in splice which conflicts with the ia64 one.  Rename ia64 out of the way.  (ia64 chose poorly.)

Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch')
-rw-r--r--    arch/ia64/ia32/ia32_support.c    8
-rw-r--r--    arch/ia64/ia32/ia32priv.h        12
-rw-r--r--    arch/ia64/ia32/sys_ia32.c        81
-rw-r--r--    arch/ia64/kernel/process.c       7
4 files changed, 55 insertions(+), 53 deletions(-)
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index e13a1a1db4b5..d1d50cd1c38a 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,11 +249,11 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern struct kmem_cache *partial_page_cachep;
+		extern struct kmem_cache *ia64_partial_page_cachep;
 
-		partial_page_cachep = kmem_cache_create("partial_page_cache",
-					sizeof(struct partial_page),
+		ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+					sizeof(struct ia64_partial_page),
 					0, SLAB_PANIC, NULL);
 	}
 #endif
 	return 0;
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index cfa0bc0026b5..466bbcb138b2 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -25,8 +25,8 @@
  * partially mapped pages provide precise accounting of which 4k sub pages
  * are mapped and which ones are not, thereby improving IA-32 compatibility.
  */
-struct partial_page {
-	struct partial_page	*next;	/* linked list, sorted by address */
+struct ia64_partial_page {
+	struct ia64_partial_page *next;	/* linked list, sorted by address */
 	struct rb_node		pp_rb;
 	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
 	 * should suffice.*/
@@ -34,17 +34,17 @@ struct partial_page {
 	unsigned int		base;
 };
 
-struct partial_page_list {
-	struct partial_page	*pp_head; /* list head, points to the lowest
+struct ia64_partial_page_list {
+	struct ia64_partial_page *pp_head; /* list head, points to the lowest
 					   * addressed partial page */
 	struct rb_root		ppl_rb;
-	struct partial_page	*pp_hint; /* pp_hint->next is the last
+	struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
 					   * accessed partial page */
 	atomic_t		pp_count; /* reference count */
 };
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
 #else
 # define ia32_init_pp_list() 0
 #endif
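
[Editorial note: the bitmap member of the structure sits between the "4K*64 should suffice" comment and base, outside the hunk shown. The accounting it implies is one bit per 4K IA-32 sub-page of a larger ia64 page. A minimal standalone sketch of that bookkeeping, assuming a 64K kernel page for the arithmetic; mark_subpages is a hypothetical helper invented for illustration, not a kernel function:]

#include <stdio.h>

#define IA32_PAGE_SHIFT	12			/* 4K IA-32 page */
#define IA32_PAGE_SIZE	(1u << IA32_PAGE_SHIFT)

/* Set one bit per 4K sub-page of [start, end) inside the 64K page
 * based at 'base', as the (unshown) bitmap member would record it.
 * An unsigned long gives 64 bits, more than the 16 a 64K page needs. */
static unsigned long mark_subpages(unsigned long bitmap, unsigned int base,
				   unsigned int start, unsigned int end)
{
	unsigned int first = (start - base) >> IA32_PAGE_SHIFT;
	unsigned int last = (end - base + IA32_PAGE_SIZE - 1) >> IA32_PAGE_SHIFT;

	for (unsigned int i = first; i < last && i < 64; i++)
		bitmap |= 1UL << i;
	return bitmap;
}

int main(void)
{
	/* mark the 2nd and 3rd 4K sub-pages of the 64K page at 0x10000 */
	unsigned long bm = mark_subpages(0, 0x10000, 0x11000, 0x13000);

	printf("bitmap = %#lx\n", bm);		/* prints bitmap = 0x6 */
	return 0;
}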
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 0afb4fe7c35b..af10462d44d4 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 	return ret;
 }
 
-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;
 
 /*
- * init partial_page_list.
+ * init ia64_partial_page_list.
  * return 0 means kmalloc fail.
  */
-struct partial_page_list*
+struct ia64_partial_page_list*
 ia32_init_pp_list(void)
 {
-	struct partial_page_list *p;
+	struct ia64_partial_page_list *p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
 		return p;
@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
  * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
  * be used by later __ia32_insert_pp().
  */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-	struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
 	struct rb_node **rb_parent)
 {
-	struct partial_page *pp;
+	struct ia64_partial_page *pp;
 	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
 	pp = ppl->pp_hint;
@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 
 	while (*__rb_link) {
 		__rb_parent = *__rb_link;
-		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
 
 		if (pp->base == start) {
 			ppl->pp_hint = pp;
@@ -314,7 +314,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 	*rb_parent = __rb_parent;
 	*pprev = NULL;
 	if (rb_prev)
-		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
 	return NULL;
 }
 
@@ -322,9 +322,9 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
  * insert @pp into @ppl.
  */
 static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev, struct rb_node **rb_link,
-	struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+	struct rb_node **rb_link, struct rb_node *rb_parent)
 {
 	/* link list */
 	if (prev) {
@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
 			ppl->pp_head = pp;
 		if (rb_parent)
 			pp->next = rb_entry(rb_parent,
-				struct partial_page, pp_rb);
+				struct ia64_partial_page, pp_rb);
 		else
 			pp->next = NULL;
 	}
@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
  * delete @pp from partial page list @ppl.
  */
 static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 {
 	if (prev) {
 		prev->next = pp->next;
@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_hint = pp->next;
 	}
 	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-	kmem_cache_free(partial_page_cachep, pp);
+	kmem_cache_free(ia64_partial_page_cachep, pp);
 }
 
-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
 {
 	struct rb_node *prev = rb_prev(&pp->pp_rb);
 	if (prev)
-		return rb_entry(prev, struct partial_page, pp_rb);
+		return rb_entry(prev, struct ia64_partial_page, pp_rb);
 	else
 		return NULL;
 }
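
[Editorial note: every rb_entry() call this patch touches is container_of() by another name: given a pointer to the embedded rb_node, it recovers the enclosing structure by subtracting the member's offset. A standalone userspace sketch of that pointer arithmetic; demo_partial_page and the two-field rb_node are simplified stand-ins, not the kernel definitions:]

#include <stdio.h>
#include <stddef.h>

struct rb_node { struct rb_node *rb_left, *rb_right; };	/* shape only */

struct demo_partial_page {		/* stand-in for ia64_partial_page */
	struct demo_partial_page *next;
	struct rb_node pp_rb;
	unsigned int base;
};

/* What the kernel's rb_entry() expands to: container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define rb_entry(ptr, type, member) container_of(ptr, type, member)

int main(void)
{
	struct demo_partial_page pp = { .base = 0x10000 };
	struct rb_node *node = &pp.pp_rb;	/* what a tree walk hands back */
	struct demo_partial_page *back =
		rb_entry(node, struct demo_partial_page, pp_rb);

	printf("recovered base = %#x\n", back->base);	/* 0x10000 */
	return 0;
}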
@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
 static void
 __ia32_delete_pp_range(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	if (start >= end)
@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 	}
 
 	while (pp && pp->base < end) {
-		struct partial_page *tmp = pp->next;
+		struct ia64_partial_page *tmp = pp->next;
 		__ia32_delete_pp(current->thread.ppl, pp, prev);
 		pp = tmp;
 	}
@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 static int
 __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 
@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 		return 0;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
 static int
 __ia32_unset_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 	struct vm_area_struct *vma;
@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
 		return -ENOMEM;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
 static int
 __ia32_compare_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, size;
 	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */
@@ -682,13 +682,13 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
 }
 
 static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp = ppl->pp_head;
+	struct ia64_partial_page *pp = ppl->pp_head;
 
 	while (pp) {
-		struct partial_page *next = pp->next;
-		kmem_cache_free(partial_page_cachep, pp);
+		struct ia64_partial_page *next = pp->next;
+		kmem_cache_free(ia64_partial_page_cachep, pp);
 		pp = next;
 	}
 
@@ -696,9 +696,9 @@ __ia32_drop_pp_list(struct partial_page_list *ppl)
 }
 
 void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
 {
-	struct partial_page_list* ppl = task->thread.ppl;
+	struct ia64_partial_page_list* ppl = task->thread.ppl;
 
 	if (ppl && atomic_dec_and_test(&ppl->pp_count))
 		__ia32_drop_pp_list(ppl);
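
[Editorial note: the drop path frees the shared list only when atomic_dec_and_test() takes pp_count to zero, i.e. when the last task referencing the ppl goes away. A userspace sketch of that idiom using C11 atomics; the kernel primitive is implemented differently but has the same return convention, true only on the 1 -> 0 transition:]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pp_count = 2;		/* two tasks sharing one ppl */

/* Same contract as the kernel's atomic_dec_and_test():
 * returns true only when the decrement reaches zero. */
static bool dec_and_test(atomic_int *v)
{
	return atomic_fetch_sub(v, 1) == 1;
}

int main(void)
{
	if (dec_and_test(&pp_count))		/* 2 -> 1: keep the list */
		puts("unexpected free");
	if (dec_and_test(&pp_count))		/* 1 -> 0: last user gone */
		puts("last reference dropped; __ia32_drop_pp_list() would run");
	return 0;
}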
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
  * Copy current->thread.ppl to ppl (already initialized).
  */
 static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp, *tmp, *prev;
+	struct ia64_partial_page *pp, *tmp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	ppl->pp_head = NULL;
@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 	prev = NULL;
 
 	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 		if (!tmp)
 			return -ENOMEM;
 		*tmp = *pp;
@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 }
 
 int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+				unsigned long clone_flags)
 {
 	int retval = 0;
 
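
[Editorial note: the body of this function falls outside the hunks shown, but its clone_flags argument points at the usual fork-time choice: share the list (bumping pp_count) when the address space is shared, deep-copy it otherwise. A hedged userspace sketch of that pattern with simplified stand-in types, not the kernel code itself; only the CLONE_VM flag value is taken from the kernel:]

#include <stdatomic.h>
#include <stdlib.h>

struct ppl {				/* stand-in for ia64_partial_page_list */
	atomic_int pp_count;		/* reference count */
	/* ... list head, rb root elided ... */
};

#define CLONE_VM 0x00000100		/* the kernel's flag value */

/* Fork-time policy: CLONE_VM children share one list; a real fork
 * gets its own (the deep copy that __ia32_copy_pp_list() performs
 * in the real code is elided here). */
static struct ppl *clone_ppl(struct ppl *parent, unsigned long clone_flags)
{
	if (clone_flags & CLONE_VM) {
		atomic_fetch_add(&parent->pp_count, 1);
		return parent;		/* child shares the parent's list */
	}

	struct ppl *child = calloc(1, sizeof(*child));
	if (child)
		atomic_init(&child->pp_count, 1);
	return child;
}

int main(void)
{
	static struct ppl parent = { .pp_count = 1 };
	struct ppl *thread_view = clone_ppl(&parent, CLONE_VM);	/* shared */
	struct ppl *fork_view = clone_ppl(&parent, 0);		/* copied */
	int ok = (thread_view == &parent) && (fork_view != &parent);

	free(fork_view);
	return !ok;
}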
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index fa40cba43350..4158906c45aa 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -499,7 +499,8 @@ copy_thread (int nr, unsigned long clone_flags,
 
 		/* Copy partially mapped page list */
 		if (!retval)
-			retval = ia32_copy_partial_page_list(p, clone_flags);
+			retval = ia32_copy_ia64_partial_page_list(p,
+								clone_flags);
 	}
 #endif
 
@@ -728,7 +729,7 @@ flush_thread (void)
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(task_pt_regs(current))) {
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}
@@ -754,7 +755,7 @@ exit_thread (void)
 		pfm_release_debug_registers(current);
 #endif
 	if (IS_IA32_PROCESS(task_pt_regs(current)))
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 }
 
 unsigned long