-rw-r--r--   arch/ia64/ia32/ia32_support.c |  8
-rw-r--r--   arch/ia64/ia32/ia32priv.h     | 12
-rw-r--r--   arch/ia64/ia32/sys_ia32.c     | 81
-rw-r--r--   arch/ia64/kernel/process.c    |  7
-rw-r--r--   include/asm-ia64/ia32.h       |  9
-rw-r--r--   include/asm-ia64/processor.h  |  4
6 files changed, 62 insertions(+), 59 deletions(-)
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index e13a1a1db4b5..d1d50cd1c38a 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,11 +249,11 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern struct kmem_cache *partial_page_cachep;
+		extern struct kmem_cache *ia64_partial_page_cachep;
 
-		partial_page_cachep = kmem_cache_create("partial_page_cache",
-					sizeof(struct partial_page),
+		ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+					sizeof(struct ia64_partial_page),
 					0, SLAB_PANIC, NULL);
 	}
 #endif
 	return 0;
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index cfa0bc0026b5..466bbcb138b2 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -25,8 +25,8 @@
  * partially mapped pages provide precise accounting of which 4k sub pages
  * are mapped and which ones are not, thereby improving IA-32 compatibility.
  */
-struct partial_page {
-	struct partial_page	*next;	/* linked list, sorted by address */
+struct ia64_partial_page {
+	struct ia64_partial_page *next;	/* linked list, sorted by address */
 	struct rb_node		pp_rb;
 	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
 	 * should suffice.*/
@@ -34,17 +34,17 @@ struct partial_page {
 	unsigned int		base;
 };
 
-struct partial_page_list {
-	struct partial_page	*pp_head; /* list head, points to the lowest
+struct ia64_partial_page_list {
+	struct ia64_partial_page *pp_head; /* list head, points to the lowest
 					   * addressed partial page */
 	struct rb_root		ppl_rb;
-	struct partial_page	*pp_hint; /* pp_hint->next is the last
+	struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
 					   * accessed partial page */
 	atomic_t		pp_count; /* reference count */
 };
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list(void);
 #else
 # define ia32_init_pp_list() 0
 #endif
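Context for the struct renamed above: when the kernel page size exceeds the 4K IA-32 page size, an IA-32 process can map or unmap a native page one 4K piece at a time, so each node records per-sub-page state in a bitmap. The bitmap field itself falls outside the hunk's context lines, but the "4K*64 should suffice" comment implies a single 64-bit word. The following stand-alone sketch models that bookkeeping; it is illustrative only, with hypothetical names (pp_model, pp_set_range) and an assumed 64K native page, not code from this commit.

#include <stdio.h>

/* Stand-alone model (hypothetical names): one 64-bit word is plenty,
 * since the largest "normal" ia64 page (64K) holds 64K/4K = 16
 * IA-32-sized sub-pages. */
#define SUBPAGE_SHIFT		12	/* 4K IA-32 page */
#define NATIVE_SHIFT		16	/* assume a 64K kernel page */
#define SUBPAGES_PER_PAGE	(1u << (NATIVE_SHIFT - SUBPAGE_SHIFT))

struct pp_model {
	unsigned int	base;	/* native-page-aligned start address */
	unsigned long	bitmap;	/* bit i set => sub-page i is mapped */
};

/* Mark the 4K sub-pages covering [start, end) as mapped. */
static void pp_set_range(struct pp_model *pp, unsigned int start,
			 unsigned int end)
{
	unsigned int first = (start - pp->base) >> SUBPAGE_SHIFT;
	unsigned int last = (end - pp->base) >> SUBPAGE_SHIFT;

	while (first < last)
		pp->bitmap |= 1UL << first++;
}

/* Once every sub-page is mapped, the tracking node carries no extra
 * information and the native page can be treated as ordinary. */
static int pp_fully_mapped(const struct pp_model *pp)
{
	return pp->bitmap == (1UL << SUBPAGES_PER_PAGE) - 1;
}

int main(void)
{
	struct pp_model pp = { .base = 0x10000, .bitmap = 0 };

	pp_set_range(&pp, 0x10000, 0x11000);	/* map one 4K sub-page */
	printf("bitmap=%#lx fully_mapped=%d\n", pp.bitmap,
	       pp_fully_mapped(&pp));
	return 0;
}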
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 0afb4fe7c35b..af10462d44d4 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 	return ret;
 }
 
-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;
 
 /*
- * init partial_page_list.
+ * init ia64_partial_page_list.
  * return 0 means kmalloc fail.
  */
-struct partial_page_list*
+struct ia64_partial_page_list*
 ia32_init_pp_list(void)
 {
-	struct partial_page_list *p;
+	struct ia64_partial_page_list *p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
 		return p;
@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
  * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
  * be used by later __ia32_insert_pp().
  */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-	struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
 	struct rb_node **rb_parent)
 {
-	struct partial_page *pp;
+	struct ia64_partial_page *pp;
 	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
 	pp = ppl->pp_hint;
@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 
 	while (*__rb_link) {
 		__rb_parent = *__rb_link;
-		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
 
 		if (pp->base == start) {
 			ppl->pp_hint = pp;
@@ -314,7 +314,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 	*rb_parent = __rb_parent;
 	*pprev = NULL;
 	if (rb_prev)
-		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
 	return NULL;
 }
 
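The three hunks above rename the pieces of __ia32_find_pp(), which follows the classic find_vma_prepare() shape: consult the cached pp_hint, descend the rb-tree keyed on base, and on a miss hand back the link/parent slot so __ia32_insert_pp() can attach a new node without re-searching. Below is a condensed sketch of that descent, assuming the structs from ia32priv.h above; the helper name pp_find_prepare and the omission of the hint/pprev bookkeeping are mine, not the file's.

#include <linux/rbtree.h>

/* Sketch only: exact-match lookup that also records the insertion
 * point for a later rb_link_node()/rb_insert_color() pair. */
static struct ia64_partial_page *
pp_find_prepare(struct ia64_partial_page_list *ppl, unsigned int start,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **link = &ppl->ppl_rb.rb_node, *parent = NULL;

	while (*link) {
		struct ia64_partial_page *pp;

		parent = *link;
		pp = rb_entry(parent, struct ia64_partial_page, pp_rb);
		if (pp->base == start) {
			ppl->pp_hint = pp;	/* remember for next time */
			return pp;
		}
		if (start < pp->base)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	*rb_link = link;	/* where the new node would hang */
	*rb_parent = parent;
	return NULL;
}

Returning rb_link/rb_parent is the design point: on a miss the caller can splice a new node in O(1), right where the search fell off the tree, instead of walking the tree a second time.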
@@ -322,9 +322,9 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
  * insert @pp into @ppl.
  */
 static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev, struct rb_node **rb_link,
-	struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+	struct rb_node **rb_link, struct rb_node *rb_parent)
 {
 	/* link list */
 	if (prev) {
@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_head = pp;
 		if (rb_parent)
 			pp->next = rb_entry(rb_parent,
-				struct partial_page, pp_rb);
+				struct ia64_partial_page, pp_rb);
 		else
 			pp->next = NULL;
 	}
@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
  * delete @pp from partial page list @ppl.
  */
 static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 {
 	if (prev) {
 		prev->next = pp->next;
@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_hint = pp->next;
 	}
 	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-	kmem_cache_free(partial_page_cachep, pp);
+	kmem_cache_free(ia64_partial_page_cachep, pp);
 }
 
-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
 {
 	struct rb_node *prev = rb_prev(&pp->pp_rb);
 	if (prev)
-		return rb_entry(prev, struct partial_page, pp_rb);
+		return rb_entry(prev, struct ia64_partial_page, pp_rb);
 	else
 		return NULL;
 }
@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
 static void
 __ia32_delete_pp_range(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	if (start >= end)
@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 	}
 
 	while (pp && pp->base < end) {
-		struct partial_page *tmp = pp->next;
+		struct ia64_partial_page *tmp = pp->next;
 		__ia32_delete_pp(current->thread.ppl, pp, prev);
 		pp = tmp;
 	}
@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 static int
 __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 
@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 		return 0;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
 static int
 __ia32_unset_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 	struct vm_area_struct *vma;
@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
 			return -ENOMEM;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
 static int
 __ia32_compare_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, size;
 	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */
@@ -682,13 +682,13 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
 }
 
 static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp = ppl->pp_head;
+	struct ia64_partial_page *pp = ppl->pp_head;
 
 	while (pp) {
-		struct partial_page *next = pp->next;
-		kmem_cache_free(partial_page_cachep, pp);
+		struct ia64_partial_page *next = pp->next;
+		kmem_cache_free(ia64_partial_page_cachep, pp);
 		pp = next;
 	}
 
@@ -696,9 +696,9 @@ __ia32_drop_pp_list(struct partial_page_list *ppl)
 }
 
 void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
 {
-	struct partial_page_list* ppl = task->thread.ppl;
+	struct ia64_partial_page_list* ppl = task->thread.ppl;
 
 	if (ppl && atomic_dec_and_test(&ppl->pp_count))
 		__ia32_drop_pp_list(ppl);
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
  * Copy current->thread.ppl to ppl (already initialized).
  */
 static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp, *tmp, *prev;
+	struct ia64_partial_page *pp, *tmp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	ppl->pp_head = NULL;
@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 	prev = NULL;
 
 	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 		if (!tmp)
 			return -ENOMEM;
 		*tmp = *pp;
@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 }
 
 int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+	unsigned long clone_flags)
 {
 	int retval = 0;
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index fa40cba43350..4158906c45aa 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -499,7 +499,8 @@ copy_thread (int nr, unsigned long clone_flags,
 
 		/* Copy partially mapped page list */
 		if (!retval)
-			retval = ia32_copy_partial_page_list(p, clone_flags);
+			retval = ia32_copy_ia64_partial_page_list(p,
+								clone_flags);
 	}
 #endif
 
@@ -728,7 +729,7 @@ flush_thread (void)
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(task_pt_regs(current))) {
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}
@@ -754,7 +755,7 @@ exit_thread (void)
 		pfm_release_debug_registers(current);
 #endif
 	if (IS_IA32_PROCESS(task_pt_regs(current)))
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 }
 
 unsigned long
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
index 5ff8d74c3e00..2390ee145aa1 100644
--- a/include/asm-ia64/ia32.h
+++ b/include/asm-ia64/ia32.h
@@ -27,11 +27,12 @@ extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
 extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
			      sigset_t *set, struct pt_regs *regs);
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long);
-extern void ia32_drop_partial_page_list (struct task_struct *);
+extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
+					    unsigned long);
+extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
 #else
-# define ia32_copy_partial_page_list(a1, a2)	0
-# define ia32_drop_partial_page_list(a1)	do { ; } while (0)
+# define ia32_copy_ia64_partial_page_list(a1, a2)	0
+# define ia32_drop_ia64_partial_page_list(a1)		do { ; } while (0)
 #endif
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 6251c76437d2..be3b0ae43270 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -220,7 +220,7 @@ struct desc_struct {
 
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 
-struct partial_page_list;
+struct ia64_partial_page_list;
 #endif
 
 struct thread_struct {
@@ -242,7 +242,7 @@ struct thread_struct {
 	__u64 fdr;			/* IA32 fp except. data reg */
 	__u64 old_k1;			/* old value of ar.k1 */
 	__u64 old_iob;			/* old IOBase value */
-	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
+	struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
 	/* cached TLS descriptors. */
 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
 