-rw-r--r--  arch/ia64/ia32/ia32_support.c   |  8
-rw-r--r--  arch/ia64/ia32/ia32priv.h       | 12
-rw-r--r--  arch/ia64/ia32/sys_ia32.c       | 81
-rw-r--r--  arch/ia64/kernel/head.S         |  4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c     | 26
-rw-r--r--  arch/ia64/kernel/machvec.c      | 27
-rw-r--r--  arch/ia64/kernel/process.c      |  7
-rw-r--r--  arch/ia64/kernel/setup.c        | 11
-rw-r--r--  arch/ia64/kernel/smp.c          |  2
-rw-r--r--  arch/ia64/kernel/smpboot.c      |  6
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S  |  2
-rw-r--r--  arch/ia64/pci/pci.c             |  2
-rw-r--r--  include/asm-ia64/ia32.h         |  9
-rw-r--r--  include/asm-ia64/machvec.h      |  1
-rw-r--r--  include/asm-ia64/processor.h    |  4
-rw-r--r--  include/asm-ia64/smp.h          |  1
16 files changed, 111 insertions(+), 92 deletions(-)
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index e13a1a1db4b5..d1d50cd1c38a 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,11 +249,11 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern struct kmem_cache *partial_page_cachep;
+		extern struct kmem_cache *ia64_partial_page_cachep;
 
-		partial_page_cachep = kmem_cache_create("partial_page_cache",
-					sizeof(struct partial_page),
+		ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+					sizeof(struct ia64_partial_page),
 					0, SLAB_PANIC, NULL);
 	}
 #endif
 	return 0;
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index cfa0bc0026b5..466bbcb138b2 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -25,8 +25,8 @@
  * partially mapped pages provide precise accounting of which 4k sub pages
  * are mapped and which ones are not, thereby improving IA-32 compatibility.
  */
-struct partial_page {
-	struct partial_page	*next;	/* linked list, sorted by address */
+struct ia64_partial_page {
+	struct ia64_partial_page *next;	/* linked list, sorted by address */
 	struct rb_node		pp_rb;
 	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
 	 * should suffice.*/
@@ -34,17 +34,17 @@ struct partial_page {
 	unsigned int	base;
 };
 
-struct partial_page_list {
-	struct partial_page	*pp_head; /* list head, points to the lowest
+struct ia64_partial_page_list {
+	struct ia64_partial_page *pp_head; /* list head, points to the lowest
 					   * addressed partial page */
 	struct rb_root		ppl_rb;
-	struct partial_page	*pp_hint; /* pp_hint->next is the last
+	struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
 					   * accessed partial page */
 	atomic_t		pp_count; /* reference count */
 };
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
 #else
 # define ia32_init_pp_list() 0
 #endif
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 0afb4fe7c35b..af10462d44d4 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 	return ret;
 }
 
-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;
 
 /*
- * init partial_page_list.
+ * init ia64_partial_page_list.
  * return 0 means kmalloc fail.
  */
-struct partial_page_list*
+struct ia64_partial_page_list*
 ia32_init_pp_list(void)
 {
-	struct partial_page_list *p;
+	struct ia64_partial_page_list *p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
 		return p;
@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
  * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
  * be used by later __ia32_insert_pp().
  */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-	struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
 	struct rb_node **rb_parent)
 {
-	struct partial_page *pp;
+	struct ia64_partial_page *pp;
 	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
 	pp = ppl->pp_hint;
@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 
 	while (*__rb_link) {
 		__rb_parent = *__rb_link;
-		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
 
 		if (pp->base == start) {
 			ppl->pp_hint = pp;
@@ -314,7 +314,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 	*rb_parent = __rb_parent;
 	*pprev = NULL;
 	if (rb_prev)
-		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
 	return NULL;
 }
 
@@ -322,9 +322,9 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
  * insert @pp into @ppl.
  */
 static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev, struct rb_node **rb_link,
-	struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+	struct rb_node **rb_link, struct rb_node *rb_parent)
 {
 	/* link list */
 	if (prev) {
@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_head = pp;
 		if (rb_parent)
 			pp->next = rb_entry(rb_parent,
-				struct partial_page, pp_rb);
+				struct ia64_partial_page, pp_rb);
 		else
 			pp->next = NULL;
 	}
@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
 * delete @pp from partial page list @ppl.
 */
 static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 {
 	if (prev) {
 		prev->next = pp->next;
@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_hint = pp->next;
 	}
 	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-	kmem_cache_free(partial_page_cachep, pp);
+	kmem_cache_free(ia64_partial_page_cachep, pp);
 }
 
-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
 {
 	struct rb_node *prev = rb_prev(&pp->pp_rb);
 	if (prev)
-		return rb_entry(prev, struct partial_page, pp_rb);
+		return rb_entry(prev, struct ia64_partial_page, pp_rb);
 	else
 		return NULL;
 }
@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
 static void
 __ia32_delete_pp_range(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	if (start >= end)
@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 	}
 
 	while (pp && pp->base < end) {
-		struct partial_page *tmp = pp->next;
+		struct ia64_partial_page *tmp = pp->next;
 		__ia32_delete_pp(current->thread.ppl, pp, prev);
 		pp = tmp;
 	}
@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 static int
 __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 
@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 		return 0;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
 static int
 __ia32_unset_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 	struct vm_area_struct *vma;
@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
 		return -ENOMEM;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
 static int
 __ia32_compare_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, size;
 	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */
@@ -682,13 +682,13 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
 }
 
 static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp = ppl->pp_head;
+	struct ia64_partial_page *pp = ppl->pp_head;
 
 	while (pp) {
-		struct partial_page *next = pp->next;
-		kmem_cache_free(partial_page_cachep, pp);
+		struct ia64_partial_page *next = pp->next;
+		kmem_cache_free(ia64_partial_page_cachep, pp);
 		pp = next;
 	}
 
@@ -696,9 +696,9 @@ __ia32_drop_pp_list(struct partial_page_list *ppl)
 }
 
 void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
 {
-	struct partial_page_list* ppl = task->thread.ppl;
+	struct ia64_partial_page_list* ppl = task->thread.ppl;
 
 	if (ppl && atomic_dec_and_test(&ppl->pp_count))
 		__ia32_drop_pp_list(ppl);
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
 * Copy current->thread.ppl to ppl (already initialized).
 */
 static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp, *tmp, *prev;
+	struct ia64_partial_page *pp, *tmp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	ppl->pp_head = NULL;
@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 	prev = NULL;
 
 	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 		if (!tmp)
 			return -ENOMEM;
 		*tmp = *pp;
@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 }
 
 int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+	unsigned long clone_flags)
 {
 	int retval = 0;
 
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 44d540efa6d1..4e5e27540e27 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -178,7 +178,7 @@ swapper_pg_dir:
 halt_msg:
 	stringz "Halting kernel\n"
 
-	.text
+	.section .text.head,"ax"
 
 	.global start_ap
 
@@ -392,6 +392,8 @@ self:	hint @pause
 	br.sptk.many self		// endless loop
 END(_start)
 
+	.text
+
 GLOBAL_ENTRY(ia64_save_debug_regs)
 	alloc r16=ar.pfs,1,0,0,0
 	mov r20=ar.lc			// preserve ar.lc
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 91797c111162..fcb77338cc09 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -85,8 +85,8 @@ DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
 	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
 };
 
-static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
-	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+static cpumask_t vector_table[IA64_NUM_VECTORS] = {
+	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
 };
 
 static int irq_status[NR_IRQS] = {
@@ -123,17 +123,18 @@ static inline int find_unassigned_irq(void)
 static inline int find_unassigned_vector(cpumask_t domain)
 {
 	cpumask_t mask;
-	int pos;
+	int pos, vector;
 
 	cpus_and(mask, domain, cpu_online_map);
 	if (cpus_empty(mask))
 		return -EINVAL;
 
 	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
-		cpus_and(mask, domain, vector_table[pos]);
+		vector = IA64_FIRST_DEVICE_VECTOR + pos;
+		cpus_and(mask, domain, vector_table[vector]);
 		if (!cpus_empty(mask))
 			continue;
-		return IA64_FIRST_DEVICE_VECTOR + pos;
+		return vector;
 	}
 	return -ENOSPC;
 }
@@ -141,7 +142,7 @@ static inline int find_unassigned_vector(cpumask_t domain)
 static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 {
 	cpumask_t mask;
-	int cpu, pos;
+	int cpu;
 	struct irq_cfg *cfg = &irq_cfg[irq];
 
 	cpus_and(mask, domain, cpu_online_map);
@@ -156,8 +157,7 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 	cfg->vector = vector;
 	cfg->domain = domain;
 	irq_status[irq] = IRQ_USED;
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	cpus_or(vector_table[pos], vector_table[pos], domain);
+	cpus_or(vector_table[vector], vector_table[vector], domain);
 	return 0;
 }
 
@@ -174,7 +174,7 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
 
 static void __clear_irq_vector(int irq)
 {
-	int vector, cpu, pos;
+	int vector, cpu;
 	cpumask_t mask;
 	cpumask_t domain;
 	struct irq_cfg *cfg = &irq_cfg[irq];
@@ -189,8 +189,7 @@ static void __clear_irq_vector(int irq)
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
 	cfg->domain = CPU_MASK_NONE;
 	irq_status[irq] = IRQ_UNUSED;
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	cpus_andnot(vector_table[pos], vector_table[pos], domain);
+	cpus_andnot(vector_table[vector], vector_table[vector], domain);
 }
 
 static void clear_irq_vector(int irq)
@@ -212,9 +211,6 @@ assign_irq_vector (int irq)
 	vector = -ENOSPC;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	if (irq < 0) {
-		goto out;
-	}
 	for_each_online_cpu(cpu) {
 		domain = vector_allocation_domain(cpu);
 		vector = find_unassigned_vector(domain);
@@ -223,6 +219,8 @@ assign_irq_vector (int irq)
 	}
 	if (vector < 0)
 		goto out;
+	if (irq == AUTO_ASSIGN)
+		irq = vector;
 	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
 	spin_unlock_irqrestore(&vector_lock, flags);
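The irq_ia64.c hunks above resize vector_table[] to IA64_NUM_VECTORS and index it by the interrupt vector itself instead of by the offset (vector - IA64_FIRST_DEVICE_VECTOR), and assign_irq_vector() now turns an AUTO_ASSIGN request into irq == vector. A minimal stand-alone sketch of the indexing change is shown below; the constant values and the unsigned long bitmask standing in for cpumask_t are illustrative assumptions, not taken from the tree.

/* Illustrative only: contrasts the old offset-based indexing with the new
 * direct indexing by vector number.  Constant values are assumed and an
 * unsigned long bitmask stands in for the kernel's cpumask_t. */
#include <assert.h>

#define IA64_NUM_VECTORS		256
#define IA64_FIRST_DEVICE_VECTOR	0x30
#define IA64_MAX_DEVICE_VECTORS		(IA64_NUM_VECTORS - IA64_FIRST_DEVICE_VECTOR)

static unsigned long old_table[IA64_MAX_DEVICE_VECTORS];	/* indexed by offset */
static unsigned long new_table[IA64_NUM_VECTORS];		/* indexed by vector */

int main(void)
{
	int vector = IA64_FIRST_DEVICE_VECTOR + 5;
	unsigned long domain = 0x3;		/* CPUs 0 and 1 */

	/* old scheme: every access converts vector -> table offset */
	old_table[vector - IA64_FIRST_DEVICE_VECTOR] |= domain;

	/* new scheme: the vector is the index, no conversion step */
	new_table[vector] |= domain;

	assert(old_table[5] == new_table[vector]);
	return 0;
}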
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 13df337508e7..7ccb228ceedc 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -13,14 +13,6 @@
 struct ia64_machine_vector ia64_mv;
 EXPORT_SYMBOL(ia64_mv);
 
-static __initdata const char *mvec_name;
-static __init int setup_mvec(char *s)
-{
-	mvec_name = s;
-	return 0;
-}
-early_param("machvec", setup_mvec);
-
 static struct ia64_machine_vector * __init
 lookup_machvec (const char *name)
 {
@@ -41,7 +33,7 @@ machvec_init (const char *name)
 	struct ia64_machine_vector *mv;
 
 	if (!name)
-		name = mvec_name ? mvec_name : acpi_get_sysname();
+		name = acpi_get_sysname();
 	mv = lookup_machvec(name);
 	if (!mv)
 		panic("generic kernel failed to find machine vector for"
@@ -51,6 +43,23 @@ machvec_init (const char *name)
 	printk(KERN_INFO "booting generic kernel on platform %s\n", name);
 }
 
+void __init
+machvec_init_from_cmdline(const char *cmdline)
+{
+	char str[64];
+	const char *start;
+	char *end;
+
+	if (! (start = strstr(cmdline, "machvec=")) )
+		return machvec_init(NULL);
+
+	strlcpy(str, start + strlen("machvec="), sizeof(str));
+	if ( (end = strchr(str, ' ')) )
+		*end = '\0';
+
+	return machvec_init(str);
+}
+
 #endif /* CONFIG_IA64_GENERIC */
 
 void
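The new machvec_init_from_cmdline() lets a generic kernel pick its machine vector straight from the boot command line before parse_early_param() runs (see the setup.c hunk further down), replacing the old early_param("machvec", ...) hook. A stand-alone sketch of the same token extraction follows; the example boot line and the use of snprintf() in place of the kernel's strlcpy() are assumptions made for illustration.

/* Illustrative only: extract "machvec=<name>" from a boot command line the
 * same way the helper above does.  The example command line is made up and
 * snprintf() stands in for the kernel's strlcpy(). */
#include <stdio.h>
#include <string.h>

static void pick_machvec(const char *cmdline)
{
	char str[64];
	const char *start;
	char *end;

	start = strstr(cmdline, "machvec=");
	if (!start) {
		printf("no machvec= option: fall back to ACPI detection\n");
		return;
	}

	snprintf(str, sizeof(str), "%s", start + strlen("machvec="));
	if ((end = strchr(str, ' ')))	/* cut the value at the next option */
		*end = '\0';

	printf("selected machine vector: %s\n", str);
}

int main(void)
{
	pick_machvec("root=/dev/sda2 machvec=dig console=ttyS0");
	return 0;
}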
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index fa40cba43350..4158906c45aa 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -499,7 +499,8 @@ copy_thread (int nr, unsigned long clone_flags,
 
 		/* Copy partially mapped page list */
 		if (!retval)
-			retval = ia32_copy_partial_page_list(p, clone_flags);
+			retval = ia32_copy_ia64_partial_page_list(p,
+								clone_flags);
 	}
 #endif
 
@@ -728,7 +729,7 @@ flush_thread (void)
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(task_pt_regs(current))) {
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}
@@ -754,7 +755,7 @@ exit_thread (void)
 		pfm_release_debug_registers(current);
 #endif
 	if (IS_IA32_PROCESS(task_pt_regs(current)))
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 }
 
 unsigned long
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index cf06fe799041..7cecd2964200 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -491,12 +491,17 @@ setup_arch (char **cmdline_p)
 	efi_init();
 	io_port_init();
 
-	parse_early_param();
-
 #ifdef CONFIG_IA64_GENERIC
-	machvec_init(NULL);
+	/* machvec needs to be parsed from the command line
+	 * before parse_early_param() is called to ensure
+	 * that ia64_mv is initialised before any command line
+	 * settings may cause console setup to occur
+	 */
+	machvec_init_from_cmdline(*cmdline_p);
 #endif
 
+	parse_early_param();
+
 	if (early_console_setup(*cmdline_p) == 0)
 		mark_bsp_online();
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9f72838db26e..0982882bfb80 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -468,7 +468,7 @@ smp_send_stop (void)
 	send_IPI_allbutself(IPI_CPU_STOP);
 }
 
-int __init
+int
 setup_profiling_timer (unsigned int multiplier)
 {
 	return -EINVAL;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 9f5c90b594b9..62209dcf06d3 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -487,7 +487,7 @@ struct create_idle {
 	int cpu;
 };
 
-void
+void __cpuinit
 do_fork_idle(struct work_struct *work)
 {
 	struct create_idle *c_idle =
@@ -497,7 +497,7 @@ do_fork_idle(struct work_struct *work)
 	complete(&c_idle->done);
 }
 
-static int __devinit
+static int __cpuinit
 do_boot_cpu (int sapicid, int cpu)
 {
 	int timeout;
@@ -808,7 +808,7 @@ set_cpu_sibling_map(int cpu)
 	}
 }
 
-int __devinit
+int __cpuinit
 __cpu_up (unsigned int cpu)
 {
 	int ret;
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 860f251d2fc2..83e80677de70 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -50,6 +50,8 @@ SECTIONS
 	  KPROBES_TEXT
 	  *(.gnu.linkonce.t*)
 	}
+  .text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
+	{ *(.text.head) }
   .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
 	{ *(.text2) }
 #ifdef CONFIG_SMP
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 07d0e92742c8..488e48a5deea 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -581,7 +581,7 @@ pcibios_align_resource (void *data, struct resource *res,
 /*
  * PCI BIOS setup, always defaults to SAL interface
  */
-char * __init
+char * __devinit
 pcibios_setup (char *str)
 {
 	return str;
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
index 5ff8d74c3e00..2390ee145aa1 100644
--- a/include/asm-ia64/ia32.h
+++ b/include/asm-ia64/ia32.h
@@ -27,11 +27,12 @@ extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
 extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
 			      sigset_t *set, struct pt_regs *regs);
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long);
-extern void ia32_drop_partial_page_list (struct task_struct *);
+extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
+						unsigned long);
+extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
 #else
-# define ia32_copy_partial_page_list(a1, a2)	0
-# define ia32_drop_partial_page_list(a1)	do { ; } while (0)
+# define ia32_copy_ia64_partial_page_list(a1, a2)	0
+# define ia32_drop_ia64_partial_page_list(a1)	do { ; } while (0)
 #endif
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index ca33eb181ff2..5cf8bf1e805e 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -275,6 +275,7 @@ struct ia64_machine_vector {
 
 extern struct ia64_machine_vector ia64_mv;
 extern void machvec_init (const char *name);
+extern void machvec_init_from_cmdline(const char *cmdline);
 
 # else
 #  error Unknown configuration.  Update asm-ia64/machvec.h.
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 6251c76437d2..be3b0ae43270 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -220,7 +220,7 @@ struct desc_struct {
 
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 
-struct partial_page_list;
+struct ia64_partial_page_list;
 #endif
 
 struct thread_struct {
@@ -242,7 +242,7 @@ struct thread_struct {
 	__u64 fdr;			/* IA32 fp except. data reg */
 	__u64 old_k1;			/* old value of ar.k1 */
 	__u64 old_iob;			/* old IOBase value */
-	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
+	struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
 	/* cached TLS descriptors. */
 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
 
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index c60024989ebd..6314b29e8c4d 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -116,7 +116,6 @@ max_xtp (void)
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
 extern void cpu_die (void) __attribute__ ((noreturn));
-extern int __cpu_up (unsigned int cpu);
 extern void __init smp_build_cpu_map(void);
 
 extern void __init init_smp_config (void);