Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/Kconfig                   3
-rw-r--r--   arch/ia64/hp/common/sba_iommu.c     7
-rw-r--r--   arch/ia64/hp/sim/simscsi.c          2
-rw-r--r--   arch/ia64/ia32/ia32_support.c       8
-rw-r--r--   arch/ia64/ia32/ia32priv.h          12
-rw-r--r--   arch/ia64/ia32/sys_ia32.c          81
-rw-r--r--   arch/ia64/kernel/acpi.c            28
-rw-r--r--   arch/ia64/kernel/cyclone.c         14
-rw-r--r--   arch/ia64/kernel/head.S             4
-rw-r--r--   arch/ia64/kernel/init_task.c        1
-rw-r--r--   arch/ia64/kernel/irq_ia64.c        31
-rw-r--r--   arch/ia64/kernel/machvec.c         27
-rw-r--r--   arch/ia64/kernel/process.c          7
-rw-r--r--   arch/ia64/kernel/setup.c           11
-rw-r--r--   arch/ia64/kernel/smp.c              2
-rw-r--r--   arch/ia64/kernel/smpboot.c          6
-rw-r--r--   arch/ia64/kernel/time.c             4
-rw-r--r--   arch/ia64/kernel/vmlinux.lds.S      2
-rw-r--r--   arch/ia64/pci/pci.c                 2
19 files changed, 150 insertions, 102 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 36c7b9682aa6..21aa4fc5f8ef 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -425,6 +425,9 @@ config COMPAT
 	depends on IA32_SUPPORT
 	default y
 
+config COMPAT_FOR_U64_ALIGNMENT
+	def_bool COMPAT
+
 config IA64_MCA_RECOVERY
 	tristate "MCA recovery from errors other than TLB."
 
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index cd4adf52f174..e980e7aa2306 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2015,9 +2015,14 @@ acpi_sba_ioc_add(struct acpi_device *device)
 	return 0;
 }
 
+static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
+	{"HWP0001", 0},
+	{"HWP0004", 0},
+	{"", 0},
+};
 static struct acpi_driver acpi_sba_ioc_driver = {
 	.name		= "IOC IOMMU Driver",
-	.ids		= "HWP0001,HWP0004",
+	.ids		= hp_ioc_iommu_device_ids,
 	.ops		= {
 		.add	= acpi_sba_ioc_add,
 	},
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index decdf6e1e5d0..e62694f8ef75 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -101,7 +101,7 @@ simscsi_interrupt (unsigned long val)
 {
 	struct scsi_cmnd *sc;
 
-	while ((sc = queue[rd].sc) != 0) {
+	while ((sc = queue[rd].sc) != NULL) {
 		atomic_dec(&num_reqs);
 		queue[rd].sc = NULL;
 		if (DBG)
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index e13a1a1db4b5..d1d50cd1c38a 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,11 +249,11 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern struct kmem_cache *partial_page_cachep;
+		extern struct kmem_cache *ia64_partial_page_cachep;
 
-		partial_page_cachep = kmem_cache_create("partial_page_cache",
-					sizeof(struct partial_page),
+		ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+					sizeof(struct ia64_partial_page),
 					0, SLAB_PANIC, NULL);
 	}
 #endif
 	return 0;
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index cfa0bc0026b5..466bbcb138b2 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -25,8 +25,8 @@
  * partially mapped pages provide precise accounting of which 4k sub pages
  * are mapped and which ones are not, thereby improving IA-32 compatibility.
  */
-struct partial_page {
-	struct partial_page	*next;	/* linked list, sorted by address */
+struct ia64_partial_page {
+	struct ia64_partial_page *next;	/* linked list, sorted by address */
 	struct rb_node		pp_rb;
 	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
 	 * should suffice.*/
@@ -34,17 +34,17 @@ struct partial_page {
 	unsigned int	base;
 };
 
-struct partial_page_list {
-	struct partial_page	*pp_head; /* list head, points to the lowest
+struct ia64_partial_page_list {
+	struct ia64_partial_page *pp_head; /* list head, points to the lowest
 					   * addressed partial page */
 	struct rb_root		ppl_rb;
-	struct partial_page	*pp_hint; /* pp_hint->next is the last
+	struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
 					   * accessed partial page */
 	atomic_t		pp_count; /* reference count */
 };
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
 #else
 # define ia32_init_pp_list() 0
 #endif
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 0afb4fe7c35b..af10462d44d4 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 	return ret;
 }
 
-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;
 
 /*
- * init partial_page_list.
+ * init ia64_partial_page_list.
  * return 0 means kmalloc fail.
  */
-struct partial_page_list*
+struct ia64_partial_page_list*
 ia32_init_pp_list(void)
 {
-	struct partial_page_list *p;
+	struct ia64_partial_page_list *p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
 		return p;
@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
  * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
  * be used by later __ia32_insert_pp().
  */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-	struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
 	struct rb_node **rb_parent)
 {
-	struct partial_page *pp;
+	struct ia64_partial_page *pp;
 	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
 	pp = ppl->pp_hint;
@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 
 	while (*__rb_link) {
 		__rb_parent = *__rb_link;
-		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
 
 		if (pp->base == start) {
 			ppl->pp_hint = pp;
@@ -314,7 +314,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
 	*rb_parent = __rb_parent;
 	*pprev = NULL;
 	if (rb_prev)
-		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
 	return NULL;
 }
 
@@ -322,9 +322,9 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
  * insert @pp into @ppl.
  */
 static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev, struct rb_node **rb_link,
-	struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+	struct rb_node **rb_link, struct rb_node *rb_parent)
 {
 	/* link list */
 	if (prev) {
@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_head = pp;
 		if (rb_parent)
 			pp->next = rb_entry(rb_parent,
-				struct partial_page, pp_rb);
+				struct ia64_partial_page, pp_rb);
 		else
 			pp->next = NULL;
 	}
@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
  * delete @pp from partial page list @ppl.
  */
 static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 {
 	if (prev) {
 		prev->next = pp->next;
@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
 		ppl->pp_hint = pp->next;
 	}
 	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-	kmem_cache_free(partial_page_cachep, pp);
+	kmem_cache_free(ia64_partial_page_cachep, pp);
 }
 
-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
 {
 	struct rb_node *prev = rb_prev(&pp->pp_rb);
 	if (prev)
-		return rb_entry(prev, struct partial_page, pp_rb);
+		return rb_entry(prev, struct ia64_partial_page, pp_rb);
 	else
 		return NULL;
 }
@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
 static void
 __ia32_delete_pp_range(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	if (start >= end)
@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 	}
 
 	while (pp && pp->base < end) {
-		struct partial_page *tmp = pp->next;
+		struct ia64_partial_page *tmp = pp->next;
 		__ia32_delete_pp(current->thread.ppl, pp, prev);
 		pp = tmp;
 	}
@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
 static int
 __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 
@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 		return 0;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
 static int
 __ia32_unset_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 	struct vm_area_struct *vma;
@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
 		return -ENOMEM;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
 static int
 __ia32_compare_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, size;
 	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */
@@ -682,13 +682,13 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
 }
 
 static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp = ppl->pp_head;
+	struct ia64_partial_page *pp = ppl->pp_head;
 
 	while (pp) {
-		struct partial_page *next = pp->next;
-		kmem_cache_free(partial_page_cachep, pp);
+		struct ia64_partial_page *next = pp->next;
+		kmem_cache_free(ia64_partial_page_cachep, pp);
 		pp = next;
 	}
 
@@ -696,9 +696,9 @@ __ia32_drop_pp_list(struct partial_page_list *ppl)
 }
 
 void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
 {
-	struct partial_page_list* ppl = task->thread.ppl;
+	struct ia64_partial_page_list* ppl = task->thread.ppl;
 
 	if (ppl && atomic_dec_and_test(&ppl->pp_count))
 		__ia32_drop_pp_list(ppl);
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
  * Copy current->thread.ppl to ppl (already initialized).
  */
 static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp, *tmp, *prev;
+	struct ia64_partial_page *pp, *tmp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	ppl->pp_head = NULL;
@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 	prev = NULL;
 
 	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 		if (!tmp)
 			return -ENOMEM;
 		*tmp = *pp;
@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
 }
 
 int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+	unsigned long clone_flags)
 {
 	int retval = 0;
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 103dd8edda71..3d45d24a9d61 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -67,6 +67,8 @@ EXPORT_SYMBOL(pm_power_off);
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
+unsigned long acpi_wakeup_address = 0;
+
 const char __init *
 acpi_get_sysname(void)
 {
@@ -739,16 +741,15 @@ int __init acpi_boot_init(void)
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
-	int vector;
+	int tmp;
 
 	if (has_8259 && gsi < 16)
 		*irq = isa_irq_to_vector(gsi);
 	else {
-		vector = gsi_to_vector(gsi);
-		if (vector == -1)
+		tmp = gsi_to_irq(gsi);
+		if (tmp == -1)
 			return -1;
-
-		*irq = vector;
+		*irq = tmp;
 	}
 	return 0;
 }
@@ -986,4 +987,21 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 
 EXPORT_SYMBOL(acpi_unregister_ioapic);
 
+/*
+ * acpi_save_state_mem() - save kernel state
+ *
+ * TBD when when IA64 starts to support suspend...
+ */
+int acpi_save_state_mem(void) { return 0; }
+
+/*
+ * acpi_restore_state()
+ */
+void acpi_restore_state_mem(void) {}
+
+/*
+ * do_suspend_lowlevel()
+ */
+void do_suspend_lowlevel(void) {}
+
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 2fd96d9062a1..790ef0d87e12 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -38,11 +38,11 @@ static struct clocksource clocksource_cyclone = {
 
 int __init init_cyclone_clock(void)
 {
-	u64* reg;
+	u64 __iomem *reg;
 	u64 base;	/* saved cyclone base address */
 	u64 offset;	/* offset from pageaddr to cyclone_timer register */
 	int i;
-	u32* volatile cyclone_timer;	/* Cyclone MPMC0 register */
+	u32 __iomem *cyclone_timer;	/* Cyclone MPMC0 register */
 
 	if (!use_cyclone)
 		return 0;
@@ -51,7 +51,7 @@ int __init init_cyclone_clock(void)
 
 	/* find base address */
 	offset = (CYCLONE_CBAR_ADDR);
-	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
+	reg = ioremap_nocache(offset, sizeof(u64));
 	if(!reg){
 		printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
 				" register.\n");
@@ -69,7 +69,7 @@ int __init init_cyclone_clock(void)
 
 	/* setup PMCC */
 	offset = (base + CYCLONE_PMCC_OFFSET);
-	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
+	reg = ioremap_nocache(offset, sizeof(u64));
 	if(!reg){
 		printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
 				" register.\n");
@@ -81,7 +81,7 @@ int __init init_cyclone_clock(void)
 
 	/* setup MPCS */
 	offset = (base + CYCLONE_MPCS_OFFSET);
-	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
+	reg = ioremap_nocache(offset, sizeof(u64));
 	if(!reg){
 		printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
 				" register.\n");
@@ -93,7 +93,7 @@ int __init init_cyclone_clock(void)
 
 	/* map in cyclone_timer */
 	offset = (base + CYCLONE_MPMC_OFFSET);
-	cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32));
+	cyclone_timer = ioremap_nocache(offset, sizeof(u32));
 	if(!cyclone_timer){
 		printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
 				" register.\n");
@@ -110,7 +110,7 @@ int __init init_cyclone_clock(void)
 		printk(KERN_ERR "Summit chipset: Counter not counting!"
 				" DISABLED\n");
 		iounmap(cyclone_timer);
-		cyclone_timer = 0;
+		cyclone_timer = NULL;
 		use_cyclone = 0;
 		return -ENODEV;
 	}
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 44d540efa6d1..4e5e27540e27 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -178,7 +178,7 @@ swapper_pg_dir:
 halt_msg:
 	stringz "Halting kernel\n"
 
-	.text
+	.section .text.head,"ax"
 
 	.global start_ap
 
@@ -392,6 +392,8 @@ self: hint @pause
 	br.sptk.many self		// endless loop
 END(_start)
 
+	.text
+
 GLOBAL_ENTRY(ia64_save_debug_regs)
 	alloc r16=ar.pfs,1,0,0,0
 	mov r20=ar.lc			// preserve ar.lc
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index b69c397ed1bf..bc8efcad28b8 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -8,6 +8,7 @@
 
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/init_task.h>
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 91797c111162..9386b955eed1 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -85,8 +85,8 @@ DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
 	[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
 };
 
-static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
-	[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+static cpumask_t vector_table[IA64_NUM_VECTORS] = {
+	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
 };
 
 static int irq_status[NR_IRQS] = {
@@ -123,17 +123,18 @@ static inline int find_unassigned_irq(void)
 static inline int find_unassigned_vector(cpumask_t domain)
 {
 	cpumask_t mask;
-	int pos;
+	int pos, vector;
 
 	cpus_and(mask, domain, cpu_online_map);
 	if (cpus_empty(mask))
 		return -EINVAL;
 
 	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
-		cpus_and(mask, domain, vector_table[pos]);
+		vector = IA64_FIRST_DEVICE_VECTOR + pos;
+		cpus_and(mask, domain, vector_table[vector]);
 		if (!cpus_empty(mask))
 			continue;
-		return IA64_FIRST_DEVICE_VECTOR + pos;
+		return vector;
 	}
 	return -ENOSPC;
 }
@@ -141,9 +142,12 @@ static inline int find_unassigned_vector(cpumask_t domain)
 static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 {
 	cpumask_t mask;
-	int cpu, pos;
+	int cpu;
 	struct irq_cfg *cfg = &irq_cfg[irq];
 
+	BUG_ON((unsigned)irq >= NR_IRQS);
+	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
+
 	cpus_and(mask, domain, cpu_online_map);
 	if (cpus_empty(mask))
 		return -EINVAL;
@@ -156,8 +160,7 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
 	cfg->vector = vector;
 	cfg->domain = domain;
 	irq_status[irq] = IRQ_USED;
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	cpus_or(vector_table[pos], vector_table[pos], domain);
+	cpus_or(vector_table[vector], vector_table[vector], domain);
 	return 0;
 }
 
@@ -174,7 +177,7 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
 
 static void __clear_irq_vector(int irq)
 {
-	int vector, cpu, pos;
+	int vector, cpu;
 	cpumask_t mask;
 	cpumask_t domain;
 	struct irq_cfg *cfg = &irq_cfg[irq];
@@ -189,8 +192,7 @@ static void __clear_irq_vector(int irq)
 	cfg->vector = IRQ_VECTOR_UNASSIGNED;
 	cfg->domain = CPU_MASK_NONE;
 	irq_status[irq] = IRQ_UNUSED;
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	cpus_andnot(vector_table[pos], vector_table[pos], domain);
+	cpus_andnot(vector_table[vector], vector_table[vector], domain);
 }
 
 static void clear_irq_vector(int irq)
@@ -212,9 +214,6 @@ assign_irq_vector (int irq)
 	vector = -ENOSPC;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	if (irq < 0) {
-		goto out;
-	}
 	for_each_online_cpu(cpu) {
 		domain = vector_allocation_domain(cpu);
 		vector = find_unassigned_vector(domain);
@@ -223,6 +222,8 @@ assign_irq_vector (int irq)
 	}
 	if (vector < 0)
 		goto out;
+	if (irq == AUTO_ASSIGN)
+		irq = vector;
 	BUG_ON(__bind_irq_vector(irq, vector, domain));
  out:
 	spin_unlock_irqrestore(&vector_lock, flags);
@@ -288,7 +289,7 @@ static int __init parse_vector_domain(char *arg)
 		vector_domain_type = VECTOR_DOMAIN_PERCPU;
 		no_int_routing = 1;
 	}
-	return 1;
+	return 0;
 }
 early_param("vector", parse_vector_domain);
 #else
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 13df337508e7..7ccb228ceedc 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -13,14 +13,6 @@
 struct ia64_machine_vector ia64_mv;
 EXPORT_SYMBOL(ia64_mv);
 
-static __initdata const char *mvec_name;
-static __init int setup_mvec(char *s)
-{
-	mvec_name = s;
-	return 0;
-}
-early_param("machvec", setup_mvec);
-
 static struct ia64_machine_vector * __init
 lookup_machvec (const char *name)
 {
@@ -41,7 +33,7 @@ machvec_init (const char *name)
 	struct ia64_machine_vector *mv;
 
 	if (!name)
-		name = mvec_name ? mvec_name : acpi_get_sysname();
+		name = acpi_get_sysname();
 	mv = lookup_machvec(name);
 	if (!mv)
 		panic("generic kernel failed to find machine vector for"
@@ -51,6 +43,23 @@ machvec_init (const char *name)
 	printk(KERN_INFO "booting generic kernel on platform %s\n", name);
 }
 
+void __init
+machvec_init_from_cmdline(const char *cmdline)
+{
+	char str[64];
+	const char *start;
+	char *end;
+
+	if (! (start = strstr(cmdline, "machvec=")) )
+		return machvec_init(NULL);
+
+	strlcpy(str, start + strlen("machvec="), sizeof(str));
+	if ( (end = strchr(str, ' ')) )
+		*end = '\0';
+
+	return machvec_init(str);
+}
+
 #endif /* CONFIG_IA64_GENERIC */
 
 void
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index fa40cba43350..4158906c45aa 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -499,7 +499,8 @@ copy_thread (int nr, unsigned long clone_flags,
 
 		/* Copy partially mapped page list */
 		if (!retval)
-			retval = ia32_copy_partial_page_list(p, clone_flags);
+			retval = ia32_copy_ia64_partial_page_list(p,
+								clone_flags);
 	}
 #endif
 
@@ -728,7 +729,7 @@ flush_thread (void)
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(task_pt_regs(current))) {
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}
@@ -754,7 +755,7 @@ exit_thread (void)
 		pfm_release_debug_registers(current);
 #endif
 	if (IS_IA32_PROCESS(task_pt_regs(current)))
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 }
 
 unsigned long
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index cf06fe799041..7cecd2964200 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -491,12 +491,17 @@ setup_arch (char **cmdline_p)
 	efi_init();
 	io_port_init();
 
-	parse_early_param();
-
 #ifdef CONFIG_IA64_GENERIC
-	machvec_init(NULL);
+	/* machvec needs to be parsed from the command line
+	 * before parse_early_param() is called to ensure
+	 * that ia64_mv is initialised before any command line
+	 * settings may cause console setup to occur
+	 */
+	machvec_init_from_cmdline(*cmdline_p);
 #endif
 
+	parse_early_param();
+
 	if (early_console_setup(*cmdline_p) == 0)
 		mark_bsp_online();
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9f72838db26e..0982882bfb80 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -468,7 +468,7 @@ smp_send_stop (void)
 	send_IPI_allbutself(IPI_CPU_STOP);
 }
 
-int __init
+int
 setup_profiling_timer (unsigned int multiplier)
 {
 	return -EINVAL;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 9f5c90b594b9..62209dcf06d3 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -487,7 +487,7 @@ struct create_idle {
 	int cpu;
 };
 
-void
+void __cpuinit
 do_fork_idle(struct work_struct *work)
 {
 	struct create_idle *c_idle =
@@ -497,7 +497,7 @@ do_fork_idle(struct work_struct *work)
 	complete(&c_idle->done);
 }
 
-static int __devinit
+static int __cpuinit
 do_boot_cpu (int sapicid, int cpu)
 {
 	int timeout;
@@ -808,7 +808,7 @@ set_cpu_sibling_map(int cpu)
 	}
 }
 
-int __devinit
+int __cpuinit
 __cpu_up (unsigned int cpu)
 {
 	int ret;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 627785c48ea9..6c0e9e2e1b82 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -52,7 +52,7 @@ static struct clocksource clocksource_itc = {
 	.name           = "itc",
 	.rating         = 350,
 	.read           = itc_get_cycles,
-	.mask           = 0xffffffffffffffff,
+	.mask           = CLOCKSOURCE_MASK(64),
 	.mult           = 0, /*to be caluclated*/
 	.shift          = 16,
 	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -255,7 +255,7 @@ ia64_init_itm (void)
 	}
 }
 
-static cycle_t itc_get_cycles()
+static cycle_t itc_get_cycles(void)
 {
 	u64 lcycle, now, ret;
 
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 860f251d2fc2..83e80677de70 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -50,6 +50,8 @@ SECTIONS
 		KPROBES_TEXT
 		*(.gnu.linkonce.t*)
 	}
+  .text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
+	{ *(.text.head) }
   .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
 	{ *(.text2) }
 #ifdef CONFIG_SMP
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 07d0e92742c8..488e48a5deea 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -581,7 +581,7 @@ pcibios_align_resource (void *data, struct resource *res,
 /*
  * PCI BIOS setup, always defaults to SAL interface
  */
-char * __init
+char * __devinit
 pcibios_setup (char *str)
 {
 	return str;