Diffstat (limited to 'arch/x86/platform')
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c        |  2
-rw-r--r--  arch/x86/platform/ce4100/falconfalls.dts |  8
-rw-r--r--  arch/x86/platform/efi/efi.c              | 78
-rw-r--r--  arch/x86/platform/efi/efi_64.c           | 34
-rw-r--r--  arch/x86/platform/mrst/mrst.c            | 14
-rw-r--r--  arch/x86/platform/mrst/vrtc.c            |  4
-rw-r--r--  arch/x86/platform/olpc/olpc-xo1.c        | 25
-rw-r--r--  arch/x86/platform/olpc/olpc_dt.c         |  3
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c            | 93
-rw-r--r--  arch/x86/platform/uv/uv_irq.c            |  4
-rw-r--r--  arch/x86/platform/uv/uv_time.c           |  6
-rw-r--r--  arch/x86/platform/visws/visws_quirks.c   | 24
12 files changed, 171 insertions(+), 124 deletions(-)
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 68c0dbcc95be..28071bb31db7 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -15,6 +15,7 @@
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
 
+#include <asm/ce4100.h>
 #include <asm/prom.h>
 #include <asm/setup.h>
 #include <asm/i8259.h>
@@ -136,6 +137,7 @@ void __init x86_ce4100_early_setup(void)
 	x86_init.resources.probe_roms = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
 	x86_init.mpparse.find_smp_config = x86_init_noop;
+	x86_init.pci.init = ce4100_pci_init;
 
 #ifdef CONFIG_X86_IO_APIC
 	x86_init.pci.init_irq = sdv_pci_init;
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts
index dc701ea58546..e70be38ce039 100644
--- a/arch/x86/platform/ce4100/falconfalls.dts
+++ b/arch/x86/platform/ce4100/falconfalls.dts
@@ -74,6 +74,7 @@
 			compatible = "intel,ce4100-pci", "pci";
 			device_type = "pci";
 			bus-range = <1 1>;
+			reg = <0x0800 0x0 0x0 0x0 0x0>;
 			ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
 
 			interrupt-parent = <&ioapic2>;
@@ -346,7 +347,7 @@
 					"pciclass0c03";
 
 				reg = <0x16800 0x0 0x0 0x0 0x0>;
-				interrupts = <22 3>;
+				interrupts = <22 1>;
 			};
 
 			usb@d,1 {
@@ -356,7 +357,7 @@
 					"pciclass0c03";
 
 				reg = <0x16900 0x0 0x0 0x0 0x0>;
-				interrupts = <22 3>;
+				interrupts = <22 1>;
 			};
 
 			sata@e,0 {
@@ -366,7 +367,7 @@
 					"pciclass0106";
 
 				reg = <0x17000 0x0 0x0 0x0 0x0>;
-				interrupts = <23 3>;
+				interrupts = <23 1>;
 			};
 
 			flash@f,0 {
@@ -412,6 +413,7 @@
 				#address-cells = <2>;
 				#size-cells = <1>;
 				compatible = "isa";
+				reg = <0xf800 0x0 0x0 0x0 0x0>;
 				ranges = <1 0 0 0 0 0x100>;
 
 				rtc@70 {
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0fe27d7c6258..b30aa26a8df2 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -145,17 +145,6 @@ static void virt_efi_reset_system(int reset_type,
 			      data_size, data);
 }
 
-static efi_status_t virt_efi_set_virtual_address_map(
-	unsigned long memory_map_size,
-	unsigned long descriptor_size,
-	u32 descriptor_version,
-	efi_memory_desc_t *virtual_map)
-{
-	return efi_call_virt4(set_virtual_address_map,
-			      memory_map_size, descriptor_size,
-			      descriptor_version, virtual_map);
-}
-
 static efi_status_t __init phys_efi_set_virtual_address_map(
 	unsigned long memory_map_size,
 	unsigned long descriptor_size,
@@ -468,11 +457,25 @@ void __init efi_init(void)
 #endif
 }
 
+void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
+{
+	u64 addr, npages;
+
+	addr = md->virt_addr;
+	npages = md->num_pages;
+
+	memrange_efi_to_native(&addr, &npages);
+
+	if (executable)
+		set_memory_x(addr, npages);
+	else
+		set_memory_nx(addr, npages);
+}
+
 static void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
 	void *p;
-	u64 addr, npages;
 
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -481,10 +484,7 @@ static void __init runtime_code_page_mkexec(void)
 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
 			continue;
 
-		addr = md->virt_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&addr, &npages);
-		set_memory_x(addr, npages);
+		efi_set_executable(md, true);
 	}
 }
 
@@ -498,13 +498,42 @@ static void __init runtime_code_page_mkexec(void)
  */
 void __init efi_enter_virtual_mode(void)
 {
-	efi_memory_desc_t *md;
+	efi_memory_desc_t *md, *prev_md = NULL;
 	efi_status_t status;
 	unsigned long size;
 	u64 end, systab, addr, npages, end_pfn;
-	void *p, *va;
+	void *p, *va, *new_memmap = NULL;
+	int count = 0;
 
 	efi.systab = NULL;
+
+	/* Merge contiguous regions of the same type and attribute */
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		u64 prev_size;
+		md = p;
+
+		if (!prev_md) {
+			prev_md = md;
+			continue;
+		}
+
+		if (prev_md->type != md->type ||
+		    prev_md->attribute != md->attribute) {
+			prev_md = md;
+			continue;
+		}
+
+		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
+
+		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
+			prev_md->num_pages += md->num_pages;
+			md->type = EFI_RESERVED_TYPE;
+			md->attribute = 0;
+			continue;
+		}
+		prev_md = md;
+	}
+
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
@@ -541,15 +570,21 @@ void __init efi_enter_virtual_mode(void)
 			systab += md->virt_addr - md->phys_addr;
 			efi.systab = (efi_system_table_t *) (unsigned long) systab;
 		}
+		new_memmap = krealloc(new_memmap,
+				      (count + 1) * memmap.desc_size,
+				      GFP_KERNEL);
+		memcpy(new_memmap + (count * memmap.desc_size), md,
+		       memmap.desc_size);
+		count++;
 	}
 
 	BUG_ON(!efi.systab);
 
 	status = phys_efi_set_virtual_address_map(
-		memmap.desc_size * memmap.nr_map,
+		memmap.desc_size * count,
 		memmap.desc_size,
 		memmap.desc_version,
-		memmap.phys_map);
+		(efi_memory_desc_t *)__pa(new_memmap));
 
 	if (status != EFI_SUCCESS) {
 		printk(KERN_ALERT "Unable to switch EFI into virtual mode "
@@ -572,11 +607,12 @@ void __init efi_enter_virtual_mode(void)
 	efi.set_variable = virt_efi_set_variable;
 	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
 	efi.reset_system = virt_efi_reset_system;
-	efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
+	efi.set_virtual_address_map = NULL;
 	if (__supported_pte_mask & _PAGE_NX)
 		runtime_code_page_mkexec();
 	early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
 	memmap.map = NULL;
+	kfree(new_memmap);
 }
 
 /*
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac0621a7ac3d..2649426a7905 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -41,22 +41,7 @@
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;
 
-static void __init early_mapping_set_exec(unsigned long start,
-					  unsigned long end,
-					  int executable)
-{
-	unsigned long num_pages;
-
-	start &= PMD_MASK;
-	end = (end + PMD_SIZE - 1) & PMD_MASK;
-	num_pages = (end - start) >> PAGE_SHIFT;
-	if (executable)
-		set_memory_x((unsigned long)__va(start), num_pages);
-	else
-		set_memory_nx((unsigned long)__va(start), num_pages);
-}
-
-static void __init early_runtime_code_mapping_set_exec(int executable)
+static void __init early_code_mapping_set_exec(int executable)
 {
 	efi_memory_desc_t *md;
 	void *p;
@@ -67,11 +52,8 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
-			unsigned long end;
-			end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-			early_mapping_set_exec(md->phys_addr, end, executable);
-		}
+		if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			efi_set_executable(md, executable);
 	}
 }
 
@@ -79,7 +61,7 @@ void __init efi_call_phys_prelog(void)
 {
 	unsigned long vaddress;
 
-	early_runtime_code_mapping_set_exec(1);
+	early_code_mapping_set_exec(1);
 	local_irq_save(efi_flags);
 	vaddress = (unsigned long)__va(0x0UL);
 	save_pgd = *pgd_offset_k(0x0UL);
@@ -95,7 +77,7 @@ void __init efi_call_phys_epilog(void)
 	set_pgd(pgd_offset_k(0x0UL), save_pgd);
 	__flush_tlb_all();
 	local_irq_restore(efi_flags);
-	early_runtime_code_mapping_set_exec(0);
+	early_code_mapping_set_exec(0);
 }
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
@@ -107,8 +89,10 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
 		return ioremap(phys_addr, size);
 
 	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
-	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
-		return NULL;
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
+		unsigned long top = last_map_pfn << PAGE_SHIFT;
+		efi_ioremap(top, size - (top - phys_addr), type);
+	}
 
 	return (void __iomem *)__va(phys_addr);
 }
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 5c0207bf959b..7000e74b3087 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
 			pentry->freq_hz, pentry->irq);
 		if (!pentry->irq)
 			continue;
-		mp_irq.type = MP_IOAPIC;
+		mp_irq.type = MP_INTSRC;
 		mp_irq.irqtype = mp_INT;
 		/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
 		mp_irq.irqflag = 5;
-		mp_irq.srcbus = 0;
+		mp_irq.srcbus = MP_BUS_ISA;
 		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
 		mp_irq.dstapic = MP_APIC_ALL;
 		mp_irq.dstirq = pentry->irq;
@@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
 	for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
 		pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
 			totallen, (u32)pentry->phys_addr, pentry->irq);
-		mp_irq.type = MP_IOAPIC;
+		mp_irq.type = MP_INTSRC;
 		mp_irq.irqtype = mp_INT;
 		mp_irq.irqflag = 0xf;	/* level trigger and active low */
-		mp_irq.srcbus = 0;
+		mp_irq.srcbus = MP_BUS_ISA;
 		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
 		mp_irq.dstapic = MP_APIC_ALL;
 		mp_irq.dstirq = pentry->irq;
@@ -194,7 +194,7 @@ static unsigned long __init mrst_calibrate_tsc(void)
 	return 0;
 }
 
-void __init mrst_time_init(void)
+static void __init mrst_time_init(void)
 {
 	sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
 	switch (mrst_timer_options) {
@@ -216,7 +216,7 @@ void __init mrst_time_init(void)
 	apbt_time_init();
 }
 
-void __cpuinit mrst_arch_setup(void)
+static void __cpuinit mrst_arch_setup(void)
 {
 	if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
 		__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
@@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void)
 	/* Avoid searching for BIOS MP tables */
 	x86_init.mpparse.find_smp_config = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
+	set_bit(MP_BUS_ISA, mp_bus_not_pci);
 }
 
 /*
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 04cf645feb92..73d70d65e76e 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -100,9 +100,11 @@ int vrtc_set_mmss(unsigned long nowtime)
 
 void __init mrst_rtc_init(void)
 {
-	unsigned long vrtc_paddr = sfi_mrtc_array[0].phys_addr;
+	unsigned long vrtc_paddr;
 
 	sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
+
+	vrtc_paddr = sfi_mrtc_array[0].phys_addr;
 	if (!sfi_mrtc_num || !vrtc_paddr)
 		return;
 
diff --git a/arch/x86/platform/olpc/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c
index 127775696d6c..ab81fb271760 100644
--- a/arch/x86/platform/olpc/olpc-xo1.c
+++ b/arch/x86/platform/olpc/olpc-xo1.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
+#include <linux/mfd/core.h>
 
 #include <asm/io.h>
 #include <asm/olpc.h>
@@ -56,25 +57,24 @@ static void xo1_power_off(void)
 static int __devinit olpc_xo1_probe(struct platform_device *pdev)
 {
 	struct resource *res;
+	int err;
 
 	/* don't run on non-XOs */
 	if (!machine_is_olpc())
 		return -ENODEV;
 
+	err = mfd_cell_enable(pdev);
+	if (err)
+		return err;
+
 	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
 	if (!res) {
 		dev_err(&pdev->dev, "can't fetch device resource info\n");
 		return -EIO;
 	}
-
-	if (!request_region(res->start, resource_size(res), DRV_NAME)) {
-		dev_err(&pdev->dev, "can't request region\n");
-		return -EIO;
-	}
-
 	if (strcmp(pdev->name, "cs5535-pms") == 0)
 		pms_base = res->start;
-	else if (strcmp(pdev->name, "cs5535-acpi") == 0)
+	else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
 		acpi_base = res->start;
 
 	/* If we have both addresses, we can override the poweroff hook */
@@ -88,14 +88,11 @@ static int __devinit olpc_xo1_probe(struct platform_device *pdev)
 
 static int __devexit olpc_xo1_remove(struct platform_device *pdev)
 {
-	struct resource *r;
-
-	r = platform_get_resource(pdev, IORESOURCE_IO, 0);
-	release_region(r->start, resource_size(r));
+	mfd_cell_disable(pdev);
 
 	if (strcmp(pdev->name, "cs5535-pms") == 0)
 		pms_base = 0;
-	else if (strcmp(pdev->name, "cs5535-acpi") == 0)
+	else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
 		acpi_base = 0;
 
 	pm_power_off = NULL;
@@ -113,7 +110,7 @@ static struct platform_driver cs5535_pms_drv = {
 
 static struct platform_driver cs5535_acpi_drv = {
 	.driver = {
-		.name = "cs5535-acpi",
+		.name = "olpc-xo1-pm-acpi",
 		.owner = THIS_MODULE,
 	},
 	.probe = olpc_xo1_probe,
@@ -143,7 +140,7 @@ static void __exit olpc_xo1_exit(void)
 
 MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:olpc-xo1");
+MODULE_ALIAS("platform:cs5535-pms");
 
 module_init(olpc_xo1_init);
 module_exit(olpc_xo1_exit);
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index 4ce208f885ef..d39f63d017d2 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -142,8 +142,7 @@ void * __init prom_early_alloc(unsigned long size)
 		 * wasted bootmem) and hand off chunks of it to callers.
 		 */
 		res = alloc_bootmem(chunk_size);
-		if (!res)
-			return NULL;
+		BUG_ON(!res);
 		prom_early_allocated += chunk_size;
 		memset(res, 0, chunk_size);
 		free_mem = chunk_size;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index df58e9cad96a..c58e0ea39ef5 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -11,6 +11,7 @@
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 
 #include <asm/mmu_context.h>
 #include <asm/uv/uv.h>
@@ -698,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 					  struct mm_struct *mm,
 					  unsigned long va, unsigned int cpu)
 {
-	int tcpu;
-	int uvhub;
 	int locals = 0;
 	int remotes = 0;
 	int hubs = 0;
+	int tcpu;
+	int tpnode;
 	struct bau_desc *bau_desc;
 	struct cpumask *flush_mask;
 	struct ptc_stats *stat;
 	struct bau_control *bcp;
 	struct bau_control *tbcp;
+	struct hub_and_pnode *hpp;
 
 	/* kernel was booted 'nobau' */
 	if (nobau)
@@ -749,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
-	/* cpu statistics */
 	for_each_cpu(tcpu, flush_mask) {
-		uvhub = uv_cpu_to_blade_id(tcpu);
-		bau_uvhub_set(uvhub, &bau_desc->distribution);
-		if (uvhub == bcp->uvhub)
+		/*
+		 * The distribution vector is a bit map of pnodes, relative
+		 * to the partition base pnode (and the partition base nasid
+		 * in the header).
+		 * Translate cpu to pnode and hub using an array stored
+		 * in local memory.
+		 */
+		hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
+		tpnode = hpp->pnode - bcp->partition_base_pnode;
+		bau_uvhub_set(tpnode, &bau_desc->distribution);
+		if (hpp->uvhub == bcp->uvhub)
 			locals++;
 		else
 			remotes++;
@@ -854,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
  * an interrupt, but causes an error message to be returned to
  * the sender.
  */
-static void uv_enable_timeouts(void)
+static void __init uv_enable_timeouts(void)
 {
 	int uvhub;
 	int nuvhubs;
@@ -1325,10 +1334,10 @@ static int __init uv_ptc_init(void)
 }
 
 /*
- * initialize the sending side's sending buffers
+ * Initialize the sending side's sending buffers.
 */
 static void
-uv_activation_descriptor_init(int node, int pnode)
+uv_activation_descriptor_init(int node, int pnode, int base_pnode)
 {
 	int i;
 	int cpu;
@@ -1351,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode)
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
 
+	/* the 14-bit pnode */
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
 			      (n << UV_DESC_BASE_PNODE_SHIFT | m));
-
 	/*
-	 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
+	 * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
 	 * cpu even though we only use the first one; one descriptor can
 	 * describe a broadcast to 256 uv hubs.
 	 */
@@ -1364,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode)
 		memset(bd2, 0, sizeof(struct bau_desc));
 		bd2->header.sw_ack_flag = 1;
 		/*
-		 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
-		 * in the partition. The bit map will indicate uvhub numbers,
-		 * which are 0-N in a partition. Pnodes are unique system-wide.
+		 * The base_dest_nasid set in the message header is the nasid
+		 * of the first uvhub in the partition. The bit map will
+		 * indicate destination pnode numbers relative to that base.
+		 * They may not be consecutive if nasid striding is being used.
 		 */
-		bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
-		bd2->header.dest_subnodeid = 0x10; /* the LB */
+		bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+		bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
 		bd2->header.command = UV_NET_ENDPOINT_INTD;
 		bd2->header.int_both = 1;
 		/*
@@ -1441,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode)
 /*
  * Initialization of each UV hub's structures
 */
-static void __init uv_init_uvhub(int uvhub, int vector)
+static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
 {
 	int node;
 	int pnode;
@@ -1449,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector)
 
 	node = uvhub_to_first_node(uvhub);
 	pnode = uv_blade_to_pnode(uvhub);
-	uv_activation_descriptor_init(node, pnode);
+	uv_activation_descriptor_init(node, pnode, base_pnode);
 	uv_payload_queue_init(node, pnode);
 	/*
-	 * the below initialization can't be in firmware because the
-	 * messaging IRQ will be determined by the OS
+	 * The below initialization can't be in firmware because the
+	 * messaging IRQ will be determined by the OS.
 	 */
 	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
 	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -1490,10 +1500,11 @@ calculate_destination_timeout(void)
 /*
  * initialize the bau_control structure for each cpu
 */
-static int __init uv_init_per_cpu(int nuvhubs)
+static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
 {
 	int i;
 	int cpu;
+	int tcpu;
 	int pnode;
 	int uvhub;
 	int have_hmaster;
@@ -1527,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs)
 		bcp = &per_cpu(bau_control, cpu);
 		memset(bcp, 0, sizeof(struct bau_control));
 		pnode = uv_cpu_hub_info(cpu)->pnode;
+		if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
+			printk(KERN_EMERG
+				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
+				cpu, pnode, base_part_pnode,
+				UV_DISTRIBUTION_SIZE);
+			return 1;
+		}
+		bcp->osnode = cpu_to_node(cpu);
+		bcp->partition_base_pnode = uv_partition_base_pnode;
 		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
 		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
 		bdp = &uvhub_descs[uvhub];
@@ -1535,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs)
 		bdp->pnode = pnode;
 		/* kludge: 'assuming' one node per socket, and assuming that
 		   disabling a socket just leaves a gap in node numbers */
-		socket = (cpu_to_node(cpu) & 1);
+		socket = bcp->osnode & 1;
 		bdp->socket_mask |= (1 << socket);
 		sdp = &bdp->socket[socket];
 		sdp->cpu_number[sdp->num_cpus] = cpu;
@@ -1584,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs)
 nextsocket:
 			socket++;
 			socket_mask = (socket_mask >> 1);
+			/* each socket gets a local array of pnodes/hubs */
+			bcp = smaster;
+			bcp->target_hub_and_pnode = kmalloc_node(
+				sizeof(struct hub_and_pnode) *
+				num_possible_cpus(), GFP_KERNEL, bcp->osnode);
+			memset(bcp->target_hub_and_pnode, 0,
+				sizeof(struct hub_and_pnode) *
+				num_possible_cpus());
+			for_each_present_cpu(tcpu) {
+				bcp->target_hub_and_pnode[tcpu].pnode =
+					uv_cpu_hub_info(tcpu)->pnode;
+				bcp->target_hub_and_pnode[tcpu].uvhub =
+					uv_cpu_hub_info(tcpu)->numa_blade_id;
+			}
 		}
 	}
 	kfree(uvhub_descs);
@@ -1636,21 +1670,22 @@ static int __init uv_bau_init(void)
 	spin_lock_init(&disable_lock);
 	congested_cycles = microsec_2_cycles(congested_response_us);
 
-	if (uv_init_per_cpu(nuvhubs)) {
-		nobau = 1;
-		return 0;
-	}
-
 	uv_partition_base_pnode = 0x7fffffff;
-	for (uvhub = 0; uvhub < nuvhubs; uvhub++)
+	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
 		if (uv_blade_nr_possible_cpus(uvhub) &&
 		    (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
 			uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+	}
+
+	if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
+		nobau = 1;
+		return 0;
+	}
 
 	vector = UV_BAU_MESSAGE;
 	for_each_possible_blade(uvhub)
 		if (uv_blade_nr_possible_cpus(uvhub))
-			uv_init_uvhub(uvhub, vector);
+			uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
 
 	uv_enable_timeouts();
 	alloc_intr_gate(vector, uv_bau_message_intr1);
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 7b24460917d5..374a05d8ad22 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 		       unsigned long mmr_offset, int limit)
 {
 	const struct cpumask *eligible_cpu = cpumask_of(cpu);
-	struct irq_cfg *cfg = get_irq_chip_data(irq);
+	struct irq_cfg *cfg = irq_get_chip_data(irq);
 	unsigned long mmr_value;
 	struct uv_IO_APIC_route_entry *entry;
 	int mmr_pnode, err;
@@ -148,7 +148,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	else
 		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
 
-	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
+	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
 				      irq_name);
 
 	mmr_value = 0;
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 9daf5d1af9f1..0eb90184515f 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -40,7 +40,6 @@ static struct clocksource clocksource_uv = {
 	.rating = 400,
 	.read = uv_read_rtc,
 	.mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
-	.shift = 10,
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -372,14 +371,11 @@ static __init int uv_rtc_setup_clock(void)
 	if (!is_uv_system())
 		return -ENODEV;
 
-	clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
-			clocksource_uv.shift);
-
 	/* If single blade, prefer tsc */
 	if (uv_num_possible_blades() == 1)
 		clocksource_uv.rating = 250;
 
-	rc = clocksource_register(&clocksource_uv);
+	rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
 	if (rc)
 		printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
 	else
diff --git a/arch/x86/platform/visws/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c
index 632037671746..c7abf13a213f 100644
--- a/arch/x86/platform/visws/visws_quirks.c
+++ b/arch/x86/platform/visws/visws_quirks.c
@@ -471,15 +471,7 @@ static unsigned int startup_piix4_master_irq(struct irq_data *data)
 {
 	legacy_pic->init(0);
 	enable_cobalt_irq(data);
-}
-
-static void end_piix4_master_irq(struct irq_data *data)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cobalt_lock, flags);
-	enable_cobalt_irq(data);
-	spin_unlock_irqrestore(&cobalt_lock, flags);
+	return 0;
 }
 
 static struct irq_chip piix4_master_irq_type = {
@@ -492,7 +484,7 @@ static void pii4_mask(struct irq_data *data) { }
 
 static struct irq_chip piix4_virtual_irq_type = {
 	.name = "PIIX4-virtual",
-	.mask = pii4_mask,
+	.irq_mask = pii4_mask,
 };
 
 /*
@@ -569,18 +561,20 @@ out_unlock:
 static struct irqaction master_action = {
 	.handler = piix4_master_intr,
 	.name = "PIIX4-8259",
+	.flags = IRQF_NO_THREAD,
 };
 
 static struct irqaction cascade_action = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 
 static inline void set_piix4_virtual_irq_type(void)
 {
-	piix4_virtual_irq_type.enable = i8259A_chip.unmask;
-	piix4_virtual_irq_type.disable = i8259A_chip.mask;
-	piix4_virtual_irq_type.unmask = i8259A_chip.unmask;
+	piix4_virtual_irq_type.irq_enable = i8259A_chip.irq_unmask;
+	piix4_virtual_irq_type.irq_disable = i8259A_chip.irq_mask;
+	piix4_virtual_irq_type.irq_unmask = i8259A_chip.irq_unmask;
 }
 
 static void __init visws_pre_intr_init(void)
@@ -597,7 +591,7 @@ static void __init visws_pre_intr_init(void)
 		else if (i == CO_IRQ_IDE0)
 			chip = &cobalt_irq_type;
 		else if (i == CO_IRQ_IDE1)
->chip = &cobalt_irq_type;
+			chip = &cobalt_irq_type;
 		else if (i == CO_IRQ_8259)
 			chip = &piix4_master_irq_type;
 		else if (i < CO_IRQ_APIC0)
@@ -606,7 +600,7 @@ static void __init visws_pre_intr_init(void)
 			chip = &cobalt_irq_type;
 
 		if (chip)
-			set_irq_chip(i, chip);
+			irq_set_chip(i, chip);
 	}
 
 	setup_irq(CO_IRQ_8259, &master_action);