author     Linus Torvalds <torvalds@g5.osdl.org>    2006-04-11 09:40:17 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-04-11 09:40:17 -0400
commit     b3967dc566bc89df19e9aeb87b2fd483418b02e6 (patch)
tree       97a0fff4c1d59e3395f6b6f9d4a226da3bf58d28
parent     cde227afe6b997dce08bcfc2aa6e373fb56857b0 (diff)
parent     0ffe984917b9cd6ecc19ffbc06f35869d8c18df8 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] Prefetch mmap_sem in ia64_do_page_fault()
[IA64] Failure to resume after INIT in user space
[IA64] Pass more data to the MCA/INIT notify_die hooks
[IA64] always map VGA framebuffer UC, even if it supports WB
[IA64] fix bug in ia64 __mutex_fastpath_trylock
[IA64] for_each_possible_cpu: ia64
[IA64] update HP CSR space discovery via ACPI
[IA64] Wire up new syscalls {set,get}_robust_list
[IA64] 'msg' may be used uninitialized in xpc_initiate_allocate()
[IA64] Wire up new syscall sync_file_range()
-rw-r--r--  arch/ia64/kernel/acpi-ext.c        | 143
-rw-r--r--  arch/ia64/kernel/entry.S           |   3
-rw-r--r--  arch/ia64/kernel/mca.c             |  33
-rw-r--r--  arch/ia64/kernel/mca_asm.S         |  10
-rw-r--r--  arch/ia64/kernel/module.c          |   2
-rw-r--r--  arch/ia64/mm/fault.c               |   3
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c  |   2
-rw-r--r--  include/asm-ia64/acpi-ext.h        |  11
-rw-r--r--  include/asm-ia64/kdebug.h          |   2
-rw-r--r--  include/asm-ia64/mca.h             |   5
-rw-r--r--  include/asm-ia64/mutex.h           |   2
-rw-r--r--  include/asm-ia64/unistd.h          |   5
-rw-r--r--  include/asm-ia64/vga.h             |   2
13 files changed, 125 insertions(+), 98 deletions(-)
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 4a5574ff007b..fff82929d225 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -1,105 +1,104 @@
 /*
- * arch/ia64/kernel/acpi-ext.c
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ *	Alex Williamson <alex.williamson@hp.com>
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
- * Copyright (C) 2003 Hewlett-Packard
- * Copyright (C) Alex Williamson
- * Copyright (C) Bjorn Helgaas
- *
- * Vendor specific extensions to ACPI.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/acpi.h>
-#include <linux/efi.h>
 
 #include <asm/acpi-ext.h>
 
-struct acpi_vendor_descriptor {
-	u8		guid_id;
-	efi_guid_t	guid;
-};
+/*
+ * Device CSRs that do not appear in PCI config space should be described
+ * via ACPI.  This would normally be done with Address Space Descriptors
+ * marked as "consumer-only," but old versions of Windows and Linux ignore
+ * the producer/consumer flag, so HP invented a vendor-defined resource to
+ * describe the location and size of CSR space.
+ */
 
-struct acpi_vendor_info {
-	struct acpi_vendor_descriptor	*descriptor;
-	u8				*data;
-	u32				length;
+struct acpi_vendor_uuid hp_ccsr_uuid = {
+	.subtype = 2,
+	.data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
+	    0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
 };
 
-acpi_status
-acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
+static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
 {
-	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
-	struct acpi_resource_vendor *vendor;
-	struct acpi_vendor_descriptor *descriptor;
-	u32 byte_length;
-
-	if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
-		return AE_OK;
-
-	vendor = (struct acpi_resource_vendor *)&resource->data;
-	descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
-	if (vendor->byte_length <= sizeof(*info->descriptor) ||
-	    descriptor->guid_id != info->descriptor->guid_id ||
-	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
-		return AE_OK;
-
-	byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
-	info->data = acpi_os_allocate(byte_length);
-	if (!info->data)
-		return AE_NO_MEMORY;
-
-	memcpy(info->data,
-	       vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
-	       byte_length);
-	info->length = byte_length;
-	return AE_CTRL_TERMINATE;
-}
+	acpi_status status;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_resource *resource;
+	struct acpi_resource_vendor_typed *vendor;
 
-acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
-			  u8 ** data, u32 * byte_length)
-{
-	struct acpi_vendor_info info;
+	status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
+		&buffer);
 
-	info.descriptor = id;
-	info.data = NULL;
+	resource = buffer.pointer;
+	vendor = &resource->data.vendor_typed;
 
-	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
-			    &info);
-	if (!info.data)
-		return AE_NOT_FOUND;
+	if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
+		status = AE_NOT_FOUND;
+		goto exit;
+	}
 
-	*data = info.data;
-	*byte_length = info.length;
-	return AE_OK;
+	memcpy(base, vendor->byte_data, sizeof(*base));
+	memcpy(length, vendor->byte_data + 8, sizeof(*length));
+
+  exit:
+	acpi_os_free(buffer.pointer);
+	return status;
 }
 
-struct acpi_vendor_descriptor hp_ccsr_descriptor = {
-	.guid_id = 2,
-	.guid =
-	    EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
-		     0x37, 0x0e, 0xad)
+struct csr_space {
+	u64	base;
+	u64	length;
 };
 
-acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length)
+static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
 {
+	struct csr_space *space = data;
+	struct acpi_resource_address64 addr;
 	acpi_status status;
-	u8 *data;
-	u32 length;
 
-	status =
-	    acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+	status = acpi_resource_to_address64(resource, &addr);
+	if (ACPI_SUCCESS(status) &&
+	    addr.resource_type == ACPI_MEMORY_RANGE &&
+	    addr.address_length &&
+	    addr.producer_consumer == ACPI_CONSUMER) {
+		space->base = addr.minimum;
+		space->length = addr.address_length;
+		return AE_CTRL_TERMINATE;
+	}
+	return AE_OK;		/* keep looking */
+}
 
-	if (ACPI_FAILURE(status) || length != 16)
-		return AE_NOT_FOUND;
+static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
+{
+	struct csr_space space = { 0, 0 };
 
-	memcpy(csr_base, data, sizeof(*csr_base));
-	memcpy(csr_length, data + 8, sizeof(*csr_length));
-	acpi_os_free(data);
+	acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
+	if (!space.length)
+		return AE_NOT_FOUND;
 
+	*base = space.base;
+	*length = space.length;
 	return AE_OK;
 }
 
+acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+{
+	acpi_status status;
+
+	status = hp_ccsr_locate(obj, csr_base, csr_length);
+	if (ACPI_SUCCESS(status))
+		return status;
+
+	return hp_crs_locate(obj, csr_base, csr_length);
+}
 EXPORT_SYMBOL(hp_acpi_csr_space);
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 750e8e7fbdc3..6e16f6b35bd3 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1606,5 +1606,8 @@ sys_call_table:
 	data8 sys_ni_syscall			// 1295 reserved for ppoll
 	data8 sys_unshare
 	data8 sys_splice
+	data8 sys_set_robust_list
+	data8 sys_get_robust_list
+	data8 sys_sync_file_range		// 1300
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8963171788d5..5e6fdbe78bcd 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -581,10 +581,12 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 {
 	unsigned long flags;
 	int cpu = smp_processor_id();
+	struct ia64_mca_notify_die nd =
+		{ .sos = NULL, .monarch_cpu = &monarch_cpu };
 
 	/* Mask all interrupts */
 	local_irq_save(flags);
-	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -594,7 +596,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 	 */
 	ia64_sal_mc_rendez();
 
-	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -602,7 +604,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 	while (monarch_cpu != -1)
 	       cpu_relax();	/* spin until monarch leaves */
 
-	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1023,6 +1025,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		&sos->proc_state_param;
 	int recover, cpu = smp_processor_id();
 	task_t *previous_current;
+	struct ia64_mca_notify_die nd =
+		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
 	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
 	console_loglevel = 15;	/* make sure printks make it to console */
@@ -1031,7 +1035,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 	monarch_cpu = cpu;
-	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 	ia64_wait_for_slaves(cpu);
@@ -1043,7 +1047,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * spinning in SAL does not work.
 	 */
 	ia64_mca_wakeup_all();
-	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1064,7 +1068,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
 		sos->os_status = IA64_MCA_CORRECTED;
 	}
-	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
+	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1351,10 +1355,14 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	static atomic_t monarchs;
 	task_t *previous_current;
 	int cpu = smp_processor_id();
+	struct ia64_mca_notify_die nd =
+		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
 	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
 	console_loglevel = 15;	/* make sure printks make it to console */
 
+	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+
 	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
 	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
@@ -1390,15 +1398,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
 		while (monarch_cpu == -1)
 		       cpu_relax();	/* spin until monarch enters */
-		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
 			ia64_mca_spin(__FUNCTION__);
-		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
 			ia64_mca_spin(__FUNCTION__);
 		while (monarch_cpu != -1)
 		       cpu_relax();	/* spin until monarch leaves */
-		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
+		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
 			ia64_mca_spin(__FUNCTION__);
 		printk("Slave on cpu %d returning to normal service.\n", cpu);
@@ -1409,7 +1417,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 
 	monarch_cpu = cpu;
-	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1426,10 +1434,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * to default_monarch_init_process() above and just print all the
 	 * tasks.
 	 */
-	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
-	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 	printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
@@ -1631,6 +1639,7 @@ ia64_mca_init(void)
 			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
 				"%ld to %ld milliseconds\n", timeout, isrv.v0);
 			timeout = isrv.v0;
+			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
 			continue;
 		}
 		printk(KERN_ERR "Failed to register rendezvous interrupt "
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 60a464bfd9e2..6dff024cd62b 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -827,7 +827,7 @@ ia64_state_restore:
 	ld8 r9=[temp2],16	// sal_gp
 	;;
 	ld8 r22=[temp1],16	// pal_min_state, virtual
-	ld8 r21=[temp2],16	// prev_IA64_KR_CURRENT
+	ld8 r13=[temp2],16	// prev_IA64_KR_CURRENT
 	;;
 	ld8 r16=[temp1],16	// prev_IA64_KR_CURRENT_STACK
 	ld8 r20=[temp2],16	// prev_task
@@ -848,7 +848,7 @@ ia64_state_restore:
 	mov cr.iim=temp3
 	mov cr.iha=temp4
 	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
-	mov IA64_KR(CURRENT)=r21
+	mov IA64_KR(CURRENT)=r13
 	ld8 r8=[temp1]		// os_status
 	ld8 r10=[temp2]		// context
 
@@ -856,7 +856,7 @@ ia64_state_restore:
 	 * avoid any dependencies on the algorithm in ia64_switch_to(), just
 	 * purge any existing CURRENT_STACK mapping and insert the new one.
 	 *
-	 * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+	 * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
 	 * prev_IA64_KR_CURRENT, these values may have been changed by the C
 	 * code.  Do not use r8, r9, r10, r22, they contain values ready for
 	 * the return to SAL.
@@ -873,7 +873,7 @@ ia64_state_restore:
 	;;
 	srlz.d
 
-	extr.u r19=r21,61,3			// r21 = prev_IA64_KR_CURRENT
+	extr.u r19=r13,61,3			// r13 = prev_IA64_KR_CURRENT
 	shl r20=r16,IA64_GRANULE_SHIFT		// r16 = prev_IA64_KR_CURRENT_STACK
 	movl r21=PAGE_KERNEL			// page properties
 	;;
@@ -883,7 +883,7 @@ ia64_state_restore:
 (p6)	br.spnt 1f		// the dreaded cpu 0 idle task in region 5:(
 	;;
 	mov cr.itir=r18
-	mov cr.ifa=r21
+	mov cr.ifa=r13
 	mov r20=IA64_TR_CURRENT_STACK
 	;;
 	itr.d dtr[r20]=r21
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 7a2f0a798d12..3a30cfc9574f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,7 +947,7 @@ void
 percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
 {
 	unsigned int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		memcpy(pcpudst + __per_cpu_offset[i], src, size);
 	}
 }
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index af7eb087dca7..d98ec49570b8 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -60,6 +60,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 
+	/* mmap_sem is performance critical.... */
+	prefetchw(&mm->mmap_sem);
+
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
 	 */
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index d0abddd9ffe6..8255a9be4632 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -1831,7 +1831,7 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
 	enum xpc_retval ret = xpcUnknownReason;
-	struct xpc_msg *msg;
+	struct xpc_msg *msg = NULL;
 
 
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h
index 56d2ddc97b30..734d137dda6e 100644
--- a/include/asm-ia64/acpi-ext.h
+++ b/include/asm-ia64/acpi-ext.h
@@ -1,12 +1,15 @@
 /*
- * ia64/platform/hp/common/hp_acpi.h
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ *	Alex Williamson <alex.williamson@hp.com>
+ *	Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
- * Copyright (C) 2003 Hewlett-Packard
- * Copyright (C) Alex Williamson
- * Copyright (C) Bjorn Helgaas
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  *
  * Vendor specific extensions to ACPI.
  */
+
 #ifndef _ASM_IA64_ACPI_EXT_H
 #define _ASM_IA64_ACPI_EXT_H
 
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index 218c458ab60c..c195a9ad1255 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -58,6 +58,8 @@ enum die_val {
 	DIE_MCA_RENDZVOUS_ENTER,
 	DIE_MCA_RENDZVOUS_PROCESS,
 	DIE_MCA_RENDZVOUS_LEAVE,
+	DIE_MCA_NEW_TIMEOUT,
+	DIE_INIT_ENTER,
 	DIE_INIT_MONARCH_ENTER,
 	DIE_INIT_MONARCH_PROCESS,
 	DIE_INIT_MONARCH_LEAVE,
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index bfbbb8da79c7..9c5389b7e623 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -148,6 +148,11 @@ extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)
 extern void ia64_unreg_MCA_extension(void);
 extern u64 ia64_get_rnat(u64 *);
 
+struct ia64_mca_notify_die {
+	struct ia64_sal_os_state *sos;
+	int *monarch_cpu;
+};
+
 #else	/* __ASSEMBLY__ */
 
 #define IA64_MCA_CORRECTED	0x0	/* Error has been corrected by OS_MCA */
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
index 5a3224f6af38..bed73a643a56 100644
--- a/include/asm-ia64/mutex.h
+++ b/include/asm-ia64/mutex.h
@@ -84,7 +84,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	if (likely(cmpxchg_acq(count, 1, 0)) == 1)
+	if (cmpxchg_acq(count, 1, 0) == 1)
 		return 1;
 	return 0;
 }
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 36070c1014d8..1c749acca021 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -286,12 +286,15 @@
 /* 1294, 1295 reserved for pselect/ppoll */
 #define __NR_unshare			1296
 #define __NR_splice			1297
+#define __NR_set_robust_list		1298
+#define __NR_get_robust_list		1299
+#define __NR_sync_file_range		1300
 
 #ifdef __KERNEL__
 
 #include <linux/config.h>
 
-#define NR_syscalls			274 /* length of syscall table */
+#define NR_syscalls			277 /* length of syscall table */
 
 #define __ARCH_WANT_SYS_RT_SIGACTION
 
diff --git a/include/asm-ia64/vga.h b/include/asm-ia64/vga.h
index bc3349ffc505..091177cda223 100644
--- a/include/asm-ia64/vga.h
+++ b/include/asm-ia64/vga.h
@@ -17,7 +17,7 @@
 extern unsigned long vga_console_iobase;
 extern unsigned long vga_console_membase;
 
-#define VGA_MAP_MEM(x)	((unsigned long) ioremap(vga_console_membase + (x), 0))
+#define VGA_MAP_MEM(x)	((unsigned long) ioremap_nocache(vga_console_membase + (x), 0))
 
 #define vga_readb(x)	(*(x))
 #define vga_writeb(x,y)	(*(y) = (x))