author     Linus Torvalds <torvalds@g5.osdl.org>  2006-04-11 09:40:17 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-04-11 09:40:17 -0400
commit     b3967dc566bc89df19e9aeb87b2fd483418b02e6 (patch)
tree       97a0fff4c1d59e3395f6b6f9d4a226da3bf58d28 /arch
parent     cde227afe6b997dce08bcfc2aa6e373fb56857b0 (diff)
parent     0ffe984917b9cd6ecc19ffbc06f35869d8c18df8 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Prefetch mmap_sem in ia64_do_page_fault()
  [IA64] Failure to resume after INIT in user space
  [IA64] Pass more data to the MCA/INIT notify_die hooks
  [IA64] always map VGA framebuffer UC, even if it supports WB
  [IA64] fix bug in ia64 __mutex_fastpath_trylock
  [IA64] for_each_possible_cpu: ia64
  [IA64] update HP CSR space discovery via ACPI
  [IA64] Wire up new syscalls {set,get}_robust_list
  [IA64] 'msg' may be used uninitialized in xpc_initiate_allocate()
  [IA64] Wire up new syscall sync_file_range()
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kernel/acpi-ext.c        | 143
-rw-r--r--  arch/ia64/kernel/entry.S           |   3
-rw-r--r--  arch/ia64/kernel/mca.c             |  33
-rw-r--r--  arch/ia64/kernel/mca_asm.S         |  10
-rw-r--r--  arch/ia64/kernel/module.c          |   2
-rw-r--r--  arch/ia64/mm/fault.c               |   3
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c  |   2
7 files changed, 105 insertions, 91 deletions
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 4a5574ff007b..fff82929d225 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -1,105 +1,104 @@
 /*
- * arch/ia64/kernel/acpi-ext.c
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ *      Alex Williamson <alex.williamson@hp.com>
+ *      Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
- * Copyright (C) 2003 Hewlett-Packard
- * Copyright (C) Alex Williamson
- * Copyright (C) Bjorn Helgaas
- *
- * Vendor specific extensions to ACPI.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/acpi.h>
-#include <linux/efi.h>
 
 #include <asm/acpi-ext.h>
 
-struct acpi_vendor_descriptor {
-        u8              guid_id;
-        efi_guid_t      guid;
-};
+/*
+ * Device CSRs that do not appear in PCI config space should be described
+ * via ACPI.  This would normally be done with Address Space Descriptors
+ * marked as "consumer-only," but old versions of Windows and Linux ignore
+ * the producer/consumer flag, so HP invented a vendor-defined resource to
+ * describe the location and size of CSR space.
+ */
 
-struct acpi_vendor_info {
-        struct acpi_vendor_descriptor   *descriptor;
-        u8                              *data;
-        u32                             length;
+struct acpi_vendor_uuid hp_ccsr_uuid = {
+        .subtype = 2,
+        .data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
+            0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
 };
 
-acpi_status
-acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
+static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
 {
-        struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
-        struct acpi_resource_vendor *vendor;
-        struct acpi_vendor_descriptor *descriptor;
-        u32 byte_length;
-
-        if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
-                return AE_OK;
-
-        vendor = (struct acpi_resource_vendor *)&resource->data;
-        descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
-        if (vendor->byte_length <= sizeof(*info->descriptor) ||
-            descriptor->guid_id != info->descriptor->guid_id ||
-            efi_guidcmp(descriptor->guid, info->descriptor->guid))
-                return AE_OK;
-
-        byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
-        info->data = acpi_os_allocate(byte_length);
-        if (!info->data)
-                return AE_NO_MEMORY;
-
-        memcpy(info->data,
-               vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
-               byte_length);
-        info->length = byte_length;
-        return AE_CTRL_TERMINATE;
-}
+        acpi_status status;
+        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+        struct acpi_resource *resource;
+        struct acpi_resource_vendor_typed *vendor;
 
-acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
-                          u8 ** data, u32 * byte_length)
-{
-        struct acpi_vendor_info info;
+        status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
+                &buffer);
 
-        info.descriptor = id;
-        info.data = NULL;
+        resource = buffer.pointer;
+        vendor = &resource->data.vendor_typed;
 
-        acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
-                            &info);
-        if (!info.data)
-                return AE_NOT_FOUND;
+        if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
+                status = AE_NOT_FOUND;
+                goto exit;
+        }
 
-        *data = info.data;
-        *byte_length = info.length;
-        return AE_OK;
+        memcpy(base, vendor->byte_data, sizeof(*base));
+        memcpy(length, vendor->byte_data + 8, sizeof(*length));
+
+  exit:
+        acpi_os_free(buffer.pointer);
+        return status;
 }
 
-struct acpi_vendor_descriptor hp_ccsr_descriptor = {
-        .guid_id = 2,
-        .guid =
-            EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
-                     0x37, 0x0e, 0xad)
+struct csr_space {
+        u64     base;
+        u64     length;
 };
 
-acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length)
+static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
 {
+        struct csr_space *space = data;
+        struct acpi_resource_address64 addr;
         acpi_status status;
-        u8 *data;
-        u32 length;
 
-        status =
-            acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+        status = acpi_resource_to_address64(resource, &addr);
+        if (ACPI_SUCCESS(status) &&
+            addr.resource_type == ACPI_MEMORY_RANGE &&
+            addr.address_length &&
+            addr.producer_consumer == ACPI_CONSUMER) {
+                space->base = addr.minimum;
+                space->length = addr.address_length;
+                return AE_CTRL_TERMINATE;
+        }
+        return AE_OK;           /* keep looking */
+}
 
-        if (ACPI_FAILURE(status) || length != 16)
-                return AE_NOT_FOUND;
+static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
+{
+        struct csr_space space = { 0, 0 };
 
-        memcpy(csr_base, data, sizeof(*csr_base));
-        memcpy(csr_length, data + 8, sizeof(*csr_length));
-        acpi_os_free(data);
+        acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
+        if (!space.length)
+                return AE_NOT_FOUND;
 
+        *base = space.base;
+        *length = space.length;
         return AE_OK;
 }
 
+acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+{
+        acpi_status status;
+
+        status = hp_ccsr_locate(obj, csr_base, csr_length);
+        if (ACPI_SUCCESS(status))
+                return status;
+
+        return hp_crs_locate(obj, csr_base, csr_length);
+}
 EXPORT_SYMBOL(hp_acpi_csr_space);
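
Note (editorial, not part of the commit): hp_acpi_csr_space() stays the exported entry point. A caller passes a device's ACPI handle and gets back the base and size of its CSR window, taken first from the HP CCSR vendor descriptor and, failing that, from a consumer-marked memory resource in _CRS. A minimal caller sketch, assuming a hypothetical driver that simply maps the window (error handling trimmed):

#include <linux/acpi.h>
#include <asm/acpi-ext.h>
#include <asm/io.h>

static void __iomem *map_device_csrs(acpi_handle handle)
{
        u64 csr_base, csr_length;
        acpi_status status;

        /* Ask ACPI (CCSR descriptor first, then _CRS) where CSR space lives. */
        status = hp_acpi_csr_space(handle, &csr_base, &csr_length);
        if (ACPI_FAILURE(status))
                return NULL;

        /* Map the window; device CSRs must be accessed uncached. */
        return ioremap(csr_base, csr_length);
}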
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 750e8e7fbdc3..6e16f6b35bd3 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1606,5 +1606,8 @@ sys_call_table:
 	data8 sys_ni_syscall			// 1295 reserved for ppoll
 	data8 sys_unshare
 	data8 sys_splice
+	data8 sys_set_robust_list
+	data8 sys_get_robust_list
+	data8 sys_sync_file_range		// 1300
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
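
Note (editorial): the three new slots land at 1298-1300 in the ia64 syscall space, with sync_file_range() at 1300 as the comment above records. A hedged user-space sketch of exercising it by number, for a libc that does not yet provide a wrapper (the file name, range, and macro names here are illustrative, not from the kernel headers):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IA64_NR_sync_file_range  1300   /* from the table comment above */
#define MY_SYNC_FILE_RANGE_WRITE 0x2    /* start writeback of dirty pages */

int main(void)
{
        int fd = open("data.log", O_WRONLY);
        if (fd < 0)
                return 1;

        /* Push bytes [0, 4096) of data.log to disk without a full fsync(). */
        long ret = syscall(IA64_NR_sync_file_range, fd, 0L, 4096L,
                           MY_SYNC_FILE_RANGE_WRITE);
        printf("sync_file_range returned %ld\n", ret);
        return 0;
}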
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8963171788d5..5e6fdbe78bcd 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -581,10 +581,12 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 {
 	unsigned long flags;
 	int cpu = smp_processor_id();
+	struct ia64_mca_notify_die nd =
+		{ .sos = NULL, .monarch_cpu = &monarch_cpu };
 
 	/* Mask all interrupts */
 	local_irq_save(flags);
-	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -594,7 +596,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 	 */
 	ia64_sal_mc_rendez();
 
-	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -602,7 +604,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
 	while (monarch_cpu != -1)
 	       cpu_relax();	/* spin until monarch leaves */
 
-	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1023,6 +1025,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		&sos->proc_state_param;
 	int recover, cpu = smp_processor_id();
 	task_t *previous_current;
+	struct ia64_mca_notify_die nd =
+		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
 	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
 	console_loglevel = 15;	/* make sure printks make it to console */
@@ -1031,7 +1035,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 	monarch_cpu = cpu;
-	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 	ia64_wait_for_slaves(cpu);
@@ -1043,7 +1047,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * spinning in SAL does not work.
 	 */
 	ia64_mca_wakeup_all();
-	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
+	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1064,7 +1068,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
 		sos->os_status = IA64_MCA_CORRECTED;
 	}
-	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
+	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1351,10 +1355,14 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	static atomic_t monarchs;
 	task_t *previous_current;
 	int cpu = smp_processor_id();
+	struct ia64_mca_notify_die nd =
+		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
 	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
 	console_loglevel = 15;	/* make sure printks make it to console */
 
+	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+
 	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
 	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
@@ -1390,15 +1398,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
 	while (monarch_cpu == -1)
 	       cpu_relax();	/* spin until monarch enters */
-	if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
-	if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 	while (monarch_cpu != -1)
 	       cpu_relax();	/* spin until monarch leaves */
-	if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 	printk("Slave on cpu %d returning to normal service.\n", cpu);
@@ -1409,7 +1417,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 
 	monarch_cpu = cpu;
-	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
@@ -1426,10 +1434,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * to default_monarch_init_process() above and just print all the
 	 * tasks.
 	 */
-	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
-	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
+	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
@@ -1631,6 +1639,7 @@ ia64_mca_init(void)
 			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
 				"%ld to %ld milliseconds\n", timeout, isrv.v0);
 			timeout = isrv.v0;
+			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
 			continue;
 		}
 		printk(KERN_ERR "Failed to register rendezvous interrupt "
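
Note (editorial): with this change the MCA/INIT notify_die() calls pass a pointer to the new struct ia64_mca_notify_die as their fourth argument, so a registered notifier can reach the SAL-to-OS state (sos) and the monarch cpu instead of guessing. A hedged sketch of such a consumer, assuming the usual die_args layout from <asm/kdebug.h>; the function, message, and module wiring are hypothetical and trimmed:

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>
#include <asm/mca.h>

static int my_mca_event(struct notifier_block *nb, unsigned long val, void *v)
{
        struct die_args *args = v;
        struct ia64_mca_notify_die *nd;

        if (val != DIE_MCA_MONARCH_ENTER)
                return NOTIFY_DONE;

        /* The fourth notify_die() argument arrives in args->err. */
        nd = (struct ia64_mca_notify_die *)args->err;
        printk(KERN_INFO "MCA monarch entering on cpu %d (sos %p)\n",
               *nd->monarch_cpu, nd->sos);
        return NOTIFY_DONE;
}

static struct notifier_block my_mca_nb = { .notifier_call = my_mca_event };
/* register_die_notifier(&my_mca_nb) would be called from module init */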
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 60a464bfd9e2..6dff024cd62b 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -827,7 +827,7 @@ ia64_state_restore:
 	ld8 r9=[temp2],16	// sal_gp
 	;;
 	ld8 r22=[temp1],16	// pal_min_state, virtual
-	ld8 r21=[temp2],16	// prev_IA64_KR_CURRENT
+	ld8 r13=[temp2],16	// prev_IA64_KR_CURRENT
 	;;
 	ld8 r16=[temp1],16	// prev_IA64_KR_CURRENT_STACK
 	ld8 r20=[temp2],16	// prev_task
@@ -848,7 +848,7 @@ ia64_state_restore:
 	mov cr.iim=temp3
 	mov cr.iha=temp4
 	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
-	mov IA64_KR(CURRENT)=r21
+	mov IA64_KR(CURRENT)=r13
 	ld8 r8=[temp1]		// os_status
 	ld8 r10=[temp2]		// context
 
@@ -856,7 +856,7 @@ ia64_state_restore:
 	 * avoid any dependencies on the algorithm in ia64_switch_to(), just
 	 * purge any existing CURRENT_STACK mapping and insert the new one.
 	 *
-	 * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+	 * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
 	 * prev_IA64_KR_CURRENT, these values may have been changed by the C
 	 * code.  Do not use r8, r9, r10, r22, they contain values ready for
 	 * the return to SAL.
@@ -873,7 +873,7 @@ ia64_state_restore:
 	;;
 	srlz.d
 
-	extr.u r19=r21,61,3	// r21 = prev_IA64_KR_CURRENT
+	extr.u r19=r13,61,3	// r13 = prev_IA64_KR_CURRENT
 	shl r20=r16,IA64_GRANULE_SHIFT	// r16 = prev_IA64_KR_CURRENT_STACK
 	movl r21=PAGE_KERNEL	// page properties
 	;;
@@ -883,7 +883,7 @@ ia64_state_restore:
 (p6)	br.spnt 1f		// the dreaded cpu 0 idle task in region 5:(
 	;;
 	mov cr.itir=r18
-	mov cr.ifa=r21
+	mov cr.ifa=r13
 	mov r20=IA64_TR_CURRENT_STACK
 	;;
 	itr.d dtr[r20]=r21
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 7a2f0a798d12..3a30cfc9574f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,7 +947,7 @@ void
 percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
 {
 	unsigned int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		memcpy(pcpudst + __per_cpu_offset[i], src, size);
 	}
 }
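
Note (editorial): the fix above switches percpu_modcopy() to for_each_possible_cpu(), which walks every cpu that may ever come online, so per-cpu module data is valid for cpus hotplugged later. A hedged illustration of the same idiom, using a hypothetical per-cpu counter:

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void reset_counters(void)
{
        int cpu;

        /* Cover possible cpus, not just online ones, so a cpu brought up
         * later starts from a clean counter as well. */
        for_each_possible_cpu(cpu)
                per_cpu(my_counter, cpu) = 0;
}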
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index af7eb087dca7..d98ec49570b8 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -60,6 +60,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 
+	/* mmap_sem is performance critical.... */
+	prefetchw(&mm->mmap_sem);
+
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
 	 */
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index d0abddd9ffe6..8255a9be4632 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -1831,7 +1831,7 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
 	enum xpc_retval ret = xpcUnknownReason;
-	struct xpc_msg *msg;
+	struct xpc_msg *msg = NULL;
 
 
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);