path: root/arch/ia64
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-18 12:33:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-18 12:33:07 -0400
commit	fa877c71e2136bd682b45022c96d5e073ced9f58 (patch)
tree	f45a582fb31cb51dbe14ea24a13fb98f2798e04e /arch/ia64
parent	9eead2a8115d2a6aecf267c292f751f7761fa5f8 (diff)
parent	6ae86350857bf3e862f8dcd10039ccb45e056f85 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Clean up linker script using standard macros.
  [IA64] Use standard macros for page-aligned data.
  [IA64] Use .ref.text, not .text.init for start_ap.
  [IA64] sgi-xp: fix printk format warnings
  [IA64] ioc4_serial: fix printk format warnings
  [IA64] mbcs: fix printk format warnings
  [IA64] pci_br, fix infinite loop in find_free_ate()
  [IA64] kdump: Short path to freeze CPUs
  [IA64] kdump: Try INIT regardless of
  [IA64] kdump: Mask INIT first in panic-kdump path
  [IA64] kdump: Don't return APs to SAL from kdump
  [IA64] kexec: Unregister MCA handler before kexec
  [IA64] kexec: Make INIT safe while transition to
  [IA64] kdump: Mask MCA/INIT on frozen cpus

Fix up conflict in arch/ia64/kernel/vmlinux.lds.S as per Tony's suggestion.
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/mca.h         |   2
-rw-r--r--  arch/ia64/kernel/crash.c            |  83
-rw-r--r--  arch/ia64/kernel/head.S             |   6
-rw-r--r--  arch/ia64/kernel/machine_kexec.c    |  15
-rw-r--r--  arch/ia64/kernel/mca.c              |  15
-rw-r--r--  arch/ia64/kernel/mca_asm.S          |  47
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S  |   2
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S      | 113
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c  |   2
9 files changed, 154 insertions, 131 deletions
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 44a0b53df900..c171cdf0a789 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -145,12 +145,14 @@ extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
 extern void ia64_init_handler(struct pt_regs *,
 			      struct switch_stack *,
 			      struct ia64_sal_os_state *);
+extern void ia64_os_init_on_kdump(void);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
 extern void ia64_mca_cmc_vector_setup(void);
 extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
 extern unsigned long ia64_get_rnat(unsigned long *);
+extern void ia64_set_psr_mc(void);
 extern void ia64_mca_printk(const char * fmt, ...)
 	 __attribute__ ((format (printf, 1, 2)));
 
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index f065093f8e9b..6631a9dfafdc 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -23,6 +23,7 @@
 int kdump_status[NR_CPUS];
 static atomic_t kdump_cpu_frozen;
 atomic_t kdump_in_progress;
+static int kdump_freeze_monarch;
 static int kdump_on_init = 1;
 static int kdump_on_fatal_mca = 1;
 
@@ -108,10 +109,38 @@ machine_crash_shutdown(struct pt_regs *pt)
 	 */
 	kexec_disable_iosapic();
 #ifdef CONFIG_SMP
+	/*
+	 * If kdump_on_init is set and an INIT is asserted here, kdump will
+	 * be started again via INIT monarch.
+	 */
+	local_irq_disable();
+	ia64_set_psr_mc();	/* mask MCA/INIT */
+	if (atomic_inc_return(&kdump_in_progress) != 1)
+		unw_init_running(kdump_cpu_freeze, NULL);
+
+	/*
+	 * Now this cpu is ready for kdump.
+	 * Stop all others by IPI or INIT.  They could receive INIT from
+	 * outside and might be INIT monarch, but only thing they have to
+	 * do is falling into kdump_cpu_freeze().
+	 *
+	 * If an INIT is asserted here:
+	 * - All receivers might be slaves, since some of cpus could already
+	 *   be frozen and INIT might be masked on monarch.  In this case,
+	 *   all slaves will be frozen soon since kdump_in_progress will let
+	 *   them into DIE_INIT_SLAVE_LEAVE.
+	 * - One might be a monarch, but INIT rendezvous will fail since
+	 *   at least this cpu already have INIT masked so it never join
+	 *   to the rendezvous.  In this case, all slaves and monarch will
+	 *   be frozen soon with no wait since the INIT rendezvous is skipped
+	 *   by kdump_in_progress.
+	 */
 	kdump_smp_send_stop();
 	/* not all cpu response to IPI, send INIT to freeze them */
-	if (kdump_wait_cpu_freeze() && kdump_on_init) {
+	if (kdump_wait_cpu_freeze()) {
 		kdump_smp_send_init();
+		/* wait again, don't go ahead if possible */
+		kdump_wait_cpu_freeze();
 	}
 #endif
 }
@@ -129,17 +158,17 @@ void
 kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
 {
 	int cpuid;
+
 	local_irq_disable();
 	cpuid = smp_processor_id();
 	crash_save_this_cpu();
 	current->thread.ksp = (__u64)info->sw - 16;
+
+	ia64_set_psr_mc();	/* mask MCA/INIT and stop reentrance */
+
 	atomic_inc(&kdump_cpu_frozen);
 	kdump_status[cpuid] = 1;
 	mb();
-#ifdef CONFIG_HOTPLUG_CPU
-	if (cpuid != 0)
-		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
-#endif
 	for (;;)
 		cpu_relax();
 }
@@ -150,6 +179,20 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	struct ia64_mca_notify_die *nd;
 	struct die_args *args = data;
 
+	if (atomic_read(&kdump_in_progress)) {
+		switch (val) {
+		case DIE_INIT_MONARCH_LEAVE:
+			if (!kdump_freeze_monarch)
+				break;
+			/* fall through */
+		case DIE_INIT_SLAVE_LEAVE:
+		case DIE_INIT_MONARCH_ENTER:
+		case DIE_MCA_RENDZVOUS_LEAVE:
+			unw_init_running(kdump_cpu_freeze, NULL);
+			break;
+		}
+	}
+
 	if (!kdump_on_init && !kdump_on_fatal_mca)
 		return NOTIFY_DONE;
 
@@ -162,43 +205,31 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	}
 
 	if (val != DIE_INIT_MONARCH_LEAVE &&
-	    val != DIE_INIT_SLAVE_LEAVE &&
 	    val != DIE_INIT_MONARCH_PROCESS &&
-	    val != DIE_MCA_RENDZVOUS_LEAVE &&
 	    val != DIE_MCA_MONARCH_LEAVE)
 		return NOTIFY_DONE;
 
 	nd = (struct ia64_mca_notify_die *)args->err;
-	/* Reason code 1 means machine check rendezvous*/
-	if ((val == DIE_INIT_MONARCH_LEAVE || val == DIE_INIT_SLAVE_LEAVE
-	     || val == DIE_INIT_MONARCH_PROCESS) && nd->sos->rv_rc == 1)
-		return NOTIFY_DONE;
 
 	switch (val) {
 	case DIE_INIT_MONARCH_PROCESS:
-		if (kdump_on_init) {
-			atomic_set(&kdump_in_progress, 1);
-			*(nd->monarch_cpu) = -1;
+		/* Reason code 1 means machine check rendezvous*/
+		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
+			if (atomic_inc_return(&kdump_in_progress) != 1)
+				kdump_freeze_monarch = 1;
 		}
 		break;
 	case DIE_INIT_MONARCH_LEAVE:
-		if (kdump_on_init)
+		/* Reason code 1 means machine check rendezvous*/
+		if (kdump_on_init && (nd->sos->rv_rc != 1))
 			machine_kdump_on_init();
 		break;
-	case DIE_INIT_SLAVE_LEAVE:
-		if (atomic_read(&kdump_in_progress))
-			unw_init_running(kdump_cpu_freeze, NULL);
-		break;
-	case DIE_MCA_RENDZVOUS_LEAVE:
-		if (atomic_read(&kdump_in_progress))
-			unw_init_running(kdump_cpu_freeze, NULL);
-		break;
 	case DIE_MCA_MONARCH_LEAVE:
 		/* *(nd->data) indicate if MCA is recoverable */
 		if (kdump_on_fatal_mca && !(*(nd->data))) {
-			atomic_set(&kdump_in_progress, 1);
-			*(nd->monarch_cpu) = -1;
-			machine_kdump_on_init();
+			if (atomic_inc_return(&kdump_in_progress) == 1)
+				machine_kdump_on_init();
+			/* We got fatal MCA while kdump!? No way!! */
 		}
 		break;
 	}
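
The crash.c changes above replace the plain atomic_set() on kdump_in_progress with atomic_inc_return(), so the first CPU into the crash path goes on to take the dump while any later entrant (including a re-entering monarch) simply drops into kdump_cpu_freeze(). Below is a minimal user-space sketch of that first-one-in-wins gate, using C11 atomics and pthreads; the names (crash_entry, cpu_freeze, the thread setup) are illustrative only, not kernel APIs.

/* Sketch of the "first caller proceeds, later callers freeze" gate that
 * kdump_in_progress implements above; plain C11, not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int kdump_in_progress;	/* 0 = no dump started yet */

static void cpu_freeze(int cpu)
{
	/* Stand-in for kdump_cpu_freeze(): park this "CPU" forever. */
	printf("cpu %d frozen\n", cpu);
	for (;;)
		sleep(1);
}

static void *crash_entry(void *arg)
{
	int cpu = (int)(long)arg;

	/* Only the first entrant wins and goes on to take the dump;
	 * everyone else parks itself, as in machine_crash_shutdown(). */
	if (atomic_fetch_add(&kdump_in_progress, 1) + 1 != 1)
		cpu_freeze(cpu);

	printf("cpu %d is the kdump monarch\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, crash_entry, (void *)i);
	sleep(1);	/* give the frozen threads time to report */
	return 0;
}

The same increment-and-test shows up in the DIE_MCA_MONARCH_LEAVE branch: a fatal MCA starts the dump only if it is the first entrant; otherwise it is treated as re-entry and no second dump is attempted.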
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 23f846de62d5..1a6e44515eb4 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -167,7 +167,7 @@ RestRR: \
 	mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);;	\
 	mov rr[_tmp1]=_tmp2
 
-	.section __special_page_section,"ax"
+	__PAGE_ALIGNED_DATA
 
 	.global empty_zero_page
 empty_zero_page:
@@ -181,7 +181,7 @@ swapper_pg_dir:
 halt_msg:
 	stringz "Halting kernel\n"
 
-	.section .text.head,"ax"
+	__REF
 
 	.global start_ap
 
@@ -1242,7 +1242,7 @@ GLOBAL_ENTRY(ia64_jump_to_sal)
 	movl r16=SAL_PSR_BITS_TO_SET;;
 	mov cr.ipsr=r16
 	mov cr.ifs=r0;;
-	rfi;;
+	rfi;;			// note: this unmask MCA/INIT (psr.mc)
 1:
 	/*
 	 * Invalidate all TLB data/inst
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 0823de1f6ebe..3d3aeef46947 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -24,6 +24,8 @@
 #include <asm/delay.h>
 #include <asm/meminit.h>
 #include <asm/processor.h>
+#include <asm/sal.h>
+#include <asm/mca.h>
 
 typedef NORET_TYPE void (*relocate_new_kernel_t)(
 					unsigned long indirection_page,
@@ -85,13 +87,26 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 	void *pal_addr = efi_get_pal_addr();
 	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
 	int ii;
+	u64 fp, gp;
+	ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;
 
 	BUG_ON(!image);
 	if (image->type == KEXEC_TYPE_CRASH) {
 		crash_save_this_cpu();
 		current->thread.ksp = (__u64)info->sw - 16;
+
+		/* Register noop init handler */
+		fp = ia64_tpa(init_handler->fp);
+		gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
+		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
+	} else {
+		/* Unregister init handlers of current kernel */
+		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
 	}
 
+	/* Unregister mca handler - No more recovery on current kernel */
+	ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);
+
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
 
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7b30d21c5190..d2877a7bfe2e 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1682,14 +1682,25 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	if (!sos->monarch) {
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
+
+#ifdef CONFIG_KEXEC
+		while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
+			udelay(1000);
+#else
 		while (monarch_cpu == -1)
 			cpu_relax();	/* spin until monarch enters */
+#endif
 
 		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
 		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
 
+#ifdef CONFIG_KEXEC
+		while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
+			udelay(1000);
+#else
 		while (monarch_cpu != -1)
 			cpu_relax();	/* spin until monarch leaves */
+#endif
 
 		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
 
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index a06d46548ff9..7461d2573d41 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -40,6 +40,7 @@
 
 	.global ia64_do_tlb_purge
 	.global ia64_os_mca_dispatch
+	.global ia64_os_init_on_kdump
 	.global ia64_os_init_dispatch_monarch
 	.global ia64_os_init_dispatch_slave
 
@@ -299,6 +300,25 @@ END(ia64_os_mca_virtual_begin)
 //StartMain////////////////////////////////////////////////////////////////////
 
 //
+// NOP init handler for kdump.  In panic situation, we may receive INIT
+// while kernel transition.  Since we initialize registers on leave from
+// current kernel, no longer monarch/slave handlers of current kernel in
+// virtual mode are called safely.
+// We can unregister these init handlers from SAL, however then the INIT
+// will result in warmboot by SAL and we cannot retrieve the crashdump.
+// Therefore register this NOP function to SAL, to prevent entering virtual
+// mode and resulting warmboot by SAL.
+//
+ia64_os_init_on_kdump:
+	mov		r8=r0		// IA64_INIT_RESUME
+	mov		r9=r10		// SAL_GP
+	mov		r22=r17		// *minstate
+	;;
+	mov		r10=r0		// return to same context
+	mov		b0=r12		// SAL_CHECK return address
+	br		b0
+
+//
 // SAL to OS entry point for INIT on all processors.  This has been defined for
 // registration purposes with SAL as a part of ia64_mca_init.  Monarch and
 // slave INIT have identical processing, except for the value of the
@@ -1073,3 +1093,30 @@ GLOBAL_ENTRY(ia64_get_rnat)
 	mov ar.rsc=3
 	br.ret.sptk.many rp
 END(ia64_get_rnat)
+
+
+// void ia64_set_psr_mc(void)
+//
+// Set psr.mc bit to mask MCA/INIT.
+GLOBAL_ENTRY(ia64_set_psr_mc)
+	rsm psr.i | psr.ic		// disable interrupts
+	;;
+	srlz.d
+	;;
+	mov r14 = psr			// get psr{36:35,31:0}
+	movl r15 = 1f
+	;;
+	dep r14 = -1, r14, PSR_MC, 1	// set psr.mc
+	;;
+	dep r14 = -1, r14, PSR_IC, 1	// set psr.ic
+	;;
+	dep r14 = -1, r14, PSR_BN, 1	// keep bank1 in use
+	;;
+	mov cr.ipsr = r14
+	mov cr.ifs = r0
+	mov cr.iip = r15
+	;;
+	rfi
+1:
+	br.ret.sptk.many rp
+END(ia64_set_psr_mc)
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 903babd22d62..32f6fc131fbe 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -52,7 +52,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 	srlz.i
 	;;
 	mov		ar.rnat=r18
-	rfi
+	rfi				// note: this unmask MCA/INIT (psr.mc)
 	;;
 1:
 	//physical mode code begin
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index eb4214d1c5af..0a0c77b2c988 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -51,8 +51,6 @@ SECTIONS
 	KPROBES_TEXT
 	*(.gnu.linkonce.t*)
       }
-  .text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
-	{ *(.text.head) }
   .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
 	{ *(.text2) }
 #ifdef CONFIG_SMP
@@ -66,14 +64,7 @@ SECTIONS
   NOTES :code :note		/* put .notes in text and mark in PT_NOTE  */
   code_continues : {} :code	/* switch back to regular program...  */
 
-  /* Exception table */
-  . = ALIGN(16);
-  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
-	{
-	  __start___ex_table = .;
-	  *(__ex_table)
-	  __stop___ex_table = .;
-	}
+  EXCEPTION_TABLE(16)
 
   /* MCA table */
   . = ALIGN(16);
@@ -115,38 +106,9 @@ SECTIONS
 
   . = ALIGN(PAGE_SIZE);
   __init_begin = .;
-  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
-	{
-	  _sinittext = .;
-	  INIT_TEXT
-	  _einittext = .;
-	}
-
-  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
-	{ INIT_DATA }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
-	{
-	  __initramfs_start = .;
-	  *(.init.ramfs)
-	  __initramfs_end = .;
-	}
-#endif
-
-  . = ALIGN(16);
-  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
-	{
-	  __setup_start = .;
-	  *(.init.setup)
-	  __setup_end = .;
-	}
-  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
-	{
-	  __initcall_start = .;
-	  INITCALLS
-	  __initcall_end = .;
-	}
+  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_DATA_SECTION(16)
 
   .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
 	{
@@ -204,24 +166,13 @@ SECTIONS
 	}
 #endif
 
-  . = ALIGN(8);
-  __con_initcall_start = .;
-  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
-	{ *(.con_initcall.init) }
-  __con_initcall_end = .;
-  __security_initcall_start = .;
-  .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET)
-	{ *(.security_initcall.init) }
-  __security_initcall_end = .;
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
-  /* The initial task and kernel stack */
-  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
-	{ *(.data.init_task) }
-
   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
-	{ *(__special_page_section)
+	{
+	PAGE_ALIGNED_DATA(PAGE_SIZE)
+	  . = ALIGN(PAGE_SIZE);
 	  __start_gate_section = .;
 	  *(.data.gate)
 	  __stop_gate_section = .;
@@ -236,12 +187,6 @@ SECTIONS
    * kernel data
    */
 
-  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
-	{ *(.data.read_mostly) }
-
-  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
-	{ *(.data.cacheline_aligned) }
-
   /* Per-cpu data: */
   . = ALIGN(PERCPU_PAGE_SIZE);
   PERCPU_VADDR(PERCPU_ADDR, :percpu)
@@ -258,6 +203,9 @@ SECTIONS
 	__cpu0_per_cpu = .;
 	. = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
 #endif
+	INIT_TASK_DATA(PAGE_SIZE)
+	CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
+	READ_MOSTLY_DATA(SMP_CACHE_BYTES)
 	DATA_DATA
 	*(.data1)
 	*(.gnu.linkonce.d*)
@@ -274,48 +222,15 @@ SECTIONS
   .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
 	{ *(.sdata) *(.sdata1) *(.srdata) }
   _edata = .;
-  __bss_start = .;
-  .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
-	{ *(.sbss) *(.scommon) }
-  .bss : AT(ADDR(.bss) - LOAD_OFFSET)
-	{ *(.bss) *(COMMON) }
-  __bss_stop = .;
+
+  BSS_SECTION(0, 0, 0)
 
   _end = .;
 
   code : { } :code
-  /* Stabs debugging sections.  */
-  .stab 0 : { *(.stab) }
-  .stabstr 0 : { *(.stabstr) }
-  .stab.excl 0 : { *(.stab.excl) }
-  .stab.exclstr 0 : { *(.stab.exclstr) }
-  .stab.index 0 : { *(.stab.index) }
-  .stab.indexstr 0 : { *(.stab.indexstr) }
-  /* DWARF debug sections.
-     Symbols in the DWARF debugging sections are relative to the beginning
-     of the section so we begin them at 0.  */
-  /* DWARF 1 */
-  .debug          0 : { *(.debug) }
-  .line           0 : { *(.line) }
-  /* GNU DWARF 1 extensions */
-  .debug_srcinfo  0 : { *(.debug_srcinfo) }
-  .debug_sfnames  0 : { *(.debug_sfnames) }
-  /* DWARF 1.1 and DWARF 2 */
-  .debug_aranges  0 : { *(.debug_aranges) }
-  .debug_pubnames 0 : { *(.debug_pubnames) }
-  /* DWARF 2 */
-  .debug_info     0 : { *(.debug_info) }
-  .debug_abbrev   0 : { *(.debug_abbrev) }
-  .debug_line     0 : { *(.debug_line) }
-  .debug_frame    0 : { *(.debug_frame) }
-  .debug_str      0 : { *(.debug_str) }
-  .debug_loc      0 : { *(.debug_loc) }
-  .debug_macinfo  0 : { *(.debug_macinfo) }
-  /* SGI/MIPS DWARF 2 extensions */
-  .debug_weaknames 0 : { *(.debug_weaknames) }
-  .debug_funcnames 0 : { *(.debug_funcnames) }
-  .debug_typenames 0 : { *(.debug_typenames) }
-  .debug_varnames  0 : { *(.debug_varnames) }
+
+  STABS_DEBUG
+  DWARF_DEBUG
 
   /* Default discards */
   DISCARDS
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 239b3cedcf2b..5bc34eac9e01 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -54,6 +54,8 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
 				break;
 			}
 		}
+		if (i >= ate_resource->num_ate)
+			return -1;
 	} else
 		index++;		/* Try next ate */
 }
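
The pcibr_ate.c hunk closes the infinite loop named in the merge log: once the scan for a run of free ATEs walks past the end of the table, find_free_ate() now returns -1 instead of retrying forever. Below is a self-contained sketch of that bounded search; find_free_run() and its arguments are a simplified stand-in, not the actual SN2 driver code.

/* Look for "count" consecutive free slots in a table of "num" entries,
 * starting at "start".  Returns the first index of such a run, or -1 once
 * the scan reaches the end of the table -- the bound whose absence let the
 * original search loop forever. */
#include <stdio.h>

static int find_free_run(const unsigned char *used, int num, int start, int count)
{
	int index = start;

	while (index + count <= num) {
		int i, conflict = -1;

		/* Scan the candidate window for an occupied slot. */
		for (i = index; i < index + count; i++) {
			if (used[i]) {
				conflict = i;
				break;
			}
		}
		if (conflict < 0)
			return index;		/* the whole window was free */
		index = conflict + 1;		/* retry just past the conflict */
	}
	return -1;				/* ran off the end: give up */
}

int main(void)
{
	unsigned char used[8] = { 1, 0, 0, 1, 0, 0, 0, 1 };

	printf("run of 3 starts at %d\n", find_free_run(used, 8, 0, 3));	/* prints 4 */
	printf("run of 4 starts at %d\n", find_free_run(used, 8, 0, 4));	/* prints -1 */
	return 0;
}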