Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--   arch/ia64/kernel/acpi.c        53
-rw-r--r--   arch/ia64/kernel/entry.S        4
-rw-r--r--   arch/ia64/kernel/ia64_ksyms.c  15
-rw-r--r--   arch/ia64/kernel/setup.c        4
-rw-r--r--   arch/ia64/kernel/smpboot.c      5
-rw-r--r--   arch/ia64/kernel/time.c        39
-rw-r--r--   arch/ia64/kernel/traps.c        8
7 files changed, 86 insertions(+), 42 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d2702c419cf8..ecd44bdc8394 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -761,6 +761,59 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 	return (0);
 }
 
+int additional_cpus __initdata = -1;
+
+static __init int setup_additional_cpus(char *s)
+{
+	if (s)
+		additional_cpus = simple_strtol(s, NULL, 0);
+
+	return 0;
+}
+
+early_param("additional_cpus", setup_additional_cpus);
+
+/*
+ * cpu_possible_map should be static, it cannot change as cpu's
+ * are onlined, or offlined. The reason is per-cpu data-structures
+ * are allocated by some modules at init time, and dont expect to
+ * do this dynamically on cpu arrival/departure.
+ * cpu_present_map on the other hand can change dynamically.
+ * In case when cpu_hotplug is not compiled, then we resort to current
+ * behaviour, which is cpu_possible == cpu_present.
+ * - Ashok Raj
+ *
+ * Three ways to find out the number of additional hotplug CPUs:
+ * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
+ * - The user can overwrite it with additional_cpus=NUM
+ * - Otherwise don't reserve additional CPUs.
+ */
+__init void prefill_possible_map(void)
+{
+	int i;
+	int possible, disabled_cpus;
+
+	disabled_cpus = total_cpus - available_cpus;
+
+	if (additional_cpus == -1) {
+		if (disabled_cpus > 0)
+			additional_cpus = disabled_cpus;
+		else
+			additional_cpus = 0;
+	}
+
+	possible = available_cpus + additional_cpus;
+
+	if (possible > NR_CPUS)
+		possible = NR_CPUS;
+
+	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+		possible, max((possible - available_cpus), 0));
+
+	for (i = 0; i < possible; i++)
+		cpu_set(i, cpu_possible_map);
+}
+
 int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 27b222c277e4..930fdfca6ddb 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -569,7 +569,9 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 .mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
 .mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
 	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3:	br.cond.sptk .work_pending_syscall_end
+.ret3:
+(pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
+	br.cond.sptk .work_pending_syscall_end
 
 strace_error:
 	ld8 r3=[r2]				// load pt_regs.r8
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index e72de580ebbf..bbcfd08378a6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -10,23 +10,8 @@
 
 #include <linux/string.h>
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcpy);
 EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strpbrk);
 
 #include <asm/checksum.h>
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 35f7835294a3..3258e09278d0 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -430,6 +430,7 @@ setup_arch (char **cmdline_p)
 	if (early_console_setup(*cmdline_p) == 0)
 		mark_bsp_online();
 
+	parse_early_param();
 #ifdef CONFIG_ACPI
 	/* Initialize the ACPI boot-time table parser */
 	acpi_table_init();
@@ -688,6 +689,9 @@ void
 setup_per_cpu_areas (void)
 {
 	/* start_kernel() requires this... */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
 }
 
 /*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8f44e7d2df66..b681ef34a86e 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -129,7 +129,7 @@ DEFINE_PER_CPU(int, cpu_state);
 /* Bitmasks of currently online, and possible CPUs */
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);
 
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
@@ -506,9 +506,6 @@ smp_build_cpu_map (void)
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		ia64_cpu_to_sapicid[cpu] = -1;
-#ifdef CONFIG_HOTPLUG_CPU
-		cpu_set(cpu, cpu_possible_map);
-#endif
 	}
 
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index a094ec49ccfa..307d01e15b2e 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -250,32 +250,27 @@ time_init (void)
 	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
 
-#define SMALLUSECS 100
-
-void
-udelay (unsigned long usecs)
+/*
+ * Generic udelay assumes that if preemption is allowed and the thread
+ * migrates to another CPU, that the ITC values are synchronized across
+ * all CPUs.
+ */
+static void
+ia64_itc_udelay (unsigned long usecs)
 {
-	unsigned long start;
-	unsigned long cycles;
-	unsigned long smallusecs;
+	unsigned long start = ia64_get_itc();
+	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;
 
-	/*
-	 * Execute the non-preemptible delay loop (because the ITC might
-	 * not be synchronized between CPUS) in relatively short time
-	 * chunks, allowing preemption between the chunks.
-	 */
-	while (usecs > 0) {
-		smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
-		preempt_disable();
-		cycles = smallusecs*local_cpu_data->cyc_per_usec;
-		start = ia64_get_itc();
+	while (time_before(ia64_get_itc(), end))
+		cpu_relax();
+}
 
-		while (ia64_get_itc() - start < cycles)
-			cpu_relax();
+void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
 
-		preempt_enable();
-		usecs -= smallusecs;
-	}
+void
+udelay (unsigned long usecs)
+{
+	(*ia64_udelay)(usecs);
 }
 EXPORT_SYMBOL(udelay);
 
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 55391901b013..dabd6c32641e 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -16,6 +16,7 @@
 #include <linux/module.h>       /* for EXPORT_SYMBOL */
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/delay.h>		/* for ssleep() */
 
 #include <asm/fpswa.h>
 #include <asm/ia32.h>
@@ -116,6 +117,13 @@ die (const char *str, struct pt_regs *regs, long err)
 	bust_spinlocks(0);
 	die.lock_owner = -1;
 	spin_unlock_irq(&die.lock);
+
+	if (panic_on_oops) {
+		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+		ssleep(5);
+		panic("Fatal exception");
+	}
+
 	do_exit(SIGSEGV);
 }
 