Diffstat:
-rw-r--r--  arch/ia64/Kconfig                                                        |   2
-rw-r--r--  arch/ia64/Makefile                                                       |   2
-rw-r--r--  arch/ia64/configs/generic_defconfig (renamed from arch/ia64/defconfig)   |   0
-rw-r--r--  arch/ia64/ia32/ia32_signal.c                                             |  13
-rw-r--r--  arch/ia64/kernel/iosapic.c                                               |   4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c                                              | 134
-rw-r--r--  arch/ia64/kernel/kprobes.c                                               |   5
-rw-r--r--  arch/ia64/kernel/msi_ia64.c                                              |   3
-rw-r--r--  arch/ia64/kernel/sal.c                                                   |   7
-rw-r--r--  arch/ia64/kernel/signal.c                                                |  36
-rw-r--r--  include/asm-ia64/Kbuild                                                  |   2
-rw-r--r--  include/asm-ia64/hw_irq.h                                                |  12
-rw-r--r--  include/asm-ia64/kprobes.h                                               |   4
-rw-r--r--  include/asm-ia64/sal.h                                                   |   4
14 files changed, 180 insertions(+), 48 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 56762d3c2a6a..8fa3faf5ef1b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -156,6 +156,8 @@ config IA64_HP_ZX1_SWIOTLB
 
 config IA64_SGI_SN2
 	bool "SGI-SN2"
+	select NUMA
+	select ACPI_NUMA
 	help
 	  Selecting this option will optimize the kernel for use on sn2 based
 	  systems, but the resulting kernel binary will not run on other
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index b916ccfdef84..f1645c4f7039 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -11,6 +11,8 @@
 # Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com>
 #
 
+KBUILD_DEFCONFIG := generic_defconfig
+
 NM := $(CROSS_COMPILE)nm -B
 READELF := $(CROSS_COMPILE)readelf
 
diff --git a/arch/ia64/defconfig b/arch/ia64/configs/generic_defconfig
index 0210545e7f61..0210545e7f61 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/configs/generic_defconfig
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 85e82f32e480..256a7faeda07 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -766,8 +766,19 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 
 	/* This is the X/Open sanctioned signal stack switching. */
 	if (ka->sa.sa_flags & SA_ONSTACK) {
-		if (!on_sig_stack(esp))
+		int onstack = sas_ss_flags(esp);
+
+		if (onstack == 0)
 			esp = current->sas_ss_sp + current->sas_ss_size;
+		else if (onstack == SS_ONSTACK) {
+			/*
+			 * If we are on the alternate signal stack and would
+			 * overflow it, don't. Return an always-bogus address
+			 * instead so we will die with SIGSEGV.
+			 */
+			if (!likely(on_sig_stack(esp - frame_size)))
+				return (void __user *) -1L;
+		}
 	}
 	/* Legacy stack switching not supported */
 
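The hunk above switches get_sigframe() from a bare on_sig_stack() test to sas_ss_flags(): a handler that is already on the alternate signal stack and whose frame would run past the bottom of that stack now gets an always-bogus frame address, so the task dies with SIGSEGV instead of scribbling below the stack. A minimal standalone sketch of that decision is shown here; every my_*/pick_* name is a hypothetical stand-in for the kernel helpers (sas_ss_flags, on_sig_stack, current->sas_ss_*), not kernel code.

/* Standalone sketch of the alternate-stack overflow check added above. */
#include <stdio.h>

#define MY_SS_ONSTACK 1			/* stand-in for SS_ONSTACK */

struct altstack { unsigned long sp; unsigned long size; };

static int my_on_sig_stack(const struct altstack *ss, unsigned long sp)
{
	return sp >= ss->sp && sp < ss->sp + ss->size;
}

/* 0: not on the alternate stack; MY_SS_ONSTACK: currently executing on it */
static int my_sas_ss_flags(const struct altstack *ss, unsigned long sp)
{
	return my_on_sig_stack(ss, sp) ? MY_SS_ONSTACK : 0;
}

/* Returns the sp to build the signal frame at, or 0 if it would overflow. */
static unsigned long pick_frame_sp(const struct altstack *ss,
				   unsigned long sp, unsigned long frame_size)
{
	int onstack = my_sas_ss_flags(ss, sp);

	if (onstack == 0)			/* not there yet: switch to the top */
		sp = ss->sp + ss->size;
	else if (onstack == MY_SS_ONSTACK &&
		 !my_on_sig_stack(ss, sp - frame_size))
		return 0;			/* frame would run off the bottom */
	return sp - frame_size;
}

int main(void)
{
	struct altstack ss = { .sp = 0x10000, .size = 0x2000 };

	printf("%#lx\n", pick_frame_sp(&ss, 0x50000, 0x400));	/* switches onto altstack */
	printf("%#lx\n", pick_frame_sp(&ss, 0x10100, 0x400));	/* overflow -> 0 */
	return 0;
}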
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 398e2fd1cd25..7b3292282dea 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	if (cpus_empty(mask))
 		return;
 
-	if (reassign_irq_vector(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, first_cpu(mask)))
 		return;
 
 	dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
 	struct iosapic_rte_info *rte;
 	int do_unmask_irq = 0;
 
+	irq_complete_move(irq);
 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 {
 	irq_desc_t *idesc = irq_desc + irq;
 
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	/*
 	 * Once we have recorded IRQ_PENDING already, we can mask the
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 0b52f19ed046..2b8cf6e85af4 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu)
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR	IA64_DEF_FIRST_DEVICE_VECTOR
+
 static enum vector_domain_type {
 	VECTOR_DOMAIN_NONE,
 	VECTOR_DOMAIN_PERCPU
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu)
 	return CPU_MASK_ALL;
 }
 
+static int __irq_prepare_move(int irq, int cpu)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	int vector;
+	cpumask_t domain;
+
+	if (cfg->move_in_progress || cfg->move_cleanup_count)
+		return -EBUSY;
+	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+		return -EINVAL;
+	if (cpu_isset(cpu, cfg->domain))
+		return 0;
+	domain = vector_allocation_domain(cpu);
+	vector = find_unassigned_vector(domain);
+	if (vector < 0)
+		return -ENOSPC;
+	cfg->move_in_progress = 1;
+	cfg->old_domain = cfg->domain;
+	cfg->vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->domain = CPU_MASK_NONE;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+	return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __irq_prepare_move(irq, cpu);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	cpumask_t cleanup_mask;
+	int i;
+
+	if (likely(!cfg->move_in_progress))
+		return;
+
+	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+		return;
+
+	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+	for_each_cpu_mask(i, cleanup_mask)
+		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+	cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+	int me = smp_processor_id();
+	ia64_vector vector;
+	unsigned long flags;
+
+	for (vector = IA64_FIRST_DEVICE_VECTOR;
+	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+		int irq;
+		struct irq_desc *desc;
+		struct irq_cfg *cfg;
+		irq = __get_cpu_var(vector_irq)[vector];
+		if (irq < 0)
+			continue;
+
+		desc = irq_desc + irq;
+		cfg = irq_cfg + irq;
+		spin_lock(&desc->lock);
+		if (!cfg->move_cleanup_count)
+			goto unlock;
+
+		if (!cpu_isset(me, cfg->old_domain))
+			goto unlock;
+
+		spin_lock_irqsave(&vector_lock, flags);
+		__get_cpu_var(vector_irq)[vector] = -1;
+		cpu_clear(me, vector_table[vector]);
+		spin_unlock_irqrestore(&vector_lock, flags);
+		cfg->move_cleanup_count--;
+ unlock:
+		spin_unlock(&desc->lock);
+	}
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+	.handler =	smp_irq_move_cleanup_interrupt,
+	.flags =	IRQF_DISABLED,
+	.name =		"irq_move"
+};
+
 static int __init parse_vector_domain(char *arg)
 {
 	if (!arg)
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq)
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
-static int __reassign_irq_vector(int irq, int cpu)
-{
-	struct irq_cfg *cfg = &irq_cfg[irq];
-	int vector;
-	cpumask_t domain;
-
-	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
-		return -EINVAL;
-	if (cpu_isset(cpu, cfg->domain))
-		return 0;
-	domain = vector_allocation_domain(cpu);
-	vector = find_unassigned_vector(domain);
-	if (vector < 0)
-		return -ENOSPC;
-	__clear_irq_vector(irq);
-	BUG_ON(__bind_irq_vector(irq, vector, domain));
-	return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	ret = __reassign_irq_vector(irq, cpu);
-	spin_unlock_irqrestore(&vector_lock, flags);
-	return ret;
-}
-
 /*
  * Dynamic irq allocate and deallocation for MSI
  */
@@ -578,6 +645,13 @@ init_IRQ (void)
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+		IA64_FIRST_DEVICE_VECTOR++;
+		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+	}
+#endif
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
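For readers following the interrupt-migration change above: reassign_irq_vector() used to release the old vector immediately, which can race with an interrupt already in flight to the old CPU. The new code splits the move in two. irq_prepare_move() allocates the new vector and domain but remembers the old one, and irq_complete_move(), called from the ack/end paths, only triggers cleanup of the old vector once an interrupt has actually arrived inside the new domain, by sending IA64_IRQ_MOVE_VECTOR IPIs to the CPUs of the old domain; that is also why init_IRQ() now steals the first device vector for the cleanup handler when per-CPU vector domains are enabled. The single-threaded toy model below is only meant to show that ordering; the toy_* names are hypothetical, and the real locking, per-CPU vector tables and IPIs are omitted.

/* Toy, single-threaded model of the two-phase IRQ move (illustrative only). */
#include <stdio.h>

struct toy_cfg {
	int vector;			/* currently programmed vector */
	int domain;			/* CPU currently targeted */
	int old_domain;			/* previous CPU, old vector still live */
	int move_in_progress;
	int move_cleanup_count;
};

/* Phase 1: pick the new CPU/vector but keep the old mapping alive for now. */
static int toy_prepare_move(struct toy_cfg *cfg, int new_cpu, int new_vector)
{
	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -1;			/* previous move not finished yet */
	if (cfg->domain == new_cpu)
		return 0;			/* nothing to do */
	cfg->old_domain = cfg->domain;
	cfg->domain = new_cpu;
	cfg->vector = new_vector;
	cfg->move_in_progress = 1;
	return 0;
}

/* Phase 2: the first interrupt taken outside the old domain retires it. */
static void toy_complete_move(struct toy_cfg *cfg, int this_cpu)
{
	if (!cfg->move_in_progress)
		return;
	if (this_cpu == cfg->old_domain)	/* still arriving on the old CPU */
		return;
	/* in the kernel this is deferred to a cleanup IPI; here it is immediate */
	cfg->move_cleanup_count = 1;
	printf("cleanup old vector on cpu %d\n", cfg->old_domain);
	cfg->move_cleanup_count = 0;
	cfg->move_in_progress = 0;
}

int main(void)
{
	struct toy_cfg cfg = { .vector = 0x30, .domain = 0 };

	toy_prepare_move(&cfg, 1, 0x31);	/* set_affinity to CPU 1 */
	toy_complete_move(&cfg, 0);		/* irq still lands on CPU 0: no-op */
	toy_complete_move(&cfg, 1);		/* first irq on CPU 1: retire old vector */
	return 0;
}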
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index b618487cdc85..615c3d2b6348 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -1001,6 +1001,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
+/* ia64 does not need this */
+void __kprobes jprobe_return(void)
+{
+}
+
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index e86d02959794..60c6ef67ebb2 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	if (!cpu_online(cpu))
 		return;
 
-	if (reassign_irq_vector(irq, cpu))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
 	read_msi_msg(irq, &msg);
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq)
 
 static void ia64_ack_msi_irq(unsigned int irq)
 {
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	ia64_eoi();
 }
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index f44fe8412162..a3022dc48ef8 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -109,6 +109,13 @@ check_versions (struct ia64_sal_systab *systab)
 		sal_revision = SAL_VERSION_CODE(2, 8);
 		sal_version = SAL_VERSION_CODE(0, 0);
 	}
+
+	if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
+		/*
+		 * SGI Altix has hard-coded version 2.9 in their prom
+		 * but they actually implement 3.2, so let's fix it here.
+		 */
+		sal_revision = SAL_VERSION_CODE(3, 2);
 }
 
 static void __init
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 309da3567bc8..5740296c35af 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -342,15 +342,33 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
 
 	new_sp = scr->pt.r12;
 	tramp_addr = (unsigned long) __kernel_sigtramp;
-	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
-		new_sp = current->sas_ss_sp + current->sas_ss_size;
-		/*
-		 * We need to check for the register stack being on the signal stack
-		 * separately, because it's switched separately (memory stack is switched
-		 * in the kernel, register stack is switched in the signal trampoline).
-		 */
-		if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
-			new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		int onstack = sas_ss_flags(new_sp);
+
+		if (onstack == 0) {
+			new_sp = current->sas_ss_sp + current->sas_ss_size;
+			/*
+			 * We need to check for the register stack being on the
+			 * signal stack separately, because it's switched
+			 * separately (memory stack is switched in the kernel,
+			 * register stack is switched in the signal trampoline).
+			 */
+			if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
+				new_rbs = ALIGN(current->sas_ss_sp,
+						sizeof(long));
+		} else if (onstack == SS_ONSTACK) {
+			unsigned long check_sp;
+
+			/*
+			 * If we are on the alternate signal stack and would
+			 * overflow it, don't. Return an always-bogus address
+			 * instead so we will die with SIGSEGV.
+			 */
+			check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
+			if (!likely(on_sig_stack(check_sp)))
+				return force_sigsegv_info(sig, (void __user *)
+							  check_sp);
+		}
 	}
 	frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
 
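Besides adding the same overflow check as the ia32 path, the hunk above replaces an open-coded round-up of sas_ss_sp with ALIGN(). The short standalone check below uses TOY_ALIGN, a stand-in that mirrors the kernel's power-of-two ALIGN(), with illustrative values, to show how the register-backing-store base gets rounded up to a long boundary.

/* Illustrative only: TOY_ALIGN mirrors the kernel's power-of-two ALIGN(). */
#include <stdio.h>

#define TOY_ALIGN(x, a)	(((unsigned long)(x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long sps[] = { 0x1000, 0x1001, 0x1007, 0x1008 };

	for (int i = 0; i < 4; i++)
		printf("sas_ss_sp %#lx -> rbs base %#lx\n",
		       sps[i], TOY_ALIGN(sps[i], sizeof(long)));
	return 0;
}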
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index 4a1e48b9f403..eb24a3f47caa 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm
 header-y += break.h
 header-y += fpu.h
 header-y += fpswa.h
-header-y += gcc_intrin.h
 header-y += ia64regs.h
 header-y += intel_intrin.h
 header-y += intrinsics.h
@@ -12,5 +11,6 @@ header-y += ptrace_offsets.h
 header-y += rse.h
 header-y += ucontext.h
 
+unifdef-y += gcc_intrin.h
 unifdef-y += perfmon.h
 unifdef-y += ustack.h
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 7e6e3779670a..76366dc9c1a0 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -93,6 +93,9 @@ extern __u8 isa_irq_to_vector_map[16];
 struct irq_cfg {
 	ia64_vector vector;
 	cpumask_t domain;
+	cpumask_t old_domain;
+	unsigned move_cleanup_count;
+	u8 move_in_progress : 1;
 };
 extern spinlock_t vector_lock;
 extern struct irq_cfg irq_cfg[NR_IRQS];
@@ -106,12 +109,19 @@ extern int assign_irq_vector (int irq); /* allocate a free vector */
 extern void free_irq_vector (int vector);
 extern int reserve_irq_vector (int vector);
 extern void __setup_vector_irq(int cpu);
-extern int reassign_irq_vector(int irq, int cpu);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
 extern int check_irq_used (int irq);
 extern void destroy_and_reserve_irq (unsigned int irq);
 
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+extern int irq_prepare_move(int irq, int cpu);
+extern void irq_complete_move(unsigned int irq);
+#else
+static inline int irq_prepare_move(int irq, int cpu) { return 0; }
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
 static inline void ia64_resend_irq(unsigned int vector)
 {
 	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index adbaba14eb0a..8233b3a964c6 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -121,10 +121,6 @@ extern int kprobes_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
 
-/* ia64 does not need this */
-static inline void jprobe_return(void)
-{
-}
 extern void invalidate_stacked_regs(void);
 extern void flush_register_stack(void);
 extern void arch_remove_kprobe(struct kprobe *p);
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 2251118894ae..f4904db3b057 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -807,6 +807,10 @@ static inline s64
 ia64_sal_physical_id_info(u16 *splid)
 {
 	struct ia64_sal_retval isrv;
+
+	if (sal_revision < SAL_VERSION_CODE(3,2))
+		return -1;
+
 	SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
 	if (splid)
 		*splid = isrv.v0;
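The guard added to ia64_sal_physical_id_info() relies on SAL revisions comparing correctly as plain integers: the major revision sits in the high byte of the code and the minor in the low byte, so integer ordering matches version ordering. It also shows why the sal.c hunk above has to bump SGI's mis-reported 2.9 up to 3.2, otherwise the new call would be refused on Altix. The packing below is a simplified stand-in for the real SAL_VERSION_CODE macro in this header, shown only to illustrate the ordering.

/* Illustrative only: the real macro and BCD details live in include/asm-ia64/sal.h. */
#include <stdio.h>

#define TOY_SAL_VERSION_CODE(major, minor)	(((major) << 8) | (minor))

int main(void)
{
	unsigned int altix_prom = TOY_SAL_VERSION_CODE(2, 9);	/* reported by the prom */
	unsigned int needed     = TOY_SAL_VERSION_CODE(3, 2);	/* required by the call */

	printf("2.9 -> %#x, 3.2 -> %#x, call refused: %d\n",
	       altix_prom, needed, altix_prom < needed);
	return 0;
}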
