From b64f34cdfe5bef9dfed1304c513220b0f2862eca Mon Sep 17 00:00:00 2001
From: Hidetoshi Seto
Date: Tue, 29 Jan 2008 14:27:30 +0900
Subject: [IA64] VIRT_CPU_ACCOUNTING (accurate cpu time accounting)

This patch implements VIRT_CPU_ACCOUNTING for ia64, which enables more
accurate cpu time accounting.

VIRT_CPU_ACCOUNTING is a kernel config option that the s390 and powerpc
architectures already have. Turning this config on switches the cpu time
accounting mechanism from a tick-sampling based one to a state-transition
based one. State-transition based accounting works by reading the time
(the processor's cycle counter) at every state-transition point, such as
kernel entry/exit, interrupts, softirqs etc. The difference between two
such points is the actual time spent in that state, which is clearly more
accurate than a tick-sampling based estimate.

Signed-off-by: Hidetoshi Seto
Signed-off-by: Tony Luck
---
 include/asm-ia64/cputime.h | 104 +++++++++++++++++++++++++++++++++++++++++
 include/asm-ia64/system.h | 12 +++++
 include/asm-ia64/thread_info.h | 14 ++++++
 3 files changed, 130 insertions(+)

diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h
index 72400a78002a..f9abdec6577a 100644
--- a/include/asm-ia64/cputime.h
+++ b/include/asm-ia64/cputime.h
@@ -1,6 +1,110 @@
+/*
+ * include/asm-ia64/cputime.h:
+ * Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on .
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include 
+#else
+
+#include 
+#include 
+#include 
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero ((cputime_t)0)
+#define cputime_max ((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+
+#define cputime64_zero ((cputime64_t)0)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime64_sub(__a, __b) ((__a) - (__b))
+#define cputime_to_cputime64(__ct) (__ct)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+#define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC)
+#define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+	return (ret + val->tv_nsec);
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+	val->tv_sec = ct / NSEC_PER_SEC;
+	val->tv_nsec = ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(struct timeval *val)
+{
+	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+	return (ret + val->tv_usec * NSEC_PER_USEC);
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+	val->tv_sec = ct / NSEC_PER_SEC;
+	val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct)
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 595112bca3cc..dff8128fa58e 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -210,6 +210,13 @@ struct task_struct;
 extern void ia64_save_extra (struct task_struct *task);
 extern void ia64_load_extra (struct task_struct *task);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
+# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
+#else
+# define IA64_ACCOUNT_ON_SWITCH(p,n)
+#endif
+
 #ifdef CONFIG_PERFMON
 DECLARE_PER_CPU(unsigned long, pfm_syst_info);
 # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
@@ -222,6 +229,7 @@ extern void ia64_load_extra (struct task_struct *task);
 	|| IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
 
 #define __switch_to(prev,next,last) do { \
+	IA64_ACCOUNT_ON_SWITCH(prev, next); \
 	if (IA64_HAS_EXTRA_STATE(prev)) \
 		ia64_save_extra(prev); \
 	if (IA64_HAS_EXTRA_STATE(next)) \
@@ -266,6 +274,10 @@ void cpu_idle_wait(void);
 
 void default_idle(void);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 93d83cbe0c8c..6da8069a0f77 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -31,6 +31,12 @@ struct thread_info {
 	mm_segment_t addr_limit; /* user-level address space limit */
 	int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	__u64 ac_stamp;
+	__u64 ac_leave;
+	__u64 ac_stime;
+	__u64 ac_utime;
+#endif
 };
 
 #define THREAD_SIZE KERNEL_STACK_SIZE
@@ -62,9 +68,17 @@ struct thread_info {
 #define task_stack_page(tsk) ((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#define setup_thread_stack(p, org) \
+	*task_thread_info(p) = *task_thread_info(org); \
+	task_thread_info(p)->ac_stime = 0; \
+	task_thread_info(p)->ac_utime = 0; \
+	task_thread_info(p)->task = (p);
+#else
 #define setup_thread_stack(p, org) \
 	*task_thread_info(p) = *task_thread_info(org); \
 	task_thread_info(p)->task = (p);
+#endif
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-- 
cgit v1.2.2

From c70f8f68676866d778564de337bec6b8734c3850 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Thu, 28 Feb 2008 16:47:50 +0800
Subject: [IA64] regset: 64-bit support

This is the 64-bit regset implementation for IA64: basically register
read/write, derived from the current ptrace register read/write code.

Signed-off-by: Shaohua Li
Signed-off-by: Tony Luck
---
 include/asm-ia64/elf.h | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index f8e83eca67a2..064cf7dcea8e 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -154,6 +154,30 @@ extern void ia64_init_addr_space (void);
 #define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
 #define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what...
 */
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET 0
+#define ELF_NAT_OFFSET (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i) (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i) (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t))
+
 typedef unsigned long elf_fpxregset_t;
 typedef unsigned long elf_greg_t;
-- 
cgit v1.2.2

From 6cb53d7a6f40858181facde0f52587731d2e621f Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Thu, 28 Feb 2008 16:09:38 +0800
Subject: [IA64] use CORE_DUMP_USE_REGSET

Now that we have regset support, we can use CORE_DUMP_USE_REGSET.

Signed-off-by: Shaohua Li
Signed-off-by: Tony Luck
---
 include/asm-ia64/elf.h | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index 064cf7dcea8e..5e0c1a6bce8d 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -26,6 +26,7 @@
 #define ELF_ARCH EM_IA_64
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 
 /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
    interpreted as follows by Linux: */
@@ -207,12 +208,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
 
 struct task_struct;
 
-extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
 #define GATE_EHDR ((const struct elfhdr *) GATE_ADDR)
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
-- 
cgit v1.2.2

From 96651896b8d9ad4244a1c3ed9691faa3e382f503 Mon Sep 17 00:00:00 2001
From: Xiantao Zhang
Date: Thu, 3 Apr 2008 11:02:58 -0700
Subject: [IA64] Add API for allocating Dynamic TR resource.

Dynamic TR resource should be managed in a uniform way.
Add two interfaces for the kernel:
ia64_itr_entry: Allocate a (pair of) TR for caller.
ia64_ptr_entry: Purge a (pair of) TR by caller.
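
For illustration, a hypothetical caller of this pair might look like the
sketch below. The 0x2 target_mask value selecting the dtr and the log2
page-size argument are assumptions inferred from the declarations in the
tlb.h hunk below, not interfaces this message defines:

/* Hypothetical example: pin a 16MB data translation for va/pte and
 * release it again.  The 0x2 "dtr" mask encoding is an assumption. */
static int example_pin_mapping(u64 va, u64 pte)
{
	int slot = ia64_itr_entry(0x2 /* dtr */, va, pte, 24 /* log2(16MB) */);

	if (slot < 0)
		return slot;	/* no free dynamic TR slot */
	/* ... va is now pinned in dtr slot 'slot' ... */
	ia64_ptr_entry(0x2 /* dtr */, slot);
	return 0;
}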
Signed-off-by: Xiantao Zhang
Signed-off-by: Anthony Xu
Signed-off-by: Tony Luck
---
 include/asm-ia64/kregs.h | 3 +++
 include/asm-ia64/tlb.h | 26 ++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h
index 7e55a584975c..aefcdfee7f23 100644
--- a/include/asm-ia64/kregs.h
+++ b/include/asm-ia64/kregs.h
@@ -31,6 +31,9 @@
 #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
 #define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */
 
+#define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/
+#define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use*/
+
 /* Processor status register bits: */
 #define IA64_PSR_BE_BIT 1
 #define IA64_PSR_UP_BIT 2
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 26edcb750f9f..20d8a39680c2 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -64,6 +64,32 @@ struct mmu_gather {
 	struct page *pages[FREE_PTE_NR];
 };
 
+struct ia64_tr_entry {
+	u64 ifa;
+	u64 itir;
+	u64 pte;
+	u64 rr;
+}; /*Record for tr entry!*/
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+
+extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+
+/*
+ region register macros
+*/
+#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK 0x0000000000000001L
+#define RR_VE_SHIFT 0
+#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK 0x00000000000000fcL
+#define RR_PS_SHIFT 2
+#define RR_RID_MASK 0x00000000ffffff00L
+#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
+
 /* Users of the generic TLB shootdown code must declare this storage space. */
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-- 
cgit v1.2.2

From 31a6b11fed6ceec07ec4bdfefae56b8252d450cf Mon Sep 17 00:00:00 2001
From: Xiantao Zhang
Date: Thu, 3 Apr 2008 11:39:43 -0700
Subject: [IA64] Implement smp_call_function_mask for ia64

This interface provides more flexible functionality for the smp
infrastructure ... e.g. KVM frequently needs to operate on a subset of
cpus.

Signed-off-by: Xiantao Zhang
Signed-off-by: Tony Luck
---
 include/asm-ia64/smp.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 4fa733dd417a..ec5f355fb7e3 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,9 @@ ia64_get_lid (void)
 	return lid.f.id << 8 | lid.f.eid;
 }
 
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+				  void *info, int wait);
+
 #define hard_smp_processor_id() ia64_get_lid()
 
 #ifdef CONFIG_SMP
-- 
cgit v1.2.2

From 2046b94e7c4fce92eb8165c2c36c6478f4927178 Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Fri, 4 Apr 2008 11:05:59 -0700
Subject: [IA64] Multiple outstanding ptc.g instruction support

According to SDM 2.2, Itanium supports multiple outstanding ptc.g
instructions. But the current kernel function ia64_global_tlb_purge()
uses a spinlock to serialize ptc.g instructions issued by multiple
processors. This serialization might become a scalability issue on a big
SMP machine where many processors could purge the TLB in parallel. The
patch fixes this problem by issuing multiple ptc.g instructions in
ia64_global_tlb_purge().
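
Conceptually, the fix replaces the single spinlock with a counting
"spinaphore" that admits up to nptcg holders at once. The sketch below
only illustrates that idea; the names and the atomic_cmpxchg() loop are
assumptions for illustration, not the patch's actual code:

/* Illustrative spinaphore: like a spinlock, but up to 'nptcg'
 * CPUs may hold it simultaneously while issuing ptc.g. */
static atomic_t ptcg_count = ATOMIC_INIT(0);
static int nptcg = 1;		/* max outstanding ptc.g purges */

static void ptcg_down(void)
{
	for (;;) {
		int cur = atomic_read(&ptcg_count);

		if (cur < nptcg &&
		    atomic_cmpxchg(&ptcg_count, cur, cur + 1) == cur)
			return;		/* got one of the nptcg slots */
		cpu_relax();
	}
}

static void ptcg_up(void)
{
	atomic_dec(&ptcg_count);	/* release the slot */
}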
It also adds support for the "PALO" table to get a platform view of the
max number of outstanding ptc.g instructions (which may be different from
the processor view found from PAL_VM_SUMMARY). The PALO specification can
be found at:
http://www.dig64.org/home/DIG64_PALO_R1_0.pdf

Spinaphore implementation by Matthew Wilcox.

Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
---
 include/asm-ia64/sal.h | 17 +++++++++++++++++
 include/asm-ia64/tlbflush.h | 1 +
 2 files changed, 18 insertions(+)

diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index f4904db3b057..3cd637a2c051 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -296,6 +296,9 @@ enum {
 	EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
 #define SAL_PLAT_BUS_ERR_SECT_GUID \
 	EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \
+	EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \
+		 0xca, 0x4d)
 
 #define MAX_CACHE_ERRORS 6
 #define MAX_TLB_ERRORS 6
@@ -879,6 +882,20 @@ extern void ia64_jump_to_sal(struct sal_to_os_boot *);
 
 extern void ia64_sal_handler_init(void *entry_point, void *gpval);
 
+#define PALO_MAX_TLB_PURGES 0xFFFF
+#define PALO_SIG "PALO"
+
+struct palo_table {
+	u8 signature[4]; /* Should be "PALO" */
+	u32 length;
+	u8 minor_revision;
+	u8 major_revision;
+	u8 checksum;
+	u8 reserved1[5];
+	u16 max_tlb_purges;
+	u8 reserved2[6];
+};
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IA64_SAL_H */
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index 7774a1cac0cc..3be25dfed164 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -17,6 +17,7 @@
  * Now for some TLB flushing routines. This is the kind of stuff that
  * can be very expensive, so try to avoid them whenever possible.
  */
+extern void setup_ptcg_sem(int max_purges, int from_palo);
 
 /*
  * Flush everything (kernel mapping may also have changed due to
-- 
cgit v1.2.2

From a6c75b86ce9f01db4ea9912877b526c2dc4d2f0a Mon Sep 17 00:00:00 2001
From: Fenghua Yu
Date: Fri, 14 Mar 2008 13:57:08 -0700
Subject: [IA64] Kernel parameter for max number of concurrent global TLB purges

The patch defines the kernel parameter "nptcg=". The parameter overrides
the max number of concurrent global TLB purges which is reported from
either PAL_VM_SUMMARY or SAL PALO.

Signed-off-by: Fenghua Yu
Signed-off-by: Tony Luck
---
 include/asm-ia64/sal.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 3cd637a2c051..89594b442f83 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -896,6 +896,10 @@ struct palo_table {
 	u8 reserved2[6];
 };
 
+#define NPTCG_FROM_PAL 0
+#define NPTCG_FROM_PALO 1
+#define NPTCG_FROM_KERNEL_PARAMETER 2
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IA64_SAL_H */
-- 
cgit v1.2.2

From 2c6e6db41f01b6b4eb98809350827c9678996698 Mon Sep 17 00:00:00 2001
From: "holt@sgi.com"
Date: Thu, 3 Apr 2008 15:17:13 -0500
Subject: [IA64] Minimize per_cpu reservations.

This patch significantly shrinks boot memory allocation on ia64. It does
this by not allocating per_cpu areas for cpus that can never exist.

In the case where ACPI does not have any NUMA node description of the
cpus, I defaulted to assigning the first 32 round-robin on the known
nodes. For the !CONFIG_ACPI case I used for_each_possible_cpu().
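
As a usage sketch only (the real call site lives in arch/ia64 code
outside these hunks, and srat_num_cpus is a hypothetical name for the
count of cpus found during the SRAT scan), the early map might be
finalized like this:

/* Hypothetical call site, e.g. at the end of an ACPI SRAT scan. */
static void __init example_acpi_numa_fixup(int srat_num_cpus)
{
	/* Ensure at least 32 cpus (plus any hot-plug reserve) end up in
	 * early_cpu_possible_map; unassigned ones get nodes round-robin. */
	per_cpu_scan_finalize((srat_num_cpus < 32) ? 32 : srat_num_cpus,
			      additional_cpus > 0 ? additional_cpus : 0);
}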
Signed-off-by: Robin Holt
Signed-off-by: Tony Luck
---
 include/asm-ia64/acpi.h | 33 +++++++++++++++++++++++++++++++++
 include/asm-ia64/numa.h | 2 ++
 2 files changed, 35 insertions(+)

diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index cd1cc39b5599..fcfad326f4c7 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -35,6 +35,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define COMPILER_DEPENDENT_INT64 long
 #define COMPILER_DEPENDENT_UINT64 unsigned long
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu);
 extern void set_cpei_target_cpu(unsigned int cpu);
 extern unsigned int get_cpei_target_cpu(void);
 extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
 extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
 
 #ifdef CONFIG_ACPI_NUMA
 #if MAX_NUMNODES > 256
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 
 #define acpi_unlazy_tlb(x)
 
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu) \
+	for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+	int low_cpu, high_cpu;
+	int cpu;
+	int next_nid = 0;
+
+	low_cpu = cpus_weight(early_cpu_possible_map);
+
+	high_cpu = max(low_cpu, min_cpus);
+	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+		cpu_set(cpu, early_cpu_possible_map);
+		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+			node_cpuid[cpu].nid = next_nid;
+			next_nid++;
+			if (next_nid >= num_online_nodes())
+				next_nid = 0;
+		}
+	}
+}
+#endif /* CONFIG_ACPI_NUMA */
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 6a8a27cfae3e..3499ff57bf42 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -22,6 +22,8 @@
 
 #include 
 
+#define NUMA_NO_NODE -1
+
 extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 extern pg_data_t *pgdat_list[MAX_NUMNODES];
-- 
cgit v1.2.2

From e4b05d4097eb6dab08bda86a72f6fdfdd9816395 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Wed, 9 Apr 2008 12:26:10 +0900
Subject: [IA64] pgd_offset() constification.

When compiling 2.6.25-rc8-mm1, the warning below appeared, because
walk_page_range() passes its first argument as "const struct mm *" while
pgd_offset() takes a "struct mm *".

  CC mm/pagewalk.o
  mm/pagewalk.c: In function 'walk_page_range':
  mm/pagewalk.c:111: warning: passing argument 1 of 'pgd_offset'
  discards qualifiers from pointer target type

Signed-off-by: KOSAKI Motohiro
Signed-off-by: Tony Luck
---
 include/asm-ia64/pgtable.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e6204f14f614..ed70862ea247 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -371,7 +371,7 @@ pgd_index (unsigned long address)
 /* The offset in the 1-level directory is given by the 3 region bits
    (61..63) and the level-1 bits.
 */
 static inline pgd_t*
-pgd_offset (struct mm_struct *mm, unsigned long address)
+pgd_offset (const struct mm_struct *mm, unsigned long address)
 {
 	return mm->pgd + pgd_index(address);
 }
-- 
cgit v1.2.2

From 34e1ceb1881ec895ad9b1b52d073f414f3aa87a9 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Fri, 28 Mar 2008 14:27:02 -0700
Subject: [IA64] kprobes: kprobe-booster for ia64

Add kprobe-booster support on ia64.

Kprobe-booster improves the performance of kprobes by eliminating
single-step, where possible. Currently, kprobe-booster is implemented on
x86 and x86-64. This is an ia64 port.

On ia64, kprobe-booster executes a copied bundle directly, instead of
single stepping. Bundles which have a B or X unit and which may cause an
exception (including break) are not executed directly. Also, to prevent
hitting break exceptions on the copied bundle, only the hindmost kprobe
is executed directly if several kprobes share a bundle and are placed in
different slots.

Note: set_brl_inst() is used for preparing an instruction buffer (it does
not modify any active code), so it does not need any atomic operation.

Signed-off-by: Masami Hiramatsu
Cc: Anil S Keshavamurthy
Cc: Ananth N Mavinakayanahalli
Cc: bibo,mao
Cc: Rusty Lynch
Cc: Prasanna S Panchamukhi
Cc: Jim Keniston
Cc: Shaohua Li
Signed-off-by: Andrew Morton
Signed-off-by: Tony Luck
---
 include/asm-ia64/kprobes.h | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index d03bf9ff68e3..ef71b57fc2f4 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -30,8 +30,12 @@
 #include 
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE 1
+#define MAX_INSN_SIZE 2 /* last half is for kprobe-booster */
 #define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST (long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) | /* brl */ \
+				(0x1L << 12) | /* many */ \
+				(((i1) & 1) << 36) | ((i2) << 13))) /* imm */
 
 typedef union cmp_inst {
 	struct {
@@ -112,6 +116,7 @@ struct arch_specific_insn {
 #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
 #define INST_FLAG_FIX_BRANCH_REG 2
 #define INST_FLAG_BREAK_INST 4
+ #define INST_FLAG_BOOSTABLE 8
 	unsigned long inst_flag;
 	unsigned short target_br_reg;
 	unsigned short slot;
-- 
cgit v1.2.2

From c19b2930df0621500913c005c06978bd8933110b Mon Sep 17 00:00:00 2001
From: Russ Anderson
Date: Fri, 29 Feb 2008 17:14:44 -0600
Subject: [IA64] Itanium Spec updates
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Updates based on the "Intel® Itanium® Architecture Software Developer's
Manual Specification Update October 2007".
http://download.intel.com/design/itanium/specupdt/24869911.pdf

Signed-off-by: Russ Anderson
Signed-off-by: Tony Luck
---
 include/asm-ia64/pal.h | 72 +++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 63 insertions(+), 9 deletions(-)

diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 8a695d3407d2..67b02901ead4 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -13,6 +13,7 @@
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond
  * Copyright (C) 1999 Srinivasa Prasad Thirumalachar
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
  *
  * 99/10/01 davidm Make sure we pass zero for reserved parameters.
  * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
@@ -73,6 +74,8 @@
 #define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
 #define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */
 #define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */
+#define PAL_VP_INFO 50 /* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING 51 /* Hardware tracking status */
 
 #define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
 #define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@@ -504,7 +507,8 @@ typedef struct pal_cache_check_info_s {
 			wiv : 1, /* Way field valid */
 			reserved2 : 1,
 			dp : 1, /* Data poisoned on MBE */
-			reserved3 : 8,
+			reserved3 : 6,
+			hlth : 2, /* Health indicator */
 
 			index : 20, /* Cache line index */
 			reserved4 : 2,
@@ -542,7 +546,9 @@ typedef struct pal_tlb_check_info_s {
 			dtc : 1, /* Fail in data TC */
 			itc : 1, /* Fail in inst. TC */
 			op : 4, /* Cache operation */
-			reserved3 : 30,
+			reserved3 : 6,
+			hlth : 2, /* Health indicator */
+			reserved4 : 22,
 
 			is : 1, /* instruction set (1 == ia32) */
 			iv : 1, /* instruction set field valid */
@@ -633,7 +639,8 @@ typedef struct pal_uarch_check_info_s {
 			way : 6, /* Way of structure */
 			wv : 1, /* way valid */
 			xv : 1, /* index valid */
-			reserved1 : 8,
+			reserved1 : 6,
+			hlth : 2, /* Health indicator */
 
 			index : 8, /* Index or set of the uarch
 				    * structure that failed. */
@@ -1213,14 +1220,12 @@ ia64_pal_mc_drain (void)
 
 /* Return the machine check dynamic processor state */
 static inline s64
-ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds)
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0);
+	PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
 	if (size)
 		*size = iprv.v0;
-	if (pds)
-		*pds = iprv.v1;
 	return iprv.status;
 }
 
@@ -1281,15 +1286,41 @@ ia64_pal_mc_expected (u64 expected, u64 *previous)
 	return iprv.status;
 }
 
+typedef union pal_hw_tracking_u {
+	u64 pht_data;
+	struct {
+		u64 itc :4, /* Instruction cache tracking */
+		    dct :4, /* Date cache tracking */
+		    itt :4, /* Instruction TLB tracking */
+		    ddt :4, /* Data TLB tracking */
+		    reserved:48;
+	} pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+	if (status)
+		*status = iprv.v0;
+	return iprv.status;
+}
+
 /* Register a platform dependent location with PAL to which it can save
  * minimal processor state in the event of a machine check or initialization
  * event.
 */
 static inline s64
-ia64_pal_mc_register_mem (u64 physical_addr)
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0);
+	PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+	if (req_size)
+		*req_size = iprv.v0;
 	return iprv.status;
 }
 
@@ -1631,6 +1662,29 @@ ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
 	return iprv.status;
 }
 
+typedef union pal_vp_info_u {
+	u64 pvi_val;
+	struct {
+		u64 index: 48, /* virtual feature set info */
+		    vmm_id: 16; /* feature set id */
+	} pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns infomation about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+	if (vp_info)
+		*vp_info = iprv.v0;
+	if (vmm_id)
+		*vmm_id = iprv.v1;
+	return iprv.status;
+}
+
 typedef union pal_itr_valid_u {
 	u64 piv_val;
 	struct {
-- 
cgit v1.2.2

From 98075d245a5bc4aeebc2e9f16fa8b089a5c200ac Mon Sep 17 00:00:00 2001
From: Zoltan Menyhart
Date: Fri, 11 Apr 2008 15:21:35 -0700
Subject: [IA64] Fix NUMA configuration issue

There is a NUMA memory configuration issue in 2.6.24:

A 2-node machine of ours has the following memory layout:

Node 0: 0 - 2 Gbytes
Node 0: 4 - 8 Gbytes
Node 1: 8 - 16 Gbytes
Node 0: 16 - 18 Gbytes

"efi_memmap_init()" merges the last three ranges into one.

"register_active_ranges()" is called as follows:

efi_memmap_walk(register_active_ranges, NULL);

i.e. once for the 4 - 18 Gbytes range. It picks up the node number from
the start address and registers all the memory for node #0.

"register_active_ranges()" should be called as follows to make sure there
is no merged address range at its entry:

efi_memmap_walk(filter_memory, register_active_ranges);

"filter_memory()" is similar to "filter_rsvd_memory()", but the reserved
memory ranges are not filtered out.

Signed-off-by: Zoltan Menyhart
Signed-off-by: Tony Luck
---
 include/asm-ia64/meminit.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index f93308f54b61..7245a5781594 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -35,6 +35,7 @@ extern void find_memory (void);
 extern void reserve_memory (void);
 extern void find_initrd (void);
 extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern int filter_memory (unsigned long start, unsigned long end, void *arg);
 extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
 extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
@@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
 
 #define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
 
-extern int register_active_ranges(u64 start, u64 end, void *arg);
+extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
-- 
cgit v1.2.2
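
To make the new calling convention concrete, here is a rough sketch, not
the actual arch/ia64 implementation, of a filter that splits a merged
range at node boundaries before forwarding each piece to the new
register_active_ranges(u64 start, u64 len, int nid) callback.
node_boundary_after() is a made-up helper:

/* Hypothetical sketch of the splitting filter described above. */
static int __init example_filter_memory(unsigned long start, unsigned long end,
					void *arg)
{
	int (*func)(u64, u64, int) = arg;	/* e.g. register_active_ranges */
	unsigned long cur = start;

	while (cur < end) {
		int nid = paddr_to_nid(__pa(cur));	/* owning node */
		/* node_boundary_after(): made-up helper returning the end
		 * of the node span containing 'cur', capped at 'end'. */
		unsigned long next = node_boundary_after(cur, end);

		(*func)(cur, next - cur, nid);
		cur = next;
	}
	return 0;
}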