Diffstat (limited to 'include/asm-ia64')
-rw-r--r--  include/asm-ia64/acpi.h                     33
-rw-r--r--  include/asm-ia64/bitops.h                   16
-rw-r--r--  include/asm-ia64/cputime.h                 104
-rw-r--r--  include/asm-ia64/dma-mapping.h              28
-rw-r--r--  include/asm-ia64/dmi.h                       5
-rw-r--r--  include/asm-ia64/elf.h                      31
-rw-r--r--  include/asm-ia64/futex.h                     2
-rw-r--r--  include/asm-ia64/gcc_intrin.h               12
-rw-r--r--  include/asm-ia64/hugetlb.h                  79
-rw-r--r--  include/asm-ia64/ide.h                      10
-rw-r--r--  include/asm-ia64/io.h                        5
-rw-r--r--  include/asm-ia64/kprobes.h                   7
-rw-r--r--  include/asm-ia64/kregs.h                     3
-rw-r--r--  include/asm-ia64/kvm.h                     205
-rw-r--r--  include/asm-ia64/kvm_host.h                524
-rw-r--r--  include/asm-ia64/kvm_para.h                 29
-rw-r--r--  include/asm-ia64/machvec.h                  50
-rw-r--r--  include/asm-ia64/machvec_hpzx1.h            16
-rw-r--r--  include/asm-ia64/machvec_hpzx1_swiotlb.h    16
-rw-r--r--  include/asm-ia64/machvec_sn2.h              16
-rw-r--r--  include/asm-ia64/mca.h                       1
-rw-r--r--  include/asm-ia64/meminit.h                   3
-rw-r--r--  include/asm-ia64/numa.h                      2
-rw-r--r--  include/asm-ia64/page.h                      6
-rw-r--r--  include/asm-ia64/pal.h                      72
-rw-r--r--  include/asm-ia64/pgtable.h                   5
-rw-r--r--  include/asm-ia64/processor.h                63
-rw-r--r--  include/asm-ia64/sal.h                      21
-rw-r--r--  include/asm-ia64/semaphore.h               100
-rw-r--r--  include/asm-ia64/smp.h                       3
-rw-r--r--  include/asm-ia64/sn/nodepda.h                1
-rw-r--r--  include/asm-ia64/sn/xp.h                   485
-rw-r--r--  include/asm-ia64/sn/xpc.h                 1267
-rw-r--r--  include/asm-ia64/system.h                   30
-rw-r--r--  include/asm-ia64/thread_info.h              19
-rw-r--r--  include/asm-ia64/tlb.h                      26
-rw-r--r--  include/asm-ia64/tlbflush.h                  1
-rw-r--r--  include/asm-ia64/topology.h                  9
-rw-r--r--  include/asm-ia64/unaligned.h                 7
-rw-r--r--  include/asm-ia64/uncached.h                  6
40 files changed, 1358 insertions(+), 1960 deletions(-)
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index cd1cc39b5599..fcfad326f4c7 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/numa.h>
 #include <asm/system.h>
+#include <asm/numa.h>
 
 #define COMPILER_DEPENDENT_INT64	long
 #define COMPILER_DEPENDENT_UINT64	unsigned long
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu);
 extern void set_cpei_target_cpu(unsigned int cpu);
 extern unsigned int get_cpei_target_cpu(void);
 extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
 extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
 
 #ifdef CONFIG_ACPI_NUMA
 #if MAX_NUMNODES > 256
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 
 #define acpi_unlazy_tlb(x)
 
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu)  \
+	for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+	int low_cpu, high_cpu;
+	int cpu;
+	int next_nid = 0;
+
+	low_cpu = cpus_weight(early_cpu_possible_map);
+
+	high_cpu = max(low_cpu, min_cpus);
+	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+		cpu_set(cpu, early_cpu_possible_map);
+		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+			node_cpuid[cpu].nid = next_nid;
+			next_nid++;
+			if (next_nid >= num_online_nodes())
+				next_nid = 0;
+		}
+	}
+}
+#endif /* CONFIG_ACPI_NUMA */
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 953d3df9dd22..e2ca80037335 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -407,6 +407,22 @@ fls (int t)
 	return ia64_popcnt(x);
 }
 
+/*
+ * Find the last (most significant) bit set.  Undefined for x==0.
+ * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
+ */
+static inline unsigned long
+__fls (unsigned long x)
+{
+	x |= x >> 1;
+	x |= x >> 2;
+	x |= x >> 4;
+	x |= x >> 8;
+	x |= x >> 16;
+	x |= x >> 32;
+	return ia64_popcnt(x) - 1;
+}
+
 #include <asm-generic/bitops/fls64.h>
 
 /*
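
[Note: the new __fls() uses a classic branch-free trick: smear the most significant set bit into every lower position, then count the set bits. A minimal standalone sketch of the same technique in portable C, with GCC's __builtin_popcountl standing in for the ia64_popcnt intrinsic (that substitution is an assumption for portability, not the kernel's code; assumes 64-bit unsigned long):]

	#include <assert.h>

	static unsigned long my_fls(unsigned long x)
	{
		/* Smear the top set bit down into all lower positions... */
		x |= x >> 1;
		x |= x >> 2;
		x |= x >> 4;
		x |= x >> 8;
		x |= x >> 16;
		x |= x >> 32;
		/* ...so the popcount equals (index of the MSB + 1). */
		return __builtin_popcountl(x) - 1;
	}

	int main(void)
	{
		assert(my_fls(9) == 3);		/* matches the comment's example */
		assert(my_fls(1UL << 63) == 63);
		return 0;
	}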
diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h
index 72400a78002a..f9abdec6577a 100644
--- a/include/asm-ia64/cputime.h
+++ b/include/asm-ia64/cputime.h
@@ -1,6 +1,110 @@
+/*
+ * include/asm-ia64/cputime.h:
+ *		Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on <asm-powerpc/cputime.h>.
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#else
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <asm/processor.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero			((cputime_t)0)
+#define cputime_max			((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b)		((__a) + (__b))
+#define cputime_sub(__a, __b)		((__a) - (__b))
+#define cputime_div(__a, __n)		((__a) / (__n))
+#define cputime_halve(__a)		((__a) >> 1)
+#define cputime_eq(__a, __b)		((__a) == (__b))
+#define cputime_gt(__a, __b)		((__a) > (__b))
+#define cputime_ge(__a, __b)		((__a) >= (__b))
+#define cputime_lt(__a, __b)		((__a) < (__b))
+#define cputime_le(__a, __b)		((__a) <= (__b))
+
+#define cputime64_zero			((cputime64_t)0)
+#define cputime64_add(__a, __b)		((__a) + (__b))
+#define cputime64_sub(__a, __b)		((__a) - (__b))
+#define cputime_to_cputime64(__ct)	(__ct)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct)	((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif)	((__jif) * (NSEC_PER_SEC / HZ))
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+#define cputime_to_msecs(__ct)		((__ct) / NSEC_PER_MSEC)
+#define msecs_to_cputime(__msecs)	((__msecs) * NSEC_PER_MSEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct)		((__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs)		((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+	return (ret + val->tv_nsec);
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+	val->tv_sec  = ct / NSEC_PER_SEC;
+	val->tv_nsec = ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(struct timeval *val)
+{
+	cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+	return (ret + val->tv_usec * NSEC_PER_USEC);
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+	val->tv_sec  = ct / NSEC_PER_SEC;
+	val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct)	((__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x)		((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct)	cputime_to_clock_t((cputime_t)__ct)
 
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
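
[Note: with CONFIG_VIRT_CPU_ACCOUNTING a cputime_t is simply a nanosecond count, so all the conversions above are plain integer arithmetic. A hedged standalone sketch of the jiffies and millisecond conversions; the HZ value of 100 here is chosen purely so the numbers come out round, not as ia64's actual tick rate:]

	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define NSEC_PER_MSEC	1000000ULL
	#define HZ		100ULL	/* illustrative assumption */

	typedef unsigned long long cputime_t;

	int main(void)
	{
		cputime_t ct = 3 * NSEC_PER_SEC;	/* 3 s of cpu time */

		printf("jiffies: %llu\n", ct / (NSEC_PER_SEC / HZ));	/* 300 */
		printf("msecs:   %llu\n", ct / NSEC_PER_MSEC);		/* 3000 */
		return 0;
	}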
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index f1735a22d0ea..9f0df9bd46b7 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -23,10 +23,30 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
 {
 	dma_free_coherent(dev, size, cpu_addr, dma_handle);
 }
-#define dma_map_single		platform_dma_map_single
-#define dma_map_sg		platform_dma_map_sg
-#define dma_unmap_single	platform_dma_unmap_single
-#define dma_unmap_sg		platform_dma_unmap_sg
+#define dma_map_single_attrs	platform_dma_map_single_attrs
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size, int dir)
+{
+	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_map_sg_attrs	platform_dma_map_sg_attrs
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
+			     int nents, int dir)
+{
+	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+}
+#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
+static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
+				    size_t size, int dir)
+{
+	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+				int nents, int dir)
+{
+	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+}
 #define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
 #define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
 #define dma_sync_single_for_device platform_dma_sync_single_for_device
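
[Note: the point of this change is that existing drivers keep the old dma_map_single() signature; the new inline simply forwards to the machvec's *_attrs operation with a NULL attrs argument. A hedged, hypothetical caller-side sketch of the unchanged call pattern (example_map and its buffer are illustrative names, not from the patch):]

	static int example_map(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* unchanged driver API; expands to
		 * platform_dma_map_single_attrs(..., NULL) underneath */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(handle))
			return -EIO;
		/* ... hand "handle" to the device, and later: */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}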
diff --git a/include/asm-ia64/dmi.h b/include/asm-ia64/dmi.h
index f3efaa229525..00eb1b130b63 100644
--- a/include/asm-ia64/dmi.h
+++ b/include/asm-ia64/dmi.h
@@ -3,4 +3,9 @@
 
 #include <asm/io.h>
 
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+
 #endif
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index f8e83eca67a2..5e0c1a6bce8d 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -26,6 +26,7 @@
 #define ELF_ARCH	EM_IA_64
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 
 /* Least-significant four bits of ELF header's e_flags are OS-specific.  The bits are
    interpreted as follows by Linux: */
@@ -154,6 +155,30 @@ extern void ia64_init_addr_space (void);
 #define ELF_NGREG	128	/* we really need just 72 but let's leave some headroom... */
 #define ELF_NFPREG	128	/* f0 and f1 could be omitted, but so what... */
 
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET     0
+#define ELF_NAT_OFFSET     (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET      (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET    (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET  (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET     (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i)   (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i)   (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET  (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET  (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET  (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET  (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET   (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET   (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET  (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET  (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET  (57 * sizeof(elf_greg_t))
+
 typedef unsigned long elf_fpxregset_t;
 
 typedef unsigned long elf_greg_t;
@@ -183,12 +208,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
 
 struct task_struct;
 
-extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
-extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
 #define GATE_EHDR	((const struct elfhdr *) GATE_ADDR)
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 8a98a2654139..c7f0f062239c 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -2,9 +2,9 @@
 #define _ASM_FUTEX_H
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
 #include <asm/system.h>
-#include <asm/uaccess.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
 do { \
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index de2ed2cbdd84..2fe292c275fe 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -21,6 +21,10 @@
 
 #define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))
 
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
 extern void ia64_bad_param_for_setreg (void);
 extern void ia64_bad_param_for_getreg (void);
 
@@ -517,6 +521,14 @@ do { \
 #define ia64_ptrd(addr, size)						\
 	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
 
+#define ia64_ttag(addr)							\
+({									\
+	__u64 ia64_intri_res;						\
+	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+	ia64_intri_res;							\
+})
+
+
 /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
 
 #define ia64_lfhint_none	0
diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h
new file mode 100644
index 000000000000..f28a9701f1cf
--- /dev/null
+++ b/include/asm-ia64/hugetlb.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_IA64_HUGETLB_H
+#define _ASM_IA64_HUGETLB_H
+
+#include <asm/page.h>
+
+
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+			    unsigned long end, unsigned long floor,
+			    unsigned long ceiling);
+
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr,
+					 unsigned long len)
+{
+	return (REGION_NUMBER(addr) == RGN_HPAGE ||
+		REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_IA64_HUGETLB_H */
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h
index 1ccf23809329..8fa3f8cd067a 100644
--- a/include/asm-ia64/ide.h
+++ b/include/asm-ia64/ide.h
@@ -16,8 +16,6 @@
 
 #include <linux/irq.h>
 
-#define IDE_ARCH_OBSOLETE_DEFAULTS
-
 static inline int ide_default_irq(unsigned long base)
 {
 	switch (base) {
@@ -46,14 +44,6 @@ static inline unsigned long ide_default_io_base(int index)
 	}
 }
 
-#define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */
-
-#ifdef CONFIG_PCI
-#define ide_init_default_irq(base)	(0)
-#else
-#define ide_init_default_irq(base)	ide_default_irq(base)
-#endif
-
 #include <asm-generic/ide_iops.h>
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index 4ebed77aa472..260a85ac9d6a 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -423,11 +423,6 @@ extern void __iomem * ioremap(unsigned long offset, unsigned long size);
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap (volatile void __iomem *addr);
 
-/* Use normal IO mappings for DMI */
-#define dmi_ioremap ioremap
-#define dmi_iounmap(x,l) iounmap(x)
-#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
-
 /*
  * String version of IO memory access ops:
  */
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index d03bf9ff68e3..ef71b57fc2f4 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -30,8 +30,12 @@
 #include <asm/break.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE   1
+#define MAX_INSN_SIZE   2	/* last half is for kprobe-booster */
 #define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST	(long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) |	/* brl */ \
+				(0x1L << 12) |	/* many */ \
+				(((i1) & 1) << 36) | ((i2) << 13))) /* imm */
 
 typedef union cmp_inst {
 	struct {
@@ -112,6 +116,7 @@ struct arch_specific_insn {
  #define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
  #define INST_FLAG_FIX_BRANCH_REG		2
  #define INST_FLAG_BREAK_INST			4
+ #define INST_FLAG_BOOSTABLE			8
 	unsigned long inst_flag;
 	unsigned short target_br_reg;
 	unsigned short slot;
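
[Note: MAX_INSN_SIZE grows to 2 slots so the kprobe-booster can plant a long branch (brl) after the copied instruction and jump straight back to the original stream, skipping the single-step trap. A hedged sketch of how such a branch might be assembled with BRL_INST(); the i1/i2 split here (sign bit vs. low 20 immediate bits of the bundle-relative offset) is inferred from the macro's bit layout, not quoted from kprobes.c:]

	static unsigned long make_boost_branch(unsigned long from, unsigned long to)
	{
		/* IP-relative offset, counted in 16-byte bundles */
		long rel = ((long)to - (long)from) >> 4;

		/* assumption: i1 carries the top immediate bit, i2 the low bits */
		return BRL_INST(rel >> 59, rel & 0xfffff);
	}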
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h
index 7e55a584975c..aefcdfee7f23 100644
--- a/include/asm-ia64/kregs.h
+++ b/include/asm-ia64/kregs.h
@@ -31,6 +31,9 @@
 #define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
 #define IA64_TR_CURRENT_STACK	1	/* dtr1: maps kernel's memory- & register-stacks */
 
+#define IA64_TR_ALLOC_BASE	2	/* itr&dtr: Base of dynamic TR resource*/
+#define IA64_TR_ALLOC_MAX	32	/* Max number for dynamic use*/
+
 /* Processor status register bits: */
 #define IA64_PSR_BE_BIT		1
 #define IA64_PSR_UP_BIT		2
diff --git a/include/asm-ia64/kvm.h b/include/asm-ia64/kvm.h
index 030d29b4b26b..eb2d3559d089 100644
--- a/include/asm-ia64/kvm.h
+++ b/include/asm-ia64/kvm.h
@@ -1,6 +1,205 @@
-#ifndef __LINUX_KVM_IA64_H
-#define __LINUX_KVM_IA64_H
+#ifndef __ASM_IA64_KVM_H
+#define __ASM_IA64_KVM_H
 
-/* ia64 does not support KVM */
+/*
+ * asm-ia64/kvm.h: kvm structure definitions for ia64
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <asm/types.h>
+#include <asm/fpu.h>
+
+#include <linux/ioctl.h>
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+#define KVM_IOAPIC_NUM_PINS  24
+
+struct kvm_ioapic_state {
+	__u64 base_address;
+	__u32 ioregsel;
+	__u32 id;
+	__u32 irr;
+	__u32 pad;
+	union {
+		__u64 bits;
+		struct {
+			__u8 vector;
+			__u8 delivery_mode:3;
+			__u8 dest_mode:1;
+			__u8 delivery_status:1;
+			__u8 polarity:1;
+			__u8 remote_irr:1;
+			__u8 trig_mode:1;
+			__u8 mask:1;
+			__u8 reserve:7;
+			__u8 reserved[4];
+			__u8 dest_id;
+		} fields;
+	} redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER   0
+#define KVM_IRQCHIP_PIC_SLAVE    1
+#define KVM_IRQCHIP_IOAPIC       2
+
+#define KVM_CONTEXT_SIZE	8*1024
+
+union context {
+	/* 8K size */
+	char	dummy[KVM_CONTEXT_SIZE];
+	struct {
+		unsigned long	psr;
+		unsigned long	pr;
+		unsigned long	caller_unat;
+		unsigned long	pad;
+		unsigned long	gr[32];
+		unsigned long	ar[128];
+		unsigned long	br[8];
+		unsigned long	cr[128];
+		unsigned long	rr[8];
+		unsigned long	ibr[8];
+		unsigned long	dbr[8];
+		unsigned long	pkr[8];
+		struct ia64_fpreg fr[128];
+	};
+};
+
+struct thash_data {
+	union {
+		struct {
+			unsigned long p    :  1; /* 0 */
+			unsigned long rv1  :  1; /* 1 */
+			unsigned long ma   :  3; /* 2-4 */
+			unsigned long a    :  1; /* 5 */
+			unsigned long d    :  1; /* 6 */
+			unsigned long pl   :  2; /* 7-8 */
+			unsigned long ar   :  3; /* 9-11 */
+			unsigned long ppn  : 38; /* 12-49 */
+			unsigned long rv2  :  2; /* 50-51 */
+			unsigned long ed   :  1; /* 52 */
+			unsigned long ig1  : 11; /* 53-63 */
+		};
+		struct {
+			unsigned long __rv1 : 53; /* 0-52 */
+			unsigned long contiguous : 1; /*53 */
+			unsigned long tc : 1;     /* 54 TR or TC */
+			unsigned long cl : 1;
+			/* 55 I side or D side cache line */
+			unsigned long len  : 4;   /* 56-59 */
+			unsigned long io  : 1;	/* 60 entry is for io or not */
+			unsigned long nomap : 1;
+			/* 61 entry cann't be inserted into machine TLB.*/
+			unsigned long checked : 1;
+			/* 62 for VTLB/VHPT sanity check */
+			unsigned long invalid : 1;
+			/* 63 invalid entry */
+		};
+		unsigned long page_flags;
+	};			/* same for VHPT and TLB */
+
+	union {
+		struct {
+			unsigned long rv3 : 2;
+			unsigned long ps  : 6;
+			unsigned long key : 24;
+			unsigned long rv4 : 32;
+		};
+		unsigned long itir;
+	};
+	union {
+		struct {
+			unsigned long ig2 : 12;
+			unsigned long vpn : 49;
+			unsigned long vrn : 3;
+		};
+		unsigned long ifa;
+		unsigned long vadr;
+		struct {
+			unsigned long tag : 63;
+			unsigned long ti  : 1;
+		};
+		unsigned long etag;
+	};
+	union {
+		struct thash_data *next;
+		unsigned long rid;
+		unsigned long gpaddr;
+	};
+};
+
+#define	NITRS	8
+#define NDTRS	8
+
+struct saved_vpd {
+	unsigned long  vhpi;
+	unsigned long  vgr[16];
+	unsigned long  vbgr[16];
+	unsigned long  vnat;
+	unsigned long  vbnat;
+	unsigned long  vcpuid[5];
+	unsigned long  vpsr;
+	unsigned long  vpr;
+	unsigned long  vcr[128];
+};
+
+struct kvm_regs {
+	char *saved_guest;
+	char *saved_stack;
+	struct saved_vpd vpd;
+	/*Arch-regs*/
+	int mp_state;
+	unsigned long vmm_rr;
+	/* TR and TC.  */
+	struct thash_data itrs[NITRS];
+	struct thash_data dtrs[NDTRS];
+	/* Bit is set if there is a tr/tc for the region.  */
+	unsigned char itr_regions;
+	unsigned char dtr_regions;
+	unsigned char tc_regions;
+
+	char irq_check;
+	unsigned long saved_itc;
+	unsigned long itc_check;
+	unsigned long timer_check;
+	unsigned long timer_pending;
+	unsigned long last_itc;
+
+	unsigned long vrr[8];
+	unsigned long ibr[8];
+	unsigned long dbr[8];
+	unsigned long insvc[4];		/* Interrupt in service.  */
+	unsigned long xtp;
+
+	unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+	unsigned long metaphysical_rr4;	/* from kvm_arch (so is pinned) */
+	unsigned long metaphysical_saved_rr0; /* from kvm_arch          */
+	unsigned long metaphysical_saved_rr4; /* from kvm_arch          */
+	unsigned long fp_psr;       /*used for lazy float register */
+	unsigned long saved_gp;
+	/*for phycial  emulation */
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
 
 #endif
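
[Note: the unions above describe a VHPT/TLB entry packed into exactly four 64-bit words by overlaying bitfield views. A hedged, illustrative build-time check (not part of the patch; BUILD_BUG_ON is redefined locally here) of the layout assumptions those overlays rely on:]

	#include <stddef.h>

	#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2*!!(cond)]))

	static inline void thash_data_layout_check(void)
	{
		/* union context must really be the advertised 8K */
		BUILD_BUG_ON(sizeof(union context) != KVM_CONTEXT_SIZE);
		/* each bitfield group packs into one unsigned long */
		BUILD_BUG_ON(offsetof(struct thash_data, itir) != 8);
		BUILD_BUG_ON(sizeof(struct thash_data) != 4 * sizeof(unsigned long));
	}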
diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h
new file mode 100644
index 000000000000..c082c208c1f3
--- /dev/null
+++ b/include/asm-ia64/kvm_host.h
@@ -0,0 +1,524 @@
+/*
+ * kvm_host.h: used for kvm module, and hold ia64-specific sections.
+ *
+ * Copyright (C) 2007, Intel Corporation.
+ *
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#ifndef __ASM_KVM_HOST_H
+#define __ASM_KVM_HOST_H
+
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+
+#define KVM_MAX_VCPUS 4
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that does not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+
+/* define exit reasons from vmm to kvm*/
+#define EXIT_REASON_VM_PANIC		0
+#define EXIT_REASON_MMIO_INSTRUCTION	1
+#define EXIT_REASON_PAL_CALL		2
+#define EXIT_REASON_SAL_CALL		3
+#define EXIT_REASON_SWITCH_RR6		4
+#define EXIT_REASON_VM_DESTROY		5
+#define EXIT_REASON_EXTERNAL_INTERRUPT	6
+#define EXIT_REASON_IPI			7
+#define EXIT_REASON_PTC_G		8
+
+/*Define vmm address space and vm data space.*/
+#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SHIFT 24
+#define KVM_VMM_BASE 0xD000000000000000UL
+#define VMM_SIZE (8UL<<20)
+
+/*
+ * Define vm_buffer, used by PAL Services, base address.
+ * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M
+ */
+#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
+#define KVM_VM_BUFFER_SIZE (8UL<<20)
+
+/*Define Virtual machine data layout.*/
+#define KVM_VM_DATA_SHIFT	24
+#define KVM_VM_DATA_SIZE	(1UL << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VMM_SIZE)
+
+
+#define KVM_P2M_BASE		KVM_VM_DATA_BASE
+#define KVM_P2M_OFS		0
+#define KVM_P2M_SIZE		(8UL << 20)
+
+#define KVM_VHPT_BASE		(KVM_P2M_BASE + KVM_P2M_SIZE)
+#define KVM_VHPT_OFS		KVM_P2M_SIZE
+#define KVM_VHPT_BLOCK_SIZE	(2UL << 20)
+#define VHPT_SHIFT		18
+#define VHPT_SIZE		(1UL << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES	(1<<(VHPT_SHIFT-5))
+
+#define KVM_VTLB_BASE		(KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_OFS		(KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_BLOCK_SIZE	(1UL<<20)
+#define VTLB_SHIFT		17
+#define VTLB_SIZE		(1UL<<VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES	(1<<(VTLB_SHIFT-5))
+
+#define KVM_VPD_BASE		(KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_OFS		(KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_BLOCK_SIZE	(2UL<<20)
+#define VPD_SHIFT		16
+#define VPD_SIZE		(1UL<<VPD_SHIFT)
+
+#define KVM_VCPU_BASE		(KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_OFS		(KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_BLOCK_SIZE	(2UL<<20)
+#define VCPU_SHIFT		18
+#define VCPU_SIZE		(1UL<<VCPU_SHIFT)
+#define MAX_VCPU_NUM		KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
+
+#define KVM_VM_BASE		(KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_OFS		(KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_BLOCK_SIZE	(1UL<<19)
+
+#define KVM_MEM_DIRTY_LOG_BASE	(KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_OFS	(KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_SIZE	(1UL<<19)
+
+/* Get vpd, vhpt, tlb, vcpu, base*/
+#define VPD_ADDR(n)	(KVM_VPD_BASE+n*VPD_SIZE)
+#define VHPT_ADDR(n)	(KVM_VHPT_BASE+n*VHPT_SIZE)
+#define VTLB_ADDR(n)	(KVM_VTLB_BASE+n*VTLB_SIZE)
+#define VCPU_ADDR(n)	(KVM_VCPU_BASE+n*VCPU_SIZE)
+
+/*IO section definitions*/
+#define IOREQ_READ	1
+#define IOREQ_WRITE	0
+
+#define STATE_IOREQ_NONE	0
+#define STATE_IOREQ_READY	1
+#define STATE_IOREQ_INPROCESS	2
+#define STATE_IORESP_READY	3
+
+/*Guest Physical address layout.*/
+#define GPFN_MEM		(0UL << 60) /* Guest pfn is normal mem */
+#define GPFN_FRAME_BUFFER	(1UL << 60) /* VGA framebuffer */
+#define GPFN_LOW_MMIO		(2UL << 60) /* Low MMIO range */
+#define GPFN_PIB		(3UL << 60) /* PIB base */
+#define GPFN_IOSAPIC		(4UL << 60) /* IOSAPIC base */
+#define GPFN_LEGACY_IO		(5UL << 60) /* Legacy I/O base */
+#define GPFN_GFW		(6UL << 60) /* Guest Firmware */
+#define GPFN_HIGH_MMIO		(7UL << 60) /* High MMIO range */
+
+#define GPFN_IO_MASK		(7UL << 60) /* Guest pfn is I/O type */
+#define GPFN_INV_MASK		(1UL << 63) /* Guest pfn is invalid */
+#define INVALID_MFN		(~0UL)
+#define MEM_G			(1UL << 30)
+#define MEM_M			(1UL << 20)
+#define MMIO_START		(3 * MEM_G)
+#define MMIO_SIZE		(512 * MEM_M)
+#define VGA_IO_START		0xA0000UL
+#define VGA_IO_SIZE		0x20000
+#define LEGACY_IO_START		(MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE		(64 * MEM_M)
+#define IO_SAPIC_START		0xfec00000UL
+#define IO_SAPIC_SIZE		0x100000
+#define PIB_START		0xfee00000UL
+#define PIB_SIZE		0x200000
+#define GFW_START		(4 * MEM_G - 16 * MEM_M)
+#define GFW_SIZE		(16 * MEM_M)
+
+/*Deliver mode, defined for ioapic.c*/
+#define dest_Fixed IOSAPIC_FIXED
+#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
+
+#define NMI_VECTOR			2
+#define ExtINT_VECTOR			0
+#define NULL_VECTOR			(-1)
+#define IA64_SPURIOUS_INT_VECTOR	0x0f
+
+#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
+
+/*
+ *Delivery mode
+ */
+#define SAPIC_DELIV_SHIFT	8
+#define SAPIC_FIXED		0x0
+#define SAPIC_LOWEST_PRIORITY	0x1
+#define SAPIC_PMI		0x2
+#define SAPIC_NMI		0x4
+#define SAPIC_INIT		0x5
+#define SAPIC_EXTINT		0x7
+
+/*
+ * vcpu->requests bit members for arch
+ */
+#define KVM_REQ_PTC_G		32
+#define KVM_REQ_RESUME		33
+
+#define KVM_PAGES_PER_HPAGE	1
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_guest_debug{
+};
+
+struct kvm_mmio_req {
+	uint64_t addr;		/*  physical address */
+	uint64_t size;		/*  size in bytes */
+	uint64_t data;		/*  data (or paddr of data) */
+	uint8_t state:4;
+	uint8_t dir:1;		/*  1=read, 0=write */
+};
+
+/*Pal data struct */
+struct kvm_pal_call{
+	/*In area*/
+	uint64_t gr28;
+	uint64_t gr29;
+	uint64_t gr30;
+	uint64_t gr31;
+	/*Out area*/
+	struct ia64_pal_retval ret;
+};
+
+/* Sal data structure */
+struct kvm_sal_call{
+	/*In area*/
+	uint64_t in0;
+	uint64_t in1;
+	uint64_t in2;
+	uint64_t in3;
+	uint64_t in4;
+	uint64_t in5;
+	uint64_t in6;
+	uint64_t in7;
+	struct sal_ret_values ret;
+};
+
+/*Guest change rr6*/
+struct kvm_switch_rr6 {
+	uint64_t old_rr;
+	uint64_t new_rr;
+};
+
+union ia64_ipi_a{
+	unsigned long val;
+	struct {
+		unsigned long rv  : 3;
+		unsigned long ir  : 1;
+		unsigned long eid : 8;
+		unsigned long id  : 8;
+		unsigned long ib_base : 44;
+	};
+};
+
+union ia64_ipi_d {
+	unsigned long val;
+	struct {
+		unsigned long vector : 8;
+		unsigned long dm  : 3;
+		unsigned long ig  : 53;
+	};
+};
+
+/*ipi check exit data*/
+struct kvm_ipi_data{
+	union ia64_ipi_a addr;
+	union ia64_ipi_d data;
+};
+
+/*global purge data*/
+struct kvm_ptc_g {
+	unsigned long vaddr;
+	unsigned long rr;
+	unsigned long ps;
+	struct kvm_vcpu *vcpu;
+};
+
+/*Exit control data */
+struct exit_ctl_data{
+	uint32_t exit_reason;
+	uint32_t vm_status;
+	union {
+		struct kvm_mmio_req	ioreq;
+		struct kvm_pal_call	pal_data;
+		struct kvm_sal_call	sal_data;
+		struct kvm_switch_rr6	rr_data;
+		struct kvm_ipi_data	ipi_data;
+		struct kvm_ptc_g	ptc_g_data;
+	} u;
+};
+
+union pte_flags {
+	unsigned long val;
+	struct {
+		unsigned long p    :  1; /*0      */
+		unsigned long      :  1; /* 1     */
+		unsigned long ma   :  3; /* 2-4   */
+		unsigned long a    :  1; /* 5     */
+		unsigned long d    :  1; /* 6     */
+		unsigned long pl   :  2; /* 7-8   */
+		unsigned long ar   :  3; /* 9-11  */
+		unsigned long ppn  : 38; /* 12-49 */
+		unsigned long      :  2; /* 50-51 */
+		unsigned long ed   :  1; /* 52    */
+	};
+};
+
+union ia64_pta {
+	unsigned long val;
+	struct {
+		unsigned long ve : 1;
+		unsigned long reserved0 : 1;
+		unsigned long size : 6;
+		unsigned long vf : 1;
+		unsigned long reserved1 : 6;
+		unsigned long base : 49;
+	};
+};
+
+struct thash_cb {
+	/* THASH base information */
+	struct thash_data	*hash; /* hash table pointer */
+	union ia64_pta		pta;
+	int			num;
+};
+
+struct kvm_vcpu_stat {
+};
+
+struct kvm_vcpu_arch {
+	int launched;
+	int last_exit;
+	int last_run_cpu;
+	int vmm_tr_slot;
+	int vm_tr_slot;
+
+#define KVM_MP_STATE_RUNNABLE		0
+#define KVM_MP_STATE_UNINITIALIZED	1
+#define KVM_MP_STATE_INIT_RECEIVED	2
+#define KVM_MP_STATE_HALTED		3
+	int mp_state;
+
+#define MAX_PTC_G_NUM			3
+	int ptc_g_count;
+	struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
+
+	/*halt timer to wake up sleepy vcpus*/
+	struct hrtimer hlt_timer;
+	long ht_active;
+
+	struct kvm_lapic *apic;    /* kernel irqchip context */
+	struct vpd *vpd;
+
+	/* Exit data for vmm_transition*/
+	struct exit_ctl_data exit_data;
+
+	cpumask_t cache_coherent_map;
+
+	unsigned long vmm_rr;
+	unsigned long host_rr6;
+	unsigned long psbits[8];
+	unsigned long cr_iipa;
+	unsigned long cr_isr;
+	unsigned long vsa_base;
+	unsigned long dirty_log_lock_pa;
+	unsigned long __gp;
+	/* TR and TC.  */
+	struct thash_data itrs[NITRS];
+	struct thash_data dtrs[NDTRS];
+	/* Bit is set if there is a tr/tc for the region.  */
+	unsigned char itr_regions;
+	unsigned char dtr_regions;
+	unsigned char tc_regions;
+	/* purge all */
+	unsigned long ptce_base;
+	unsigned long ptce_count[2];
+	unsigned long ptce_stride[2];
+	/* itc/itm */
+	unsigned long last_itc;
+	long itc_offset;
+	unsigned long itc_check;
+	unsigned long timer_check;
+	unsigned long timer_pending;
+
+	unsigned long vrr[8];
+	unsigned long ibr[8];
+	unsigned long dbr[8];
+	unsigned long insvc[4];		/* Interrupt in service.  */
+	unsigned long xtp;
+
+	unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+	unsigned long metaphysical_rr4;	/* from kvm_arch (so is pinned) */
+	unsigned long metaphysical_saved_rr0; /* from kvm_arch          */
+	unsigned long metaphysical_saved_rr4; /* from kvm_arch          */
+	unsigned long fp_psr;       /*used for lazy float register */
+	unsigned long saved_gp;
+	/*for phycial  emulation */
+	int mode_flags;
+	struct thash_cb vtlb;
+	struct thash_cb vhpt;
+	char irq_check;
+	char irq_new_pending;
+
+	unsigned long opcode;
+	unsigned long cause;
+	union context host;
+	union context guest;
+};
+
+struct kvm_vm_stat {
+	u64 remote_tlb_flush;
+};
+
+struct kvm_sal_data {
+	unsigned long boot_ip;
+	unsigned long boot_gp;
+};
+
+struct kvm_arch {
+	unsigned long	vm_base;
+	unsigned long	metaphysical_rr0;
+	unsigned long	metaphysical_rr4;
+	unsigned long	vmm_init_rr;
+	unsigned long	vhpt_base;
+	unsigned long	vtlb_base;
+	unsigned long	vpd_base;
+	spinlock_t dirty_log_lock;
+	struct kvm_ioapic *vioapic;
+	struct kvm_vm_stat stat;
+	struct kvm_sal_data rdv_sal_data;
+};
+
+union cpuid3_t {
+	u64 value;
+	struct {
+		u64 number : 8;
+		u64 revision : 8;
+		u64 model : 8;
+		u64 family : 8;
+		u64 archrev : 8;
+		u64 rv : 24;
+	};
+};
+
+struct kvm_pt_regs {
+	/* The following registers are saved by SAVE_MIN: */
+	unsigned long b6;  /* scratch */
+	unsigned long b7;  /* scratch */
+
+	unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+	unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+	unsigned long r8;  /* scratch (return value register 0) */
+	unsigned long r9;  /* scratch (return value register 1) */
+	unsigned long r10; /* scratch (return value register 2) */
+	unsigned long r11; /* scratch (return value register 3) */
+
+	unsigned long cr_ipsr; /* interrupted task's psr */
+	unsigned long cr_iip;  /* interrupted task's instruction pointer */
+	unsigned long cr_ifs;  /* interrupted task's function state */
+
+	unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+	unsigned long ar_pfs;  /* prev function state */
+	unsigned long ar_rsc;  /* RSE configuration */
+	/* The following two are valid only if cr_ipsr.cpl > 0: */
+	unsigned long ar_rnat;  /* RSE NaT */
+	unsigned long ar_bspstore; /* RSE bspstore */
+
+	unsigned long pr;  /* 64 predicate registers (1 bit each) */
+	unsigned long b0;  /* return pointer (bp) */
+	unsigned long loadrs;  /* size of dirty partition << 16 */
+
+	unsigned long r1;  /* the gp pointer */
+	unsigned long r12; /* interrupted task's memory stack pointer */
+	unsigned long r13; /* thread pointer */
+
+	unsigned long ar_fpsr;  /* floating point status (preserved) */
+	unsigned long r15;  /* scratch */
+
+	/* The remaining registers are NOT saved for system calls.  */
+	unsigned long r14;  /* scratch */
+	unsigned long r2;  /* scratch */
+	unsigned long r3;  /* scratch */
+	unsigned long r16;  /* scratch */
+	unsigned long r17;  /* scratch */
+	unsigned long r18;  /* scratch */
+	unsigned long r19;  /* scratch */
+	unsigned long r20;  /* scratch */
+	unsigned long r21;  /* scratch */
+	unsigned long r22;  /* scratch */
+	unsigned long r23;  /* scratch */
+	unsigned long r24;  /* scratch */
+	unsigned long r25;  /* scratch */
+	unsigned long r26;  /* scratch */
+	unsigned long r27;  /* scratch */
+	unsigned long r28;  /* scratch */
+	unsigned long r29;  /* scratch */
+	unsigned long r30;  /* scratch */
+	unsigned long r31;  /* scratch */
+	unsigned long ar_ccv;  /* compare/exchange value (scratch) */
+
+	/*
+	 * Floating point registers that the kernel considers scratch:
+	 */
+	struct ia64_fpreg f6;  /* scratch */
+	struct ia64_fpreg f7;  /* scratch */
+	struct ia64_fpreg f8;  /* scratch */
+	struct ia64_fpreg f9;  /* scratch */
+	struct ia64_fpreg f10; /* scratch */
+	struct ia64_fpreg f11; /* scratch */
+
+	unsigned long r4;  /* preserved */
+	unsigned long r5;  /* preserved */
+	unsigned long r6;  /* preserved */
+	unsigned long r7;  /* preserved */
+	unsigned long eml_unat;    /* used for emulating instruction */
+	unsigned long pad0;     /* alignment pad */
+};
+
+static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
+{
+	return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+}
+
+typedef int kvm_vmm_entry(void);
+typedef void kvm_tramp_entry(union context *host, union context *guest);
+
+struct kvm_vmm_info{
+	struct module	*module;
+	kvm_vmm_entry	*vmm_entry;
+	kvm_tramp_entry *tramp_entry;
+	unsigned long	vmm_ivt;
+};
+
+int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+void kvm_sal_emul(struct kvm_vcpu *vcpu);
+
+#endif
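
[Note: the KVM_VM_DATA block above is a fixed 16MB carve-up at KVM_VM_DATA_BASE: p2m, then the VHPT, VTLB, VPD, vcpu, VM, and dirty-log blocks, laid out back to back via the *_OFS chains. A hedged standalone sketch (not from the patch) that just sums the block sizes to confirm the carve-up exactly fills the 16MB region:]

	#include <stdio.h>

	int main(void)
	{
		unsigned long total = 1UL << 24;	/* KVM_VM_DATA_SIZE */
		unsigned long used  = (8UL << 20)	/* P2M */
				    + (2UL << 20)	/* VHPT block */
				    + (1UL << 20)	/* VTLB block */
				    + (2UL << 20)	/* VPD block */
				    + (2UL << 20)	/* vcpu block */
				    + (1UL << 19)	/* VM block */
				    + (1UL << 19);	/* dirty log */

		printf("used %lu of %lu bytes (%s)\n", used, total,
		       used <= total ? "fits" : "overflows");
		return 0;
	}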
diff --git a/include/asm-ia64/kvm_para.h b/include/asm-ia64/kvm_para.h
new file mode 100644
index 000000000000..9f9796bb3441
--- /dev/null
+++ b/include/asm-ia64/kvm_para.h
@@ -0,0 +1,29 @@
+#ifndef __IA64_KVM_PARA_H
+#define __IA64_KVM_PARA_H
+
+/*
+ * asm-ia64/kvm_para.h
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+	return 0;
+}
+
+#endif
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index c201a2020aa4..9f020eb825c5 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -22,6 +22,7 @@ struct pci_bus;
 struct task_struct;
 struct pci_dev;
 struct msi_desc;
+struct dma_attrs;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -56,6 +57,11 @@ typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist
 typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);
 
+typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
+typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
  * expected to follow this architected model (see Section 10.7 in the
@@ -136,10 +142,10 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_dma_init		ia64_mv.dma_init
 # define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
 # define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-# define platform_dma_map_single	ia64_mv.dma_map_single
-# define platform_dma_unmap_single	ia64_mv.dma_unmap_single
-# define platform_dma_map_sg		ia64_mv.dma_map_sg
-# define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
+# define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
+# define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
+# define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
+# define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
 # define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
 # define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
 # define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
@@ -190,10 +196,10 @@ struct ia64_machine_vector {
 	ia64_mv_dma_init *dma_init;
 	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
 	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single *dma_map_single;
-	ia64_mv_dma_unmap_single *dma_unmap_single;
-	ia64_mv_dma_map_sg *dma_map_sg;
-	ia64_mv_dma_unmap_sg *dma_unmap_sg;
+	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
+	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
+	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
+	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
 	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
 	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
 	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
@@ -240,10 +246,10 @@ struct ia64_machine_vector {
 	platform_dma_init,			\
 	platform_dma_alloc_coherent,		\
 	platform_dma_free_coherent,		\
-	platform_dma_map_single,		\
-	platform_dma_unmap_single,		\
-	platform_dma_map_sg,			\
-	platform_dma_unmap_sg,			\
+	platform_dma_map_single_attrs,		\
+	platform_dma_unmap_single_attrs,	\
+	platform_dma_map_sg_attrs,		\
+	platform_dma_unmap_sg_attrs,		\
 	platform_dma_sync_single_for_cpu,	\
 	platform_dma_sync_sg_for_cpu,		\
 	platform_dma_sync_single_for_device,	\
@@ -292,9 +298,13 @@ extern ia64_mv_dma_init swiotlb_init;
 extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
 extern ia64_mv_dma_map_single		swiotlb_map_single;
+extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
 extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
+extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
 extern ia64_mv_dma_map_sg		swiotlb_map_sg;
+extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
 extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
@@ -340,17 +350,17 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
 #ifndef platform_dma_free_coherent
 # define platform_dma_free_coherent	swiotlb_free_coherent
 #endif
-#ifndef platform_dma_map_single
-# define platform_dma_map_single	swiotlb_map_single
+#ifndef platform_dma_map_single_attrs
+# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
 #endif
-#ifndef platform_dma_unmap_single
-# define platform_dma_unmap_single	swiotlb_unmap_single
+#ifndef platform_dma_unmap_single_attrs
+# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
 #endif
-#ifndef platform_dma_map_sg
-# define platform_dma_map_sg		swiotlb_map_sg
+#ifndef platform_dma_map_sg_attrs
+# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
 #endif
-#ifndef platform_dma_unmap_sg
-# define platform_dma_unmap_sg		swiotlb_unmap_sg
+#ifndef platform_dma_unmap_sg_attrs
+# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
 #endif
 #ifndef platform_dma_sync_single_for_cpu
 # define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
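
[Note: the machvec pattern being renamed here is a per-platform dispatch table: generic code always calls platform_dma_map_single_attrs, which is either a direct function (platform-specific build) or a load through ia64_mv, a struct of function pointers (generic build). A hedged standalone miniature of that design in plain C; all names below are illustrative, not the kernel's:]

	#include <stdio.h>

	struct machine_vector {
		long (*dma_map_single_attrs)(void *dev, void *cpu_addr,
					     unsigned long size, int dir,
					     void *attrs);
	};

	/* one platform's implementation; identity mapping as a stand-in */
	static long sba_like_map(void *dev, void *cpu_addr,
				 unsigned long size, int dir, void *attrs)
	{
		(void)dev; (void)size; (void)dir; (void)attrs;
		return (long)cpu_addr;
	}

	static struct machine_vector mv = { .dma_map_single_attrs = sba_like_map };

	int main(void)
	{
		char buf[64];
		/* generic code: one indirect call; the platform decides the rest */
		long handle = mv.dma_map_single_attrs(NULL, buf, sizeof buf, 1, NULL);
		printf("mapped at %#lx\n", (unsigned long)handle);
		return 0;
	}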
diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h
index e90daf9ce340..2f57f5144b9f 100644
--- a/include/asm-ia64/machvec_hpzx1.h
+++ b/include/asm-ia64/machvec_hpzx1.h
@@ -4,10 +4,10 @@
 extern ia64_mv_setup_t			dig_setup;
 extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single		sba_map_single;
-extern ia64_mv_dma_unmap_single		sba_unmap_single;
-extern ia64_mv_dma_map_sg		sba_map_sg;
-extern ia64_mv_dma_unmap_sg		sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported		sba_dma_supported;
 extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
 
@@ -23,10 +23,10 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
 #define platform_dma_init		machvec_noop
 #define platform_dma_alloc_coherent	sba_alloc_coherent
 #define platform_dma_free_coherent	sba_free_coherent
-#define platform_dma_map_single		sba_map_single
-#define platform_dma_unmap_single	sba_unmap_single
-#define platform_dma_map_sg		sba_map_sg
-#define platform_dma_unmap_sg		sba_unmap_sg
+#define platform_dma_map_single_attrs	sba_map_single_attrs
+#define platform_dma_unmap_single_attrs	sba_unmap_single_attrs
+#define platform_dma_map_sg_attrs	sba_map_sg_attrs
+#define platform_dma_unmap_sg_attrs	sba_unmap_sg_attrs
 #define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
 #define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
 #define platform_dma_sync_single_for_device	machvec_dma_sync_single
diff --git a/include/asm-ia64/machvec_hpzx1_swiotlb.h b/include/asm-ia64/machvec_hpzx1_swiotlb.h
index f00a34a148ff..a842cdda827b 100644
--- a/include/asm-ia64/machvec_hpzx1_swiotlb.h
+++ b/include/asm-ia64/machvec_hpzx1_swiotlb.h
@@ -4,10 +4,10 @@
 extern ia64_mv_setup_t			dig_setup;
 extern ia64_mv_dma_alloc_coherent	hwsw_alloc_coherent;
 extern ia64_mv_dma_free_coherent	hwsw_free_coherent;
-extern ia64_mv_dma_map_single		hwsw_map_single;
-extern ia64_mv_dma_unmap_single		hwsw_unmap_single;
-extern ia64_mv_dma_map_sg		hwsw_map_sg;
-extern ia64_mv_dma_unmap_sg		hwsw_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	hwsw_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	hwsw_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		hwsw_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	hwsw_unmap_sg_attrs;
 extern ia64_mv_dma_supported		hwsw_dma_supported;
 extern ia64_mv_dma_mapping_error	hwsw_dma_mapping_error;
 extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu;
@@ -28,10 +28,10 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
 #define platform_dma_init		machvec_noop
 #define platform_dma_alloc_coherent	hwsw_alloc_coherent
 #define platform_dma_free_coherent	hwsw_free_coherent
-#define platform_dma_map_single		hwsw_map_single
-#define platform_dma_unmap_single	hwsw_unmap_single
-#define platform_dma_map_sg		hwsw_map_sg
-#define platform_dma_unmap_sg		hwsw_unmap_sg
+#define platform_dma_map_single_attrs	hwsw_map_single_attrs
+#define platform_dma_unmap_single_attrs	hwsw_unmap_single_attrs
+#define platform_dma_map_sg_attrs	hwsw_map_sg_attrs
+#define platform_dma_unmap_sg_attrs	hwsw_unmap_sg_attrs
 #define platform_dma_supported		hwsw_dma_supported
 #define platform_dma_mapping_error	hwsw_dma_mapping_error
 #define platform_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index 61439a7f5b08..781308ea7b88 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -57,10 +57,10 @@ extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
 extern ia64_mv_dma_alloc_coherent	sn_dma_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sn_dma_free_coherent;
-extern ia64_mv_dma_map_single		sn_dma_map_single;
-extern ia64_mv_dma_unmap_single		sn_dma_unmap_single;
-extern ia64_mv_dma_map_sg		sn_dma_map_sg;
-extern ia64_mv_dma_unmap_sg		sn_dma_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sn_dma_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sn_dma_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sn_dma_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
@@ -113,10 +113,10 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_dma_init		machvec_noop
 #define platform_dma_alloc_coherent	sn_dma_alloc_coherent
 #define platform_dma_free_coherent	sn_dma_free_coherent
-#define platform_dma_map_single		sn_dma_map_single
-#define platform_dma_unmap_single	sn_dma_unmap_single
-#define platform_dma_map_sg		sn_dma_map_sg
-#define platform_dma_unmap_sg		sn_dma_unmap_sg
+#define platform_dma_map_single_attrs	sn_dma_map_single_attrs
+#define platform_dma_unmap_single_attrs	sn_dma_unmap_single_attrs
+#define platform_dma_map_sg_attrs	sn_dma_map_sg_attrs
+#define platform_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs
 #define platform_dma_sync_single_for_cpu	sn_dma_sync_single_for_cpu
 #define platform_dma_sync_sg_for_cpu		sn_dma_sync_sg_for_cpu
 #define platform_dma_sync_single_for_device	sn_dma_sync_single_for_device
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index f1663aa94a52..18a4321349a3 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -157,6 +157,7 @@ extern void ia64_mca_printk(const char * fmt, ...)
157struct ia64_mca_notify_die { 157struct ia64_mca_notify_die {
158 struct ia64_sal_os_state *sos; 158 struct ia64_sal_os_state *sos;
159 int *monarch_cpu; 159 int *monarch_cpu;
160 int *data;
160}; 161};
161 162
162DECLARE_PER_CPU(u64, ia64_mca_pal_base); 163DECLARE_PER_CPU(u64, ia64_mca_pal_base);
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index f93308f54b61..7245a5781594 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -35,6 +35,7 @@ extern void find_memory (void);
35extern void reserve_memory (void); 35extern void reserve_memory (void);
36extern void find_initrd (void); 36extern void find_initrd (void);
37extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); 37extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
38extern int filter_memory (unsigned long start, unsigned long end, void *arg);
38extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e); 39extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
39extern int find_max_min_low_pfn (unsigned long , unsigned long, void *); 40extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
40 41
@@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
56 57
57#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ 58#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
58 59
59extern int register_active_ranges(u64 start, u64 end, void *arg); 60extern int register_active_ranges(u64 start, u64 len, int nid);
60 61
61#ifdef CONFIG_VIRTUAL_MEM_MAP 62#ifdef CONFIG_VIRTUAL_MEM_MAP
62# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ 63# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
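register_active_ranges() above trades an end address and an opaque argument for a length and an explicit node id. A sketch of an adapted caller, assuming an efi_memmap_walk()-style (start, end, arg) walker with a hypothetical per-node context:

static int __init register_node_active_ranges(u64 start, u64 end, void *arg)
{
	int nid = *(int *)arg;	/* hypothetical per-node walk context */

	/* the new signature wants a length, not an end address */
	return register_active_ranges(start, end - start, nid);
}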
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 6a8a27cfae3e..3499ff57bf42 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -22,6 +22,8 @@
22 22
23#include <asm/mmzone.h> 23#include <asm/mmzone.h>
24 24
25#define NUMA_NO_NODE -1
26
25extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; 27extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
26extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; 28extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
27extern pg_data_t *pgdat_list[MAX_NUMNODES]; 29extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 4999a6c63775..36f39321b768 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -54,9 +54,6 @@
54# define HPAGE_MASK (~(HPAGE_SIZE - 1)) 54# define HPAGE_MASK (~(HPAGE_SIZE - 1))
55 55
56# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 56# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
57# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
58# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
59# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
60#endif /* CONFIG_HUGETLB_PAGE */ 57#endif /* CONFIG_HUGETLB_PAGE */
61 58
62#ifdef __ASSEMBLY__ 59#ifdef __ASSEMBLY__
@@ -153,9 +150,6 @@ typedef union ia64_va {
153# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \ 150# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
154 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT))) 151 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
155# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 152# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
156# define is_hugepage_only_range(mm, addr, len) \
157 (REGION_NUMBER(addr) == RGN_HPAGE || \
158 REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
159extern unsigned int hpage_shift; 153extern unsigned int hpage_shift;
160#endif 154#endif
161 155
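The region test itself does not go away; it plausibly moves into the new asm-ia64/hugetlb.h introduced by this patch, as an inline with the same logic as the macro removed above (a sketch):

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return (REGION_NUMBER(addr) == RGN_HPAGE ||
		REGION_NUMBER((addr) + (len) - 1) == RGN_HPAGE);
}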
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 8a695d3407d2..67b02901ead4 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -13,6 +13,7 @@
13 * Copyright (C) 1999 VA Linux Systems 13 * Copyright (C) 1999 VA Linux Systems
14 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> 14 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
15 * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com> 15 * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
16 * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
16 * 17 *
17 * 99/10/01 davidm Make sure we pass zero for reserved parameters. 18 * 99/10/01 davidm Make sure we pass zero for reserved parameters.
18 * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6. 19 * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
@@ -73,6 +74,8 @@
73#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */ 74#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
74#define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */ 75#define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */
75#define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */ 76#define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */
77#define PAL_VP_INFO 50 /* Information about virtual processor features */
78#define PAL_MC_HW_TRACKING 51 /* Hardware tracking status */
76 79
77#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ 80#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
78#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ 81#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@@ -504,7 +507,8 @@ typedef struct pal_cache_check_info_s {
504 wiv : 1, /* Way field valid */ 507 wiv : 1, /* Way field valid */
505 reserved2 : 1, 508 reserved2 : 1,
506 dp : 1, /* Data poisoned on MBE */ 509 dp : 1, /* Data poisoned on MBE */
507 reserved3 : 8, 510 reserved3 : 6,
511 hlth : 2, /* Health indicator */
508 512
509 index : 20, /* Cache line index */ 513 index : 20, /* Cache line index */
510 reserved4 : 2, 514 reserved4 : 2,
@@ -542,7 +546,9 @@ typedef struct pal_tlb_check_info_s {
542 dtc : 1, /* Fail in data TC */ 546 dtc : 1, /* Fail in data TC */
543 itc : 1, /* Fail in inst. TC */ 547 itc : 1, /* Fail in inst. TC */
544 op : 4, /* Cache operation */ 548 op : 4, /* Cache operation */
545 reserved3 : 30, 549 reserved3 : 6,
550 hlth : 2, /* Health indicator */
551 reserved4 : 22,
546 552
547 is : 1, /* instruction set (1 == ia32) */ 553 is : 1, /* instruction set (1 == ia32) */
548 iv : 1, /* instruction set field valid */ 554 iv : 1, /* instruction set field valid */
@@ -633,7 +639,8 @@ typedef struct pal_uarch_check_info_s {
633 way : 6, /* Way of structure */ 639 way : 6, /* Way of structure */
634 wv : 1, /* way valid */ 640 wv : 1, /* way valid */
635 xv : 1, /* index valid */ 641 xv : 1, /* index valid */
636 reserved1 : 8, 642 reserved1 : 6,
643 hlth : 2, /* Health indicator */
637 index : 8, /* Index or set of the uarch 644 index : 8, /* Index or set of the uarch
638 * structure that failed. 645 * structure that failed.
639 */ 646 */
@@ -1213,14 +1220,12 @@ ia64_pal_mc_drain (void)
1213 1220
1214/* Return the machine check dynamic processor state */ 1221/* Return the machine check dynamic processor state */
1215static inline s64 1222static inline s64
1216ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds) 1223ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
1217{ 1224{
1218 struct ia64_pal_retval iprv; 1225 struct ia64_pal_retval iprv;
1219 PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0); 1226 PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
1220 if (size) 1227 if (size)
1221 *size = iprv.v0; 1228 *size = iprv.v0;
1222 if (pds)
1223 *pds = iprv.v1;
1224 return iprv.status; 1229 return iprv.status;
1225} 1230}
1226 1231
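The reworked call takes an info type and the physical address of a caller-supplied buffer. A usage sketch (info_type 0 is illustrative; the encoding is PAL-defined and not shown by this patch):

static s64 dump_mc_state(void *buf)
{
	u64 size = 0;
	s64 status;

	status = ia64_pal_mc_dynamic_state(0, __pa(buf), &size);
	return status == 0 ? (s64)size : status;	/* bytes returned */
}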
@@ -1281,15 +1286,41 @@ ia64_pal_mc_expected (u64 expected, u64 *previous)
1281 return iprv.status; 1286 return iprv.status;
1282} 1287}
1283 1288
1289typedef union pal_hw_tracking_u {
1290 u64 pht_data;
1291 struct {
1292 u64 itc :4, /* Instruction cache tracking */
 1293 dct :4, /* Data cache tracking */
1294 itt :4, /* Instruction TLB tracking */
1295 ddt :4, /* Data TLB tracking */
1296 reserved:48;
1297 } pal_hw_tracking_s;
1298} pal_hw_tracking_u_t;
1299
1300/*
1301 * Hardware tracking status.
1302 */
1303static inline s64
1304ia64_pal_mc_hw_tracking (u64 *status)
1305{
1306 struct ia64_pal_retval iprv;
1307 PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
1308 if (status)
1309 *status = iprv.v0;
1310 return iprv.status;
1311}
1312
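A sketch of decoding the tracking status through the union above (the printing is illustrative):

static void report_hw_tracking(void)
{
	pal_hw_tracking_u_t tr;
	u64 status;

	if (ia64_pal_mc_hw_tracking(&status) != 0)
		return;			/* call failed or unimplemented */

	tr.pht_data = status;
	printk(KERN_INFO "hw tracking: i$=%lu d$=%lu iTLB=%lu dTLB=%lu\n",
	       (unsigned long)tr.pal_hw_tracking_s.itc,
	       (unsigned long)tr.pal_hw_tracking_s.dct,
	       (unsigned long)tr.pal_hw_tracking_s.itt,
	       (unsigned long)tr.pal_hw_tracking_s.ddt);
}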
1284/* Register a platform dependent location with PAL to which it can save 1313/* Register a platform dependent location with PAL to which it can save
1285 * minimal processor state in the event of a machine check or initialization 1314 * minimal processor state in the event of a machine check or initialization
1286 * event. 1315 * event.
1287 */ 1316 */
1288static inline s64 1317static inline s64
1289ia64_pal_mc_register_mem (u64 physical_addr) 1318ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
1290{ 1319{
1291 struct ia64_pal_retval iprv; 1320 struct ia64_pal_retval iprv;
1292 PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0); 1321 PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
1322 if (req_size)
1323 *req_size = iprv.v0;
1293 return iprv.status; 1324 return iprv.status;
1294} 1325}
1295 1326
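ia64_pal_mc_register_mem() now passes the buffer size in and gets back the size PAL actually requires. A probing sketch (the exact status semantics on a too-small buffer are an assumption):

static s64 register_mc_area(u64 phys, u64 size)
{
	u64 req_size = 0;
	s64 status;

	status = ia64_pal_mc_register_mem(phys, size, &req_size);
	if (status != 0 && req_size > size)
		printk(KERN_WARNING "PAL wants %lu bytes, got %lu\n",
		       (unsigned long)req_size, (unsigned long)size);
	return status;
}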
@@ -1631,6 +1662,29 @@ ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
1631 return iprv.status; 1662 return iprv.status;
1632} 1663}
1633 1664
1665typedef union pal_vp_info_u {
1666 u64 pvi_val;
1667 struct {
1668 u64 index: 48, /* virtual feature set info */
1669 vmm_id: 16; /* feature set id */
1670 } pal_vp_info_s;
1671} pal_vp_info_u_t;
1672
1673/*
 1674 * Returns information about virtual processor features
1675 */
1676static inline s64
1677ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
1678{
1679 struct ia64_pal_retval iprv;
1680 PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
1681 if (vp_info)
1682 *vp_info = iprv.v0;
1683 if (vmm_id)
1684 *vmm_id = iprv.v1;
1685 return iprv.status;
1686}
1687
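Usage sketch for the new call, decoding v0 through pal_vp_info_u_t (feature_set 0 and a zero vp_buffer are illustrative):

static s64 get_vp_features(u64 *features, u64 *id)
{
	pal_vp_info_u_t info;
	s64 status;

	status = ia64_pal_vp_info(0, 0, &info.pvi_val, id);
	if (status == 0)
		*features = info.pal_vp_info_s.index;
	return status;
}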
1634typedef union pal_itr_valid_u { 1688typedef union pal_itr_valid_u {
1635 u64 piv_val; 1689 u64 piv_val;
1636 struct { 1690 struct {
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e6204f14f614..7a9bff47564f 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -302,6 +302,8 @@ ia64_phys_addr_valid (unsigned long addr)
302#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0) 302#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
303#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0) 303#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
304#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0) 304#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
305#define pte_special(pte) 0
306
305/* 307/*
306 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the 308 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
307 * access rights: 309 * access rights:
@@ -313,6 +315,7 @@ ia64_phys_addr_valid (unsigned long addr)
313#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D)) 315#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
314#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D)) 316#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
315#define pte_mkhuge(pte) (__pte(pte_val(pte))) 317#define pte_mkhuge(pte) (__pte(pte_val(pte)))
318#define pte_mkspecial(pte) (pte)
316 319
317/* 320/*
318 * Because ia64's Icache and Dcache is not coherent (on a cpu), we need to 321 * Because ia64's Icache and Dcache is not coherent (on a cpu), we need to
@@ -371,7 +374,7 @@ pgd_index (unsigned long address)
371/* The offset in the 1-level directory is given by the 3 region bits 374/* The offset in the 1-level directory is given by the 3 region bits
372 (61..63) and the level-1 bits. */ 375 (61..63) and the level-1 bits. */
373static inline pgd_t* 376static inline pgd_t*
374pgd_offset (struct mm_struct *mm, unsigned long address) 377pgd_offset (const struct mm_struct *mm, unsigned long address)
375{ 378{
376 return mm->pgd + pgd_index(address); 379 return mm->pgd + pgd_index(address);
377} 380}
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 741f7ecb986a..6aff126fc07e 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -119,6 +119,69 @@ struct ia64_psr {
119 __u64 reserved4 : 19; 119 __u64 reserved4 : 19;
120}; 120};
121 121
122union ia64_isr {
123 __u64 val;
124 struct {
125 __u64 code : 16;
126 __u64 vector : 8;
127 __u64 reserved1 : 8;
128 __u64 x : 1;
129 __u64 w : 1;
130 __u64 r : 1;
131 __u64 na : 1;
132 __u64 sp : 1;
133 __u64 rs : 1;
134 __u64 ir : 1;
135 __u64 ni : 1;
136 __u64 so : 1;
137 __u64 ei : 2;
138 __u64 ed : 1;
139 __u64 reserved2 : 20;
140 };
141};
142
143union ia64_lid {
144 __u64 val;
145 struct {
146 __u64 rv : 16;
147 __u64 eid : 8;
148 __u64 id : 8;
149 __u64 ig : 32;
150 };
151};
152
153union ia64_tpr {
154 __u64 val;
155 struct {
156 __u64 ig0 : 4;
157 __u64 mic : 4;
158 __u64 rsv : 8;
159 __u64 mmi : 1;
160 __u64 ig1 : 47;
161 };
162};
163
164union ia64_itir {
165 __u64 val;
166 struct {
167 __u64 rv3 : 2; /* 0-1 */
168 __u64 ps : 6; /* 2-7 */
169 __u64 key : 24; /* 8-31 */
170 __u64 rv4 : 32; /* 32-63 */
171 };
172};
173
174union ia64_rr {
175 __u64 val;
176 struct {
177 __u64 ve : 1; /* enable hw walker */
178 __u64 reserved0: 1; /* reserved */
179 __u64 ps : 6; /* log page size */
180 __u64 rid : 24; /* region id */
181 __u64 reserved1: 32; /* reserved */
182 };
183};
184
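A sketch of decoding a region register value with the new union; `raw' stands in for a value read from hardware or a saved context:

static void show_rr(__u64 raw)
{
	union ia64_rr rr;

	rr.val = raw;
	printk(KERN_DEBUG "rid=%#lx page size=2^%lu hw walker %s\n",
	       (unsigned long)rr.rid, (unsigned long)rr.ps,
	       rr.ve ? "on" : "off");
}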
122/* 185/*
123 * CPU type, hardware bug flags, and per-CPU state. Frequently used 186 * CPU type, hardware bug flags, and per-CPU state. Frequently used
124 * state comes earlier: 187 * state comes earlier:
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index f4904db3b057..89594b442f83 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -296,6 +296,9 @@ enum {
296 EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) 296 EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
297#define SAL_PLAT_BUS_ERR_SECT_GUID \ 297#define SAL_PLAT_BUS_ERR_SECT_GUID \
298 EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) 298 EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
299#define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \
300 EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \
301 0xca, 0x4d)
299 302
300#define MAX_CACHE_ERRORS 6 303#define MAX_CACHE_ERRORS 6
301#define MAX_TLB_ERRORS 6 304#define MAX_TLB_ERRORS 6
@@ -879,6 +882,24 @@ extern void ia64_jump_to_sal(struct sal_to_os_boot *);
879 882
880extern void ia64_sal_handler_init(void *entry_point, void *gpval); 883extern void ia64_sal_handler_init(void *entry_point, void *gpval);
881 884
885#define PALO_MAX_TLB_PURGES 0xFFFF
886#define PALO_SIG "PALO"
887
888struct palo_table {
889 u8 signature[4]; /* Should be "PALO" */
890 u32 length;
891 u8 minor_revision;
892 u8 major_revision;
893 u8 checksum;
894 u8 reserved1[5];
895 u16 max_tlb_purges;
896 u8 reserved2[6];
897};
898
899#define NPTCG_FROM_PAL 0
900#define NPTCG_FROM_PALO 1
901#define NPTCG_FROM_KERNEL_PARAMETER 2
902
882#endif /* __ASSEMBLY__ */ 903#endif /* __ASSEMBLY__ */
883 904
884#endif /* _ASM_IA64_SAL_H */ 905#endif /* _ASM_IA64_SAL_H */
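A sketch of consuming the new PALO table; the byte-layout checks, the lookup via the PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID EFI configuration table, and the set_nptcg() helper are all assumptions here:

static void __init handle_palo_sketch(unsigned long palo_phys)
{
	struct palo_table *palo = __va(palo_phys);

	if (strncmp((char *)palo->signature, PALO_SIG, 4))
		return;				/* not a PALO table */

	if (palo->max_tlb_purges <= PALO_MAX_TLB_PURGES)
		/* hypothetical setter capping outstanding ptc.g purges */
		set_nptcg(palo->max_tlb_purges, NPTCG_FROM_PALO);
}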
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index d8393d11288d..d9b2034ed1d2 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -1,99 +1 @@
1#ifndef _ASM_IA64_SEMAPHORE_H #include <linux/semaphore.h>
2#define _ASM_IA64_SEMAPHORE_H
3
4/*
5 * Copyright (C) 1998-2000 Hewlett-Packard Co
6 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/wait.h>
10#include <linux/rwsem.h>
11
12#include <asm/atomic.h>
13
14struct semaphore {
15 atomic_t count;
16 int sleepers;
17 wait_queue_head_t wait;
18};
19
20#define __SEMAPHORE_INITIALIZER(name, n) \
21{ \
22 .count = ATOMIC_INIT(n), \
23 .sleepers = 0, \
24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
25}
26
27#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
28 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
29
30#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
31
32static inline void
33sema_init (struct semaphore *sem, int val)
34{
35 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
36}
37
38static inline void
39init_MUTEX (struct semaphore *sem)
40{
41 sema_init(sem, 1);
42}
43
44static inline void
45init_MUTEX_LOCKED (struct semaphore *sem)
46{
47 sema_init(sem, 0);
48}
49
50extern void __down (struct semaphore * sem);
51extern int __down_interruptible (struct semaphore * sem);
52extern int __down_trylock (struct semaphore * sem);
53extern void __up (struct semaphore * sem);
54
55/*
56 * Atomically decrement the semaphore's count. If it goes negative,
57 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
58 */
59static inline void
60down (struct semaphore *sem)
61{
62 might_sleep();
63 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
64 __down(sem);
65}
66
67/*
68 * Atomically decrement the semaphore's count. If it goes negative,
69 * block the calling thread in the TASK_INTERRUPTIBLE state.
70 */
71static inline int
72down_interruptible (struct semaphore * sem)
73{
74 int ret = 0;
75
76 might_sleep();
77 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
78 ret = __down_interruptible(sem);
79 return ret;
80}
81
82static inline int
83down_trylock (struct semaphore *sem)
84{
85 int ret = 0;
86
87 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
88 ret = __down_trylock(sem);
89 return ret;
90}
91
92static inline void
93up (struct semaphore * sem)
94{
95 if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
96 __up(sem);
97}
98
99#endif /* _ASM_IA64_SEMAPHORE_H */
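With the arch-specific implementation gone, the header reduces to the generic <linux/semaphore.h>, and the API seen by users is unchanged; a sketch:

#include <linux/semaphore.h>

static struct semaphore update_sem;

static int update_example(void)
{
	sema_init(&update_sem, 1);		/* binary semaphore */

	if (down_interruptible(&update_sem))
		return -EINTR;			/* woken by a signal */
	/* ... critical section ... */
	up(&update_sem);
	return 0;
}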
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 4fa733dd417a..ec5f355fb7e3 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,9 @@ ia64_get_lid (void)
38 return lid.f.id << 8 | lid.f.eid; 38 return lid.f.id << 8 | lid.f.eid;
39} 39}
40 40
41extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
42 void *info, int wait);
43
41#define hard_smp_processor_id() ia64_get_lid() 44#define hard_smp_processor_id() ia64_get_lid()
42 45
43#ifdef CONFIG_SMP 46#ifdef CONFIG_SMP
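A usage sketch for the new declaration (wait=1 blocks until every targeted CPU has run the function; names are illustrative):

static void bump(void *info)
{
	atomic_inc((atomic_t *)info);
}

static void poke_cpus(cpumask_t mask)
{
	atomic_t hits = ATOMIC_INIT(0);

	if (smp_call_function_mask(mask, bump, &hits, 1) == 0)
		printk(KERN_DEBUG "%d cpus ran bump()\n", atomic_read(&hits));
}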
diff --git a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h
index 6f6d69e39ff5..ee118b901de4 100644
--- a/include/asm-ia64/sn/nodepda.h
+++ b/include/asm-ia64/sn/nodepda.h
@@ -9,7 +9,6 @@
9#define _ASM_IA64_SN_NODEPDA_H 9#define _ASM_IA64_SN_NODEPDA_H
10 10
11 11
12#include <asm/semaphore.h>
13#include <asm/irq.h> 12#include <asm/irq.h>
14#include <asm/sn/arch.h> 13#include <asm/sn/arch.h>
15#include <asm/sn/intr.h> 14#include <asm/sn/intr.h>
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
deleted file mode 100644
index f7711b308e48..000000000000
--- a/include/asm-ia64/sn/xp.h
+++ /dev/null
@@ -1,485 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9
10/*
11 * External Cross Partition (XP) structures and defines.
12 */
13
14
15#ifndef _ASM_IA64_SN_XP_H
16#define _ASM_IA64_SN_XP_H
17
18
19#include <linux/cache.h>
20#include <linux/hardirq.h>
21#include <linux/mutex.h>
22#include <asm/sn/types.h>
23#include <asm/sn/bte.h>
24
25
26#ifdef USE_DBUG_ON
27#define DBUG_ON(condition) BUG_ON(condition)
28#else
29#define DBUG_ON(condition)
30#endif
31
32
33/*
34 * Define the maximum number of logically defined partitions the system
35 * can support. It is constrained by the maximum number of hardware
36 * partitionable regions. The term 'region' in this context refers to the
37 * minimum number of nodes that can comprise an access protection grouping.
38 * The access protection is in regards to memory, IPI and IOI.
39 *
40 * The maximum number of hardware partitionable regions is equal to the
41 * maximum number of nodes in the entire system divided by the minimum number
42 * of nodes that comprise an access protection grouping.
43 */
44#define XP_MAX_PARTITIONS 64
45
46
47/*
48 * Define the number of u64s required to represent all the C-brick nasids
49 * as a bitmap. The cross-partition kernel modules deal only with
50 * C-brick nasids, thus the need for bitmaps which don't account for
51 * odd-numbered (non C-brick) nasids.
52 */
53#define XP_MAX_PHYSNODE_ID (MAX_NUMALINK_NODES / 2)
54#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8)
55#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64)
56
57
58/*
 59 * Wrapper for bte_copy() that, should it return a failure status, will retry
60 * the bte_copy() once in the hope that the failure was due to a temporary
61 * aberration (i.e., the link going down temporarily).
62 *
63 * src - physical address of the source of the transfer.
64 * vdst - virtual address of the destination of the transfer.
65 * len - number of bytes to transfer from source to destination.
66 * mode - see bte_copy() for definition.
67 * notification - see bte_copy() for definition.
68 *
69 * Note: xp_bte_copy() should never be called while holding a spinlock.
70 */
71static inline bte_result_t
72xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification)
73{
74 bte_result_t ret;
75 u64 pdst = ia64_tpa(vdst);
76
77
78 /*
79 * Ensure that the physically mapped memory is contiguous.
80 *
81 * We do this by ensuring that the memory is from region 7 only.
82 * If the need should arise to use memory from one of the other
83 * regions, then modify the BUG_ON() statement to ensure that the
84 * memory from that region is always physically contiguous.
85 */
86 BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
87
88 ret = bte_copy(src, pdst, len, mode, notification);
89 if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) {
90 if (!in_interrupt()) {
91 cond_resched();
92 }
93 ret = bte_copy(src, pdst, len, mode, notification);
94 }
95
96 return ret;
97}
98
99
100/*
101 * XPC establishes channel connections between the local partition and any
102 * other partition that is currently up. Over these channels, kernel-level
103 * `users' can communicate with their counterparts on the other partitions.
104 *
 105 * The maximum number of channels is limited to eight. For performance reasons,
106 * the internal cross partition structures require sixteen bytes per channel,
107 * and eight allows all of this interface-shared info to fit in one cache line.
108 *
109 * XPC_NCHANNELS reflects the total number of channels currently defined.
110 * If the need for additional channels arises, one can simply increase
111 * XPC_NCHANNELS accordingly. If the day should come where that number
112 * exceeds the MAXIMUM number of channels allowed (eight), then one will need
113 * to make changes to the XPC code to allow for this.
114 */
115#define XPC_MEM_CHANNEL 0 /* memory channel number */
116#define XPC_NET_CHANNEL 1 /* network channel number */
117
118#define XPC_NCHANNELS 2 /* #of defined channels */
119#define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */
120
121#if XPC_NCHANNELS > XPC_MAX_NCHANNELS
122#error XPC_NCHANNELS exceeds MAXIMUM allowed.
123#endif
124
125
126/*
127 * The format of an XPC message is as follows:
128 *
129 * +-------+--------------------------------+
130 * | flags |////////////////////////////////|
131 * +-------+--------------------------------+
132 * | message # |
133 * +----------------------------------------+
134 * | payload (user-defined message) |
135 * | |
136 * :
137 * | |
138 * +----------------------------------------+
139 *
140 * The size of the payload is defined by the user via xpc_connect(). A user-
141 * defined message resides in the payload area.
142 *
143 * The user should have no dealings with the message header, but only the
144 * message's payload. When a message entry is allocated (via xpc_allocate())
145 * a pointer to the payload area is returned and not the actual beginning of
146 * the XPC message. The user then constructs a message in the payload area
147 * and passes that pointer as an argument on xpc_send() or xpc_send_notify().
148 *
149 * The size of a message entry (within a message queue) must be a cacheline
150 * sized multiple in order to facilitate the BTE transfer of messages from one
151 * message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user
152 * that wants to fit as many msg entries as possible in a given memory size
153 * (e.g. a memory page).
154 */
155struct xpc_msg {
156 u8 flags; /* FOR XPC INTERNAL USE ONLY */
157 u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */
158 s64 number; /* FOR XPC INTERNAL USE ONLY */
159
160 u64 payload; /* user defined portion of message */
161};
162
163
164#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload)
165#define XPC_MSG_SIZE(_payload_size) \
166 L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
167
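Per the sizing note above, a user picks the number of entries by dividing the available memory by the aligned entry size; a sketch with a 128-byte payload:

/* how many 128-byte-payload entries fit in one page (illustrative) */
static inline int xpc_msgs_per_page(void)
{
	return PAGE_SIZE / XPC_MSG_SIZE(128);
}

The result is what such a user might then pass to xpc_connect() as its nentries argument.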
168
169/*
170 * Define the return values and values passed to user's callout functions.
171 * (It is important to add new value codes at the end just preceding
172 * xpcUnknownReason, which must have the highest numerical value.)
173 */
174enum xpc_retval {
175 xpcSuccess = 0,
176
177 xpcNotConnected, /* 1: channel is not connected */
178 xpcConnected, /* 2: channel connected (opened) */
179 xpcRETIRED1, /* 3: (formerly xpcDisconnected) */
180
181 xpcMsgReceived, /* 4: message received */
182 xpcMsgDelivered, /* 5: message delivered and acknowledged */
183
184 xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */
185
186 xpcNoWait, /* 7: operation would require wait */
187 xpcRetry, /* 8: retry operation */
188 xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */
189 xpcInterrupted, /* 10: interrupted wait */
190
191 xpcUnequalMsgSizes, /* 11: message size disparity between sides */
192 xpcInvalidAddress, /* 12: invalid address */
193
194 xpcNoMemory, /* 13: no memory available for XPC structures */
195 xpcLackOfResources, /* 14: insufficient resources for operation */
196 xpcUnregistered, /* 15: channel is not registered */
197 xpcAlreadyRegistered, /* 16: channel is already registered */
198
199 xpcPartitionDown, /* 17: remote partition is down */
200 xpcNotLoaded, /* 18: XPC module is not loaded */
201 xpcUnloading, /* 19: this side is unloading XPC module */
202
203 xpcBadMagic, /* 20: XPC MAGIC string not found */
204
205 xpcReactivating, /* 21: remote partition was reactivated */
206
207 xpcUnregistering, /* 22: this side is unregistering channel */
208 xpcOtherUnregistering, /* 23: other side is unregistering channel */
209
210 xpcCloneKThread, /* 24: cloning kernel thread */
211 xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */
212
213 xpcNoHeartbeat, /* 26: remote partition has no heartbeat */
214
215 xpcPioReadError, /* 27: PIO read error */
216 xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */
217
218 xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */
219 xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */
220 xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */
221 xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */
222 xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */
223 xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */
224 xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */
225 xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */
226 xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */
227 xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */
228
229 xpcBadVersion, /* 39: bad version number */
230 xpcVarsNotSet, /* 40: the XPC variables are not set up */
231 xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */
232 xpcInvalidPartid, /* 42: invalid partition ID */
233 xpcLocalPartid, /* 43: local partition ID */
234
235 xpcOtherGoingDown, /* 44: other side going down, reason unknown */
236 xpcSystemGoingDown, /* 45: system is going down, reason unknown */
237 xpcSystemHalt, /* 46: system is being halted */
238 xpcSystemReboot, /* 47: system is being rebooted */
239 xpcSystemPoweroff, /* 48: system is being powered off */
240
241 xpcDisconnecting, /* 49: channel disconnecting (closing) */
242
243 xpcOpenCloseError, /* 50: channel open/close protocol error */
244
245 xpcDisconnected, /* 51: channel disconnected (closed) */
246
247 xpcBteSh2Start, /* 52: BTE CRB timeout */
248
249 /* 53: 0x1 BTE Error Response Short */
250 xpcBteSh2RspShort = xpcBteSh2Start + BTEFAIL_SH2_RESP_SHORT,
251
252 /* 54: 0x2 BTE Error Response Long */
253 xpcBteSh2RspLong = xpcBteSh2Start + BTEFAIL_SH2_RESP_LONG,
254
255 /* 56: 0x4 BTE Error Response DSB */
256 xpcBteSh2RspDSB = xpcBteSh2Start + BTEFAIL_SH2_RESP_DSP,
257
258 /* 60: 0x8 BTE Error Response Access */
259 xpcBteSh2RspAccess = xpcBteSh2Start + BTEFAIL_SH2_RESP_ACCESS,
260
261 /* 68: 0x10 BTE Error CRB timeout */
262 xpcBteSh2CRBTO = xpcBteSh2Start + BTEFAIL_SH2_CRB_TO,
263
264 /* 84: 0x20 BTE Error NACK limit */
265 xpcBteSh2NACKLimit = xpcBteSh2Start + BTEFAIL_SH2_NACK_LIMIT,
266
267 /* 115: BTE end */
268 xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
269
270 xpcUnknownReason /* 116: unknown reason -- must be last in list */
271};
272
273
274/*
275 * Define the callout function types used by XPC to update the user on
276 * connection activity and state changes (via the user function registered by
277 * xpc_connect()) and to notify them of messages received and delivered (via
278 * the user function registered by xpc_send_notify()).
279 *
280 * The two function types are xpc_channel_func and xpc_notify_func and
281 * both share the following arguments, with the exception of "data", which
282 * only xpc_channel_func has.
283 *
284 * Arguments:
285 *
286 * reason - reason code. (See following table.)
287 * partid - partition ID associated with condition.
288 * ch_number - channel # associated with condition.
289 * data - pointer to optional data. (See following table.)
290 * key - pointer to optional user-defined value provided as the "key"
291 * argument to xpc_connect() or xpc_send_notify().
292 *
293 * In the following table the "Optional Data" column applies to callouts made
294 * to functions registered by xpc_connect(). A "NA" in that column indicates
295 * that this reason code can be passed to functions registered by
296 * xpc_send_notify() (i.e. they don't have data arguments).
297 *
298 * Also, the first three reason codes in the following table indicate
299 * success, whereas the others indicate failure. When a failure reason code
300 * is received, one can assume that the channel is not connected.
301 *
302 *
303 * Reason Code | Cause | Optional Data
304 * =====================+================================+=====================
305 * xpcConnected | connection has been established| max #of entries
306 * | to the specified partition on | allowed in message
307 * | the specified channel | queue
308 * ---------------------+--------------------------------+---------------------
309 * xpcMsgReceived | an XPC message arrived from | address of payload
310 * | the specified partition on the |
311 * | specified channel | [the user must call
312 * | | xpc_received() when
313 * | | finished with the
314 * | | payload]
315 * ---------------------+--------------------------------+---------------------
316 * xpcMsgDelivered | notification that the message | NA
317 * | was delivered to the intended |
318 * | recipient and that they have |
319 * | acknowledged its receipt by |
320 * | calling xpc_received() |
321 * =====================+================================+=====================
322 * xpcUnequalMsgSizes | can't connect to the specified | NULL
323 * | partition on the specified |
324 * | channel because of mismatched |
325 * | message sizes |
326 * ---------------------+--------------------------------+---------------------
 327 * xpcNoMemory | insufficient memory available | NULL
328 * | to allocate message queue |
329 * ---------------------+--------------------------------+---------------------
330 * xpcLackOfResources | lack of resources to create | NULL
331 * | the necessary kthreads to |
332 * | support the channel |
333 * ---------------------+--------------------------------+---------------------
334 * xpcUnregistering | this side's user has | NULL or NA
335 * | unregistered by calling |
336 * | xpc_disconnect() |
337 * ---------------------+--------------------------------+---------------------
338 * xpcOtherUnregistering| the other side's user has | NULL or NA
339 * | unregistered by calling |
340 * | xpc_disconnect() |
341 * ---------------------+--------------------------------+---------------------
342 * xpcNoHeartbeat | the other side's XPC is no | NULL or NA
343 * | longer heartbeating |
344 * | |
345 * ---------------------+--------------------------------+---------------------
346 * xpcUnloading | this side's XPC module is | NULL or NA
347 * | being unloaded |
348 * | |
349 * ---------------------+--------------------------------+---------------------
350 * xpcOtherUnloading | the other side's XPC module is | NULL or NA
351 * | is being unloaded |
352 * | |
353 * ---------------------+--------------------------------+---------------------
354 * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA
355 * | error while sending an IPI |
356 * | |
357 * ---------------------+--------------------------------+---------------------
358 * xpcInvalidAddress | the address either received or | NULL or NA
359 * | sent by the specified partition|
360 * | is invalid |
361 * ---------------------+--------------------------------+---------------------
362 * xpcBteNotAvailable | attempt to pull data from the | NULL or NA
363 * xpcBtePoisonError | specified partition over the |
364 * xpcBteWriteError | specified channel via a |
365 * xpcBteAccessError | bte_copy() failed |
366 * xpcBteTimeOutError | |
367 * xpcBteXtalkError | |
368 * xpcBteDirectoryError | |
369 * xpcBteGenericError | |
370 * xpcBteUnmappedError | |
371 * ---------------------+--------------------------------+---------------------
372 * xpcUnknownReason | the specified channel to the | NULL or NA
373 * | specified partition was |
374 * | unavailable for unknown reasons|
375 * =====================+================================+=====================
376 */
377
378typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
379 int ch_number, void *data, void *key);
380
381typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
382 int ch_number, void *key);
383
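A sketch of a channel callout observing the table above (names are illustrative; the function would be registered via xpc_connect()):

static void my_channel_func(enum xpc_retval reason, partid_t partid,
			    int ch_number, void *data, void *key)
{
	switch (reason) {
	case xpcConnected:
		/* data = max #of entries allowed in the message queue */
		break;
	case xpcMsgReceived:
		/* data = address of payload; hand it back when done */
		xpc_received(partid, ch_number, data);
		break;
	default:
		/* failure code: assume the channel is no longer connected */
		break;
	}
}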
384
385/*
386 * The following is a registration entry. There is a global array of these,
387 * one per channel. It is used to record the connection registration made
388 * by the users of XPC. As long as a registration entry exists, for any
389 * partition that comes up, XPC will attempt to establish a connection on
390 * that channel. Notification that a connection has been made will occur via
391 * the xpc_channel_func function.
392 *
 393 * The 'func' field points to the function to call when asynchronous
394 * notification is required for such events as: a connection established/lost,
395 * or an incoming message received, or an error condition encountered. A
396 * non-NULL 'func' field indicates that there is an active registration for
397 * the channel.
398 */
399struct xpc_registration {
400 struct mutex mutex;
401 xpc_channel_func func; /* function to call */
402 void *key; /* pointer to user's key */
403 u16 nentries; /* #of msg entries in local msg queue */
404 u16 msg_size; /* message queue's message size */
405 u32 assigned_limit; /* limit on #of assigned kthreads */
406 u32 idle_limit; /* limit on #of idle kthreads */
407} ____cacheline_aligned;
408
409
410#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL)
411
412
413/* the following are valid xpc_allocate() flags */
414#define XPC_WAIT 0 /* wait flag */
415#define XPC_NOWAIT 1 /* no wait flag */
416
417
418struct xpc_interface {
419 void (*connect)(int);
420 void (*disconnect)(int);
421 enum xpc_retval (*allocate)(partid_t, int, u32, void **);
422 enum xpc_retval (*send)(partid_t, int, void *);
423 enum xpc_retval (*send_notify)(partid_t, int, void *,
424 xpc_notify_func, void *);
425 void (*received)(partid_t, int, void *);
426 enum xpc_retval (*partid_to_nasids)(partid_t, void *);
427};
428
429
430extern struct xpc_interface xpc_interface;
431
432extern void xpc_set_interface(void (*)(int),
433 void (*)(int),
434 enum xpc_retval (*)(partid_t, int, u32, void **),
435 enum xpc_retval (*)(partid_t, int, void *),
436 enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
437 void *),
438 void (*)(partid_t, int, void *),
439 enum xpc_retval (*)(partid_t, void *));
440extern void xpc_clear_interface(void);
441
442
443extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
444 u16, u32, u32);
445extern void xpc_disconnect(int);
446
447static inline enum xpc_retval
448xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
449{
450 return xpc_interface.allocate(partid, ch_number, flags, payload);
451}
452
453static inline enum xpc_retval
454xpc_send(partid_t partid, int ch_number, void *payload)
455{
456 return xpc_interface.send(partid, ch_number, payload);
457}
458
459static inline enum xpc_retval
460xpc_send_notify(partid_t partid, int ch_number, void *payload,
461 xpc_notify_func func, void *key)
462{
463 return xpc_interface.send_notify(partid, ch_number, payload, func, key);
464}
465
466static inline void
467xpc_received(partid_t partid, int ch_number, void *payload)
468{
469 return xpc_interface.received(partid, ch_number, payload);
470}
471
472static inline enum xpc_retval
473xpc_partid_to_nasids(partid_t partid, void *nasids)
474{
475 return xpc_interface.partid_to_nasids(partid, nasids);
476}
477
478
479extern u64 xp_nofault_PIOR_target;
480extern int xp_nofault_PIOR(void *);
481extern int xp_error_PIOR(void);
482
483
484#endif /* _ASM_IA64_SN_XP_H */
485
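Tying the wrappers together, the send side implied by the message-format comments earlier in this header (payload layout and channel choice are illustrative):

static enum xpc_retval send_example(partid_t partid, void *data, size_t len)
{
	void *payload;
	enum xpc_retval ret;

	ret = xpc_allocate(partid, XPC_NET_CHANNEL, XPC_WAIT, &payload);
	if (ret != xpcSuccess)
		return ret;

	memcpy(payload, data, len);	/* build the msg in the payload area */
	return xpc_send(partid, XPC_NET_CHANNEL, payload);
}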
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
deleted file mode 100644
index 3c0900ab8003..000000000000
--- a/include/asm-ia64/sn/xpc.h
+++ /dev/null
@@ -1,1267 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * Cross Partition Communication (XPC) structures and macros.
12 */
13
14#ifndef _ASM_IA64_SN_XPC_H
15#define _ASM_IA64_SN_XPC_H
16
17
18#include <linux/interrupt.h>
19#include <linux/sysctl.h>
20#include <linux/device.h>
21#include <linux/mutex.h>
22#include <linux/completion.h>
23#include <asm/pgtable.h>
24#include <asm/processor.h>
25#include <asm/sn/bte.h>
26#include <asm/sn/clksupport.h>
27#include <asm/sn/addrs.h>
28#include <asm/sn/mspec.h>
29#include <asm/sn/shub_mmr.h>
30#include <asm/sn/xp.h>
31
32
33/*
34 * XPC Version numbers consist of a major and minor number. XPC can always
35 * talk to versions with same major #, and never talk to versions with a
36 * different major #.
37 */
38#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf))
39#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
40#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
41
42
43/*
44 * The next macros define word or bit representations for given
45 * C-brick nasid in either the SAL provided bit array representing
46 * nasids in the partition/machine or the AMO_t array used for
47 * inter-partition initiation communications.
48 *
 49 * For SN2 machines, C-Bricks are always even numbered NASIDs. As
50 * such, some space will be saved by insisting that nasid information
51 * passed from SAL always be packed for C-Bricks and the
52 * cross-partition interrupts use the same packing scheme.
53 */
54#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2)
55#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1))
56#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
57 (1UL << XPC_NASID_B_INDEX(_n)))
58#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
59
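A worked instance of the packing (C-brick nasids are even, so nasid 130 lands in word 1, bit 1, and round-trips):

static inline void xpc_nasid_pack_example(void)
{
	BUILD_BUG_ON(XPC_NASID_W_INDEX(130) != 1);	/* (130/64)/2   */
	BUILD_BUG_ON(XPC_NASID_B_INDEX(130) != 1);	/* (130/2) & 63 */
	BUILD_BUG_ON(XPC_NASID_FROM_W_B(1, 1) != 130);	/* round trip   */
}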
60#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
61#define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */
62
63/* define the process name of HB checker and the CPU it is pinned to */
64#define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
65#define XPC_HB_CHECK_CPU 0
66
67/* define the process name of the discovery thread */
68#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
69
70
71/*
72 * the reserved page
73 *
74 * SAL reserves one page of memory per partition for XPC. Though a full page
75 * in length (16384 bytes), its starting address is not page aligned, but it
76 * is cacheline aligned. The reserved page consists of the following:
77 *
78 * reserved page header
79 *
80 * The first cacheline of the reserved page contains the header
81 * (struct xpc_rsvd_page). Before SAL initialization has completed,
82 * SAL has set up the following fields of the reserved page header:
83 * SAL_signature, SAL_version, partid, and nasids_size. The other
84 * fields are set up by XPC. (xpc_rsvd_page points to the local
85 * partition's reserved page.)
86 *
87 * part_nasids mask
88 * mach_nasids mask
89 *
90 * SAL also sets up two bitmaps (or masks), one that reflects the actual
91 * nasids in this partition (part_nasids), and the other that reflects
92 * the actual nasids in the entire machine (mach_nasids). We're only
93 * interested in the even numbered nasids (which contain the processors
94 * and/or memory), so we only need half as many bits to represent the
95 * nasids. The part_nasids mask is located starting at the first cacheline
96 * following the reserved page header. The mach_nasids mask follows right
97 * after the part_nasids mask. The size in bytes of each mask is reflected
98 * by the reserved page header field 'nasids_size'. (Local partition's
99 * mask pointers are xpc_part_nasids and xpc_mach_nasids.)
100 *
101 * vars
102 * vars part
103 *
104 * Immediately following the mach_nasids mask are the XPC variables
105 * required by other partitions. First are those that are generic to all
106 * partitions (vars), followed on the next available cacheline by those
 107 * which are partition specific (vars part). These are set up by XPC.
108 * (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
109 *
110 * Note: Until vars_pa is set, the partition XPC code has not been initialized.
111 */
112struct xpc_rsvd_page {
113 u64 SAL_signature; /* SAL: unique signature */
114 u64 SAL_version; /* SAL: version */
115 u8 partid; /* SAL: partition ID */
116 u8 version;
117 u8 pad1[6]; /* align to next u64 in cacheline */
118 volatile u64 vars_pa;
 119 struct timespec stamp; /* time when reserved page was set up by XPC */
120 u64 pad2[9]; /* align to last u64 in cacheline */
121 u64 nasids_size; /* SAL: size of each nasid mask in bytes */
122};
123
124#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
125
126#define XPC_SUPPORTS_RP_STAMP(_version) \
127 (_version >= _XPC_VERSION(1,1))
128
129/*
130 * compare stamps - the return value is:
131 *
132 * < 0, if stamp1 < stamp2
133 * = 0, if stamp1 == stamp2
134 * > 0, if stamp1 > stamp2
135 */
136static inline int
137xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
138{
139 int ret;
140
141
142 if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
143 ret = stamp1->tv_nsec - stamp2->tv_nsec;
144 }
145 return ret;
146}
147
148
149/*
150 * Define the structures by which XPC variables can be exported to other
151 * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
152 */
153
154/*
155 * The following structure describes the partition generic variables
156 * needed by other partitions in order to properly initialize.
157 *
158 * struct xpc_vars version number also applies to struct xpc_vars_part.
159 * Changes to either structure and/or related functionality should be
160 * reflected by incrementing either the major or minor version numbers
161 * of struct xpc_vars.
162 */
163struct xpc_vars {
164 u8 version;
165 u64 heartbeat;
166 u64 heartbeating_to_mask;
167 u64 heartbeat_offline; /* if 0, heartbeat should be changing */
168 int act_nasid;
169 int act_phys_cpuid;
170 u64 vars_part_pa;
171 u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */
172 AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */
173};
174
175#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
176
177#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
178 (_version >= _XPC_VERSION(3,1))
179
180
181static inline int
182xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
183{
184 return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
185}
186
187static inline void
188xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
189{
190 u64 old_mask, new_mask;
191
192 do {
193 old_mask = vars->heartbeating_to_mask;
194 new_mask = (old_mask | (1UL << partid));
195 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
196 old_mask);
197}
198
199static inline void
200xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
201{
202 u64 old_mask, new_mask;
203
204 do {
205 old_mask = vars->heartbeating_to_mask;
206 new_mask = (old_mask & ~(1UL << partid));
207 } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
208 old_mask);
209}
210
211
212/*
213 * The AMOs page consists of a number of AMO variables which are divided into
 214 * four groups. The first two groups are used to identify an IRQ's sender.
215 * These two groups consist of 64 and 128 AMO variables respectively. The last
216 * two groups, consisting of just one AMO variable each, are used to identify
217 * the remote partitions that are currently engaged (from the viewpoint of
218 * the XPC running on the remote partition).
219 */
220#define XPC_NOTIFY_IRQ_AMOS 0
221#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
222#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
223#define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)
224
225
226/*
227 * The following structure describes the per partition specific variables.
228 *
229 * An array of these structures, one per partition, will be defined. As a
230 * partition becomes active XPC will copy the array entry corresponding to
231 * itself from that partition. It is desirable that the size of this
232 * structure evenly divide into a cacheline, such that none of the entries
233 * in this array crosses a cacheline boundary. As it is now, each entry
234 * occupies half a cacheline.
235 */
236struct xpc_vars_part {
237 volatile u64 magic;
238
239 u64 openclose_args_pa; /* physical address of open and close args */
240 u64 GPs_pa; /* physical address of Get/Put values */
241
242 u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */
243 int IPI_nasid; /* nasid of where to send IPIs */
244 int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */
245
246 u8 nchannels; /* #of defined channels supported */
247
248 u8 reserved[23]; /* pad to a full 64 bytes */
249};
250
251/*
252 * The vars_part MAGIC numbers play a part in the first contact protocol.
253 *
254 * MAGIC1 indicates that the per partition specific variables for a remote
255 * partition have been initialized by this partition.
256 *
 257 * MAGIC2 indicates that this partition has pulled the remote partition's
258 * per partition variables that pertain to this partition.
259 */
260#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
261#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
262
263
264/* the reserved page sizes and offsets */
265
266#define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
267#define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars))
268
269#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
270#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
271#define XPC_RP_VARS(_rp) ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
 272#define XPC_RP_VARS_PART(_rp) (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)
273
274
275/*
276 * Functions registered by add_timer() or called by kernel_thread() only
277 * allow for a single 64-bit argument. The following macros can be used to
278 * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
279 * the passed argument.
280 */
281#define XPC_PACK_ARGS(_arg1, _arg2) \
282 ((((u64) _arg1) & 0xffffffff) | \
283 ((((u64) _arg2) & 0xffffffff) << 32))
284
285#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
286#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
287
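A sketch of the intended use with a timer callback (names are illustrative):

static void channel_timeout(unsigned long data)
{
	partid_t partid = XPC_UNPACK_ARG1(data);
	int ch_number = XPC_UNPACK_ARG2(data);

	/* ... handle the timeout for (partid, ch_number) ... */
}

/* arming: timer->data = XPC_PACK_ARGS(partid, ch_number); */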
288
289
290/*
291 * Define a Get/Put value pair (pointers) used with a message queue.
292 */
293struct xpc_gp {
294 volatile s64 get; /* Get value */
295 volatile s64 put; /* Put value */
296};
297
298#define XPC_GP_SIZE \
299 L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
300
301
302
303/*
304 * Define a structure that contains arguments associated with opening and
305 * closing a channel.
306 */
307struct xpc_openclose_args {
308 u16 reason; /* reason why channel is closing */
309 u16 msg_size; /* sizeof each message entry */
310 u16 remote_nentries; /* #of message entries in remote msg queue */
311 u16 local_nentries; /* #of message entries in local msg queue */
312 u64 local_msgqueue_pa; /* physical address of local message queue */
313};
314
315#define XPC_OPENCLOSE_ARGS_SIZE \
316 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
317
318
319
320/* struct xpc_msg flags */
321
322#define XPC_M_DONE 0x01 /* msg has been received/consumed */
323#define XPC_M_READY 0x02 /* msg is ready to be sent */
324#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */
325
326
327#define XPC_MSG_ADDRESS(_payload) \
328 ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
329
330
331
332/*
333 * Defines notify entry.
334 *
335 * This is used to notify a message's sender that their message was received
336 * and consumed by the intended recipient.
337 */
338struct xpc_notify {
339 volatile u8 type; /* type of notification */
340
341 /* the following two fields are only used if type == XPC_N_CALL */
342 xpc_notify_func func; /* user's notify function */
343 void *key; /* pointer to user's key */
344};
345
346/* struct xpc_notify type of notification */
347
348#define XPC_N_CALL 0x01 /* notify function provided by user */
349
350
351
352/*
353 * Define the structure that manages all the stuff required by a channel. In
 354 * particular, it is used to manage the messages sent across the channel.
355 *
356 * This structure is private to a partition, and is NOT shared across the
357 * partition boundary.
358 *
359 * There is an array of these structures for each remote partition. It is
360 * allocated at the time a partition becomes active. The array contains one
361 * of these structures for each potential channel connection to that partition.
362 *
363 * Each of these structures manages two message queues (circular buffers).
364 * They are allocated at the time a channel connection is made. One of
365 * these message queues (local_msgqueue) holds the locally created messages
366 * that are destined for the remote partition. The other of these message
367 * queues (remote_msgqueue) is a locally cached copy of the remote partition's
368 * own local_msgqueue.
369 *
370 * The following is a description of the Get/Put pointers used to manage these
371 * two message queues. Consider the local_msgqueue to be on one partition
372 * and the remote_msgqueue to be its cached copy on another partition. A
373 * description of what each of the lettered areas contains is included.
374 *
375 *
376 * local_msgqueue remote_msgqueue
377 *
378 * |/////////| |/////////|
379 * w_remote_GP.get --> +---------+ |/////////|
380 * | F | |/////////|
381 * remote_GP.get --> +---------+ +---------+ <-- local_GP->get
382 * | | | |
383 * | | | E |
384 * | | | |
385 * | | +---------+ <-- w_local_GP.get
386 * | B | |/////////|
387 * | | |////D////|
388 * | | |/////////|
389 * | | +---------+ <-- w_remote_GP.put
390 * | | |////C////|
391 * local_GP->put --> +---------+ +---------+ <-- remote_GP.put
392 * | | |/////////|
393 * | A | |/////////|
394 * | | |/////////|
395 * w_local_GP.put --> +---------+ |/////////|
396 * |/////////| |/////////|
397 *
398 *
399 * ( remote_GP.[get|put] are cached copies of the remote
400 * partition's local_GP->[get|put], and thus their values can
401 * lag behind their counterparts on the remote partition. )
402 *
403 *
404 * A - Messages that have been allocated, but have not yet been sent to the
405 * remote partition.
406 *
407 * B - Messages that have been sent, but have not yet been acknowledged by the
408 * remote partition as having been received.
409 *
410 * C - Area that needs to be prepared for the copying of sent messages, by
411 * the clearing of the message flags of any previously received messages.
412 *
413 * D - Area into which sent messages are to be copied from the remote
414 * partition's local_msgqueue and then delivered to their intended
415 * recipients. [ To allow for a multi-message copy, another pointer
416 * (next_msg_to_pull) has been added to keep track of the next message
417 * number needing to be copied (pulled). It chases after w_remote_GP.put.
418 * Any messages lying between w_local_GP.get and next_msg_to_pull have
419 * been copied and are ready to be delivered. ]
420 *
421 * E - Messages that have been copied and delivered, but have not yet been
422 * acknowledged by the recipient as having been received.
423 *
424 * F - Messages that have been acknowledged, but XPC has not yet notified the
425 * sender that the message was received by its intended recipient.
426 * This is also an area that needs to be prepared for the allocating of
427 * new messages, by the clearing of the message flags of the acknowledged
428 * messages.
429 */
430struct xpc_channel {
431 partid_t partid; /* ID of remote partition connected */
432 spinlock_t lock; /* lock for updating this structure */
433 u32 flags; /* general flags */
434
435 enum xpc_retval reason; /* reason why channel is disconnect'g */
436 int reason_line; /* line# disconnect initiated from */
437
438 u16 number; /* channel # */
439
440 u16 msg_size; /* sizeof each msg entry */
441 u16 local_nentries; /* #of msg entries in local msg queue */
442 u16 remote_nentries; /* #of msg entries in remote msg queue*/
443
444 void *local_msgqueue_base; /* base address of kmalloc'd space */
445 struct xpc_msg *local_msgqueue; /* local message queue */
446 void *remote_msgqueue_base; /* base address of kmalloc'd space */
447 struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
448 /* local message queue */
449 u64 remote_msgqueue_pa; /* phys addr of remote partition's */
450 /* local message queue */
451
452 atomic_t references; /* #of external references to queues */
453
454 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
455 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
456
457 u8 delayed_IPI_flags; /* IPI flags received, but delayed */
458 /* action until channel disconnected */
459
460 /* queue of msg senders who want to be notified when msg received */
461
462 atomic_t n_to_notify; /* #of msg senders to notify */
463 struct xpc_notify *notify_queue;/* notify queue for messages sent */
464
465 xpc_channel_func func; /* user's channel function */
466 void *key; /* pointer to user's key */
467
468 struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
469 struct completion wdisconnect_wait; /* wait for channel disconnect */
470
471 struct xpc_openclose_args *local_openclose_args; /* args passed on */
472 /* opening or closing of channel */
473
474 /* various flavors of local and remote Get/Put values */
475
476 struct xpc_gp *local_GP; /* local Get/Put values */
477 struct xpc_gp remote_GP; /* remote Get/Put values */
478 struct xpc_gp w_local_GP; /* working local Get/Put values */
479 struct xpc_gp w_remote_GP; /* working remote Get/Put values */
480 s64 next_msg_to_pull; /* Put value of next msg to pull */
481
482 /* kthread management related fields */
483
484// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
485// >>> allow the assigned limit to be unbounded and let the idle limit be
486// >>> dynamic, dependent on activity over the last interval of time
487 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
488 u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
489 atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
490 u32 kthreads_idle_limit; /* limit on #of kthreads idle */
491 atomic_t kthreads_active; /* #of kthreads actively working */
492 // >>> following field is temporary
493 u32 kthreads_created; /* total #of kthreads created */
494
495 wait_queue_head_t idle_wq; /* idle kthread wait queue */
496
497} ____cacheline_aligned;
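
Because all of the Get/Put values above are monotonically increasing s64
message numbers rather than wrapping indices, each lettered area in the
diagram reduces to a difference of two pointers. A minimal sketch of that
arithmetic, consistent with the diagram (the helper name is hypothetical;
the fields are the real struct xpc_channel members just defined):

        /* editorial sketch -- helper name is hypothetical, fields are real */
        static inline void
        xpc_queue_accounting(struct xpc_channel *ch, s64 *nfree, s64 *ndeliver)
        {
                /* areas A+B: entries the local sender still owns */
                s64 inuse = ch->w_local_GP.put - ch->w_remote_GP.get;

                /* entries still available for allocating new messages */
                *nfree = ch->local_nentries - inuse;

                /* areas C+D: sent messages not yet copied and delivered */
                *ndeliver = ch->w_remote_GP.put - ch->w_local_GP.get;
        }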
498
499
500/* struct xpc_channel flags */
501
502#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
503
504#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
505#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
506#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
507#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
508
509#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
510#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */
511#define XPC_C_CONNECTEDCALLOUT_MADE \
512 0x00000080 /* connected callout completed */
513#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */
514#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */
515
516#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */
517#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */
518#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */
519#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */
520
521#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */
522#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */
523#define XPC_C_DISCONNECTINGCALLOUT \
524 0x00010000 /* disconnecting callout initiated */
525#define XPC_C_DISCONNECTINGCALLOUT_MADE \
526 0x00020000 /* disconnecting callout completed */
527#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */
528
529
530
531/*
532 * Manages channels on a partition basis. There is one of these structures
533 * for each partition (a partition will never utilize the structure that
534 * represents itself).
535 */
536struct xpc_partition {
537
538 /* XPC HB infrastructure */
539
540 u8 remote_rp_version; /* version# of partition's rsvd pg */
541 struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
542 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
543 u64 remote_vars_pa; /* phys addr of partition's vars */
544 u64 remote_vars_part_pa; /* phys addr of partition's vars part */
545 u64 last_heartbeat; /* HB at last read */
546 u64 remote_amos_page_pa; /* phys addr of partition's amos page */
547 int remote_act_nasid; /* active part's act/deact nasid */
548 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
549 u32 act_IRQ_rcvd; /* IRQs since activation */
550 spinlock_t act_lock; /* protect updating of act_state */
551 u8 act_state; /* from XPC HB viewpoint */
552 u8 remote_vars_version; /* version# of partition's vars */
553 enum xpc_retval reason; /* reason partition is deactivating */
554 int reason_line; /* line# deactivation initiated from */
555 int reactivate_nasid; /* nasid in partition to reactivate */
556
557 unsigned long disengage_request_timeout; /* timeout in jiffies */
558 struct timer_list disengage_request_timer;
559
560
561 /* XPC infrastructure referencing and teardown control */
562
563 volatile u8 setup_state; /* infrastructure setup state */
564 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
565 atomic_t references; /* #of references to infrastructure */
566
567
568 /*
569 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
570 * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
571 * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
572 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
573 */
574
575
576 u8 nchannels; /* #of defined channels supported */
577 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
578 atomic_t nchannels_engaged;/* #of channels engaged with remote part */
579 struct xpc_channel *channels;/* array of channel structures */
580
581 void *local_GPs_base; /* base address of kmalloc'd space */
582 struct xpc_gp *local_GPs; /* local Get/Put values */
583 void *remote_GPs_base; /* base address of kmalloc'd space */
584 struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
585 /* values */
586 u64 remote_GPs_pa; /* phys address of remote partition's local */
587 /* Get/Put values */
588
589
590 /* fields used to pass args when opening or closing a channel */
591
592 void *local_openclose_args_base; /* base address of kmalloc'd space */
593 struct xpc_openclose_args *local_openclose_args; /* local's args */
594 void *remote_openclose_args_base; /* base address of kmalloc'd space */
595 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
596 /* args */
597 u64 remote_openclose_args_pa; /* phys addr of remote's args */
598
599
600 /* IPI sending, receiving and handling related fields */
601
602 int remote_IPI_nasid; /* nasid of where to send IPIs */
603 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
604 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
605
606 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
607 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
608 char IPI_owner[8]; /* IPI owner's name */
609 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
610
611 spinlock_t IPI_lock; /* IPI handler lock */
612
613
614 /* channel manager related fields */
615
616 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
617 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
618
619} ____cacheline_aligned;
620
621
622/* struct xpc_partition act_state values (for XPC HB) */
623
624#define XPC_P_INACTIVE 0x00 /* partition is not active */
625#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */
626#define XPC_P_ACTIVATING 0x02 /* activation thread started */
627#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
628#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
629
630
631#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
632 xpc_deactivate_partition(__LINE__, (_p), (_reason))
633
634
635/* struct xpc_partition setup_state values */
636
637#define XPC_P_UNSET 0x00 /* infrastructure was never set up */
638#define XPC_P_SETUP 0x01 /* infrastructure is set up */
639#define XPC_P_WTEARDOWN 0x02 /* waiting to tear down infrastructure */
640#define XPC_P_TORNDOWN 0x03 /* infrastructure is torn down */
641
642
643
644/*
645 * struct xpc_partition dropped_IPI_timer: #of jiffies to wait before
646 * checking for dropped IPIs. These occur whenever an IPI amo write doesn't
647 * complete until after the IPI was received.
648 */
649#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
650
651
652/* number of seconds to wait for other partitions to disengage */
653#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT 90
654
655/* interval in seconds to print 'waiting disengagement' messages */
656#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
657
658
659#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
660
661
662
663/* found in xp_main.c */
664extern struct xpc_registration xpc_registrations[];
665
666
667/* found in xpc_main.c */
668extern struct device *xpc_part;
669extern struct device *xpc_chan;
670extern int xpc_disengage_request_timelimit;
671extern int xpc_disengage_request_timedout;
672extern irqreturn_t xpc_notify_IRQ_handler(int, void *);
673extern void xpc_dropped_IPI_check(struct xpc_partition *);
674extern void xpc_activate_partition(struct xpc_partition *);
675extern void xpc_activate_kthreads(struct xpc_channel *, int);
676extern void xpc_create_kthreads(struct xpc_channel *, int, int);
677extern void xpc_disconnect_wait(int);
678
679
680/* found in xpc_partition.c */
681extern int xpc_exiting;
682extern struct xpc_vars *xpc_vars;
683extern struct xpc_rsvd_page *xpc_rsvd_page;
684extern struct xpc_vars_part *xpc_vars_part;
685extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
686extern char *xpc_remote_copy_buffer;
687extern void *xpc_remote_copy_buffer_base;
688extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
689extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
690extern void xpc_allow_IPI_ops(void);
691extern void xpc_restrict_IPI_ops(void);
692extern int xpc_identify_act_IRQ_sender(void);
693extern int xpc_partition_disengaged(struct xpc_partition *);
694extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
695extern void xpc_mark_partition_inactive(struct xpc_partition *);
696extern void xpc_discovery(void);
697extern void xpc_check_remote_hb(void);
698extern void xpc_deactivate_partition(const int, struct xpc_partition *,
699 enum xpc_retval);
700extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
701
702
703/* found in xpc_channel.c */
704extern void xpc_initiate_connect(int);
705extern void xpc_initiate_disconnect(int);
706extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
707extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
708extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
709 xpc_notify_func, void *);
710extern void xpc_initiate_received(partid_t, int, void *);
711extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
712extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
713extern void xpc_process_channel_activity(struct xpc_partition *);
714extern void xpc_connected_callout(struct xpc_channel *);
715extern void xpc_deliver_msg(struct xpc_channel *);
716extern void xpc_disconnect_channel(const int, struct xpc_channel *,
717 enum xpc_retval, unsigned long *);
718extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
719extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
720extern void xpc_teardown_infrastructure(struct xpc_partition *);
721
722
723
724static inline void
725xpc_wakeup_channel_mgr(struct xpc_partition *part)
726{
727 if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
728 wake_up(&part->channel_mgr_wq);
729 }
730}
731
732
733
734/*
735 * These next two inlines are used to keep us from tearing down a channel's
736 * msg queues while a thread may be referencing them.
737 */
738static inline void
739xpc_msgqueue_ref(struct xpc_channel *ch)
740{
741 atomic_inc(&ch->references);
742}
743
744static inline void
745xpc_msgqueue_deref(struct xpc_channel *ch)
746{
747 s32 refs = atomic_dec_return(&ch->references);
748
749 DBUG_ON(refs < 0);
750 if (refs == 0) {
751 xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
752 }
753}
754
755
756
757#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
758 xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
759
760
761/*
762 * These two inlines are used to keep us from tearing down a partition's
763 * setup infrastructure while a thread may be referencing it.
764 */
765static inline void
766xpc_part_deref(struct xpc_partition *part)
767{
768 s32 refs = atomic_dec_return(&part->references);
769
770
771 DBUG_ON(refs < 0);
772 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
773 wake_up(&part->teardown_wq);
774 }
775}
776
777static inline int
778xpc_part_ref(struct xpc_partition *part)
779{
780 int setup;
781
782
783 atomic_inc(&part->references);
784 setup = (part->setup_state == XPC_P_SETUP);
785 if (!setup) {
786 xpc_part_deref(part);
787 }
788 return setup;
789}
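
A typical caller-side sketch of the reference protocol built from these
inlines (ch_number and the surrounding logic are hypothetical; the calls
are the real ones defined above):

        if (xpc_part_ref(part)) {
                /* infrastructure can't be torn down while we hold this ref */
                struct xpc_channel *ch = &part->channels[ch_number];

                xpc_msgqueue_ref(ch);
                /* ... safe to touch ch->local_msgqueue/remote_msgqueue ... */
                xpc_msgqueue_deref(ch);

                xpc_part_deref(part);
        }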
790
791
792
793/*
794 * The following macro sets the reason and reason_line fields
795 * found in both the struct xpc_channel and struct xpc_partition
796 * structures.
797 */
798#define XPC_SET_REASON(_p, _reason, _line) \
799 { \
800 (_p)->reason = _reason; \
801 (_p)->reason_line = _line; \
802 }
803
804
805
806/*
807 * This next set of inlines is used to keep track of when a partition is
808 * potentially engaged in accessing memory belonging to another partition.
809 */
810
811static inline void
812xpc_mark_partition_engaged(struct xpc_partition *part)
813{
814 unsigned long irq_flags;
815 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
816 (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
817
818
819 local_irq_save(irq_flags);
820
821 /* set bit corresponding to our partid in remote partition's AMO */
822 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
823 (1UL << sn_partition_id));
824 /*
825 * We must always use the nofault function regardless of whether we
826 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
827 * didn't, we'd never know that the other partition is down and would
828 * keep sending IPIs and AMOs to it until the heartbeat times out.
829 */
830 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
831 variable), xp_nofault_PIOR_target));
832
833 local_irq_restore(irq_flags);
834}
835
836static inline void
837xpc_mark_partition_disengaged(struct xpc_partition *part)
838{
839 unsigned long irq_flags;
840 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
841 (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
842
843
844 local_irq_save(irq_flags);
845
846 /* clear bit corresponding to our partid in remote partition's AMO */
847 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
848 ~(1UL << sn_partition_id));
849 /*
850 * We must always use the nofault function regardless of whether we
851 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
852 * didn't, we'd never know that the other partition is down and would
853 * keep sending IPIs and AMOs to it until the heartbeat times out.
854 */
855 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
856 variable), xp_nofault_PIOR_target));
857
858 local_irq_restore(irq_flags);
859}
860
861static inline void
862xpc_request_partition_disengage(struct xpc_partition *part)
863{
864 unsigned long irq_flags;
865 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
866 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
867
868
869 local_irq_save(irq_flags);
870
871 /* set bit corresponding to our partid in remote partition's AMO */
872 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
873 (1UL << sn_partition_id));
874 /*
875 * We must always use the nofault function regardless of whether we
876 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
877 * didn't, we'd never know that the other partition is down and would
878 * keep sending IPIs and AMOs to it until the heartbeat times out.
879 */
880 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
881 variable), xp_nofault_PIOR_target));
882
883 local_irq_restore(irq_flags);
884}
885
886static inline void
887xpc_cancel_partition_disengage_request(struct xpc_partition *part)
888{
889 unsigned long irq_flags;
890 AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
891 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
892
893
894 local_irq_save(irq_flags);
895
896 /* clear bit corresponding to our partid in remote partition's AMO */
897 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
898 ~(1UL << sn_partition_id));
899 /*
900 * We must always use the nofault function regardless of whether we
901 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
902 * didn't, we'd never know that the other partition is down and would
903 * keep sending IPIs and AMOs to it until the heartbeat times out.
904 */
905 (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
906 variable), xp_nofault_PIOR_target));
907
908 local_irq_restore(irq_flags);
909}
910
911static inline u64
912xpc_partition_engaged(u64 partid_mask)
913{
914 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
915
916
917 /* return our partition's AMO variable ANDed with partid_mask */
918 return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
919 partid_mask);
920}
921
922static inline u64
923xpc_partition_disengage_requested(u64 partid_mask)
924{
925 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
926
927
928 /* return our partition's AMO variable ANDed with partid_mask */
929 return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
930 partid_mask);
931}
932
933static inline void
934xpc_clear_partition_engaged(u64 partid_mask)
935{
936 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
937
938
939 /* clear bit(s) based on partid_mask in our partition's AMO */
940 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
941 ~partid_mask);
942}
943
944static inline void
945xpc_clear_partition_disengage_request(u64 partid_mask)
946{
947 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
948
949
950 /* clear bit(s) based on partid_mask in our partition's AMO */
951 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
952 ~partid_mask);
953}
954
955
956
957/*
958 * The following set of macros and inlines are used for the sending and
959 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
960 * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
961 * the other that is associated with channel activity (SGI_XPC_NOTIFY).
962 */
963
964static inline u64
965xpc_IPI_receive(AMO_t *amo)
966{
967 return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
968}
969
970
971static inline enum xpc_retval
972xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
973{
974 int ret = 0;
975 unsigned long irq_flags;
976
977
978 local_irq_save(irq_flags);
979
980 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
981 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
982
983 /*
984 * We must always use the nofault function regardless of whether we
985 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
986 * didn't, we'd never know that the other partition is down and would
987 * keep sending IPIs and AMOs to it until the heartbeat times out.
988 */
989 ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
990 xp_nofault_PIOR_target));
991
992 local_irq_restore(irq_flags);
993
994 return ((ret == 0) ? xpcSuccess : xpcPioReadError);
995}
996
997
998/*
999 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
1000 */
1001
1002/*
1003 * Flag the appropriate AMO variable and send an IPI to the specified node.
1004 */
1005static inline void
1006xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
1007 int to_phys_cpuid)
1008{
1009 int w_index = XPC_NASID_W_INDEX(from_nasid);
1010 int b_index = XPC_NASID_B_INDEX(from_nasid);
1011 AMO_t *amos = (AMO_t *) __va(amos_page_pa +
1012 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
1013
1014
1015 (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
1016 to_phys_cpuid, SGI_XPC_ACTIVATE);
1017}
1018
1019static inline void
1020xpc_IPI_send_activate(struct xpc_vars *vars)
1021{
1022 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
1023 vars->act_nasid, vars->act_phys_cpuid);
1024}
1025
1026static inline void
1027xpc_IPI_send_activated(struct xpc_partition *part)
1028{
1029 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
1030 part->remote_act_nasid, part->remote_act_phys_cpuid);
1031}
1032
1033static inline void
1034xpc_IPI_send_reactivate(struct xpc_partition *part)
1035{
1036 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
1037 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
1038}
1039
1040static inline void
1041xpc_IPI_send_disengage(struct xpc_partition *part)
1042{
1043 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
1044 part->remote_act_nasid, part->remote_act_phys_cpuid);
1045}
1046
1047
1048/*
1049 * IPIs associated with SGI_XPC_NOTIFY IRQ.
1050 */
1051
1052/*
1053 * Send an IPI to the remote partition that is associated with the
1054 * specified channel.
1055 */
1056#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
1057 xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
1058
1059static inline void
1060xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
1061 unsigned long *irq_flags)
1062{
1063 struct xpc_partition *part = &xpc_partitions[ch->partid];
1064 enum xpc_retval ret;
1065
1066
1067 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
1068 ret = xpc_IPI_send(part->remote_IPI_amo_va,
1069 (u64) ipi_flag << (ch->number * 8),
1070 part->remote_IPI_nasid,
1071 part->remote_IPI_phys_cpuid,
1072 SGI_XPC_NOTIFY);
1073 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
1074 ipi_flag_string, ch->partid, ch->number, ret);
1075 if (unlikely(ret != xpcSuccess)) {
1076 if (irq_flags != NULL) {
1077 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1078 }
1079 XPC_DEACTIVATE_PARTITION(part, ret);
1080 if (irq_flags != NULL) {
1081 spin_lock_irqsave(&ch->lock, *irq_flags);
1082 }
1083 }
1084 }
1085}
1086
1087
1088/*
1089 * Make it look like the remote partition, which is associated with the
1090 * specified channel, sent us an IPI. This faked IPI will be handled
1091 * by xpc_dropped_IPI_check().
1092 */
1093#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
1094 xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
1095
1096static inline void
1097xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
1098 char *ipi_flag_string)
1099{
1100 struct xpc_partition *part = &xpc_partitions[ch->partid];
1101
1102
1103 FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
1104 FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
1105 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
1106 ipi_flag_string, ch->partid, ch->number);
1107}
1108
1109
1110/*
1111 * The sending and receiving of IPIs includes the setting of an AMO variable
1112 * to indicate the reason the IPI was sent. The 64-bit variable is divided
1113 * up into eight bytes, ordered from right to left. Byte zero pertains to
1114 * channel 0, byte one to channel 1, and so on. Each byte is described by
1115 * the following IPI flags.
1116 */
1117
1118#define XPC_IPI_CLOSEREQUEST 0x01
1119#define XPC_IPI_CLOSEREPLY 0x02
1120#define XPC_IPI_OPENREQUEST 0x04
1121#define XPC_IPI_OPENREPLY 0x08
1122#define XPC_IPI_MSGREQUEST 0x10
1123
1124
1125/* given an AMO variable and a channel#, get its associated IPI flags */
1126#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
1127#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
1128
1129#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
1130#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010))
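
A worked example of the byte-per-channel layout (the values are
hypothetical): raising close-request and open-reply for channel 2 places
0x09 in byte two of the AMO variable, which the get/any macros then recover:

        u64 amo_flags = 0;

        XPC_SET_IPI_FLAGS(amo_flags, 2,
                          XPC_IPI_CLOSEREQUEST | XPC_IPI_OPENREPLY);
        /* amo_flags == 0x0000000000090000 */

        u8 ch2_flags = XPC_GET_IPI_FLAGS(amo_flags, 2);  /* 0x09 */
        /* XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo_flags) is nonzero */
        /* XPC_ANY_MSG_IPI_FLAGS_SET(amo_flags) is zero */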
1131
1132
1133static inline void
1134xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
1135{
1136 struct xpc_openclose_args *args = ch->local_openclose_args;
1137
1138
1139 args->reason = ch->reason;
1140
1141 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
1142}
1143
1144static inline void
1145xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
1146{
1147 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
1148}
1149
1150static inline void
1151xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
1152{
1153 struct xpc_openclose_args *args = ch->local_openclose_args;
1154
1155
1156 args->msg_size = ch->msg_size;
1157 args->local_nentries = ch->local_nentries;
1158
1159 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
1160}
1161
1162static inline void
1163xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
1164{
1165 struct xpc_openclose_args *args = ch->local_openclose_args;
1166
1167
1168 args->remote_nentries = ch->remote_nentries;
1169 args->local_nentries = ch->local_nentries;
1170 args->local_msgqueue_pa = __pa(ch->local_msgqueue);
1171
1172 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
1173}
1174
1175static inline void
1176xpc_IPI_send_msgrequest(struct xpc_channel *ch)
1177{
1178 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
1179}
1180
1181static inline void
1182xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
1183{
1184 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
1185}
1186
1187
1188/*
1189 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
1190 * pages are located in the lowest granule, which uses 4k pages for cached
1191 * references and an alternate TLB handler so that a cacheable mapping is
1192 * never provided for the entire region. This prevents speculative reads of
1193 * cached copies of our lines, which would cause the SHUB to generate a
1194 * PI FSB Protocol error. For XPC, we need 64 AMO variables (based on
1195 * XP_MAX_PARTITIONS) for message notification, an additional 128 AMO
1196 * variables (based on XP_NASID_MASK_WORDS) for partition activation, and
1197 * 2 AMO variables for partition deactivation.
1198 */
1199static inline AMO_t *
1200xpc_IPI_init(int index)
1201{
1202 AMO_t *amo = xpc_vars->amos_page + index;
1203
1204
1205 (void) xpc_IPI_receive(amo); /* clear AMO variable */
1206 return amo;
1207}
1208
1209
1210
1211static inline enum xpc_retval
1212xpc_map_bte_errors(bte_result_t error)
1213{
1214 if (error == BTE_SUCCESS)
1215 return xpcSuccess;
1216
1217 if (is_shub2()) {
1218 if (BTE_VALID_SH2_ERROR(error))
1219 return xpcBteSh2Start + error;
1220 return xpcBteUnmappedError;
1221 }
1222 switch (error) {
1223 case BTE_SUCCESS: return xpcSuccess;
1224 case BTEFAIL_DIR: return xpcBteDirectoryError;
1225 case BTEFAIL_POISON: return xpcBtePoisonError;
1226 case BTEFAIL_WERR: return xpcBteWriteError;
1227 case BTEFAIL_ACCESS: return xpcBteAccessError;
1228 case BTEFAIL_PWERR: return xpcBtePWriteError;
1229 case BTEFAIL_PRERR: return xpcBtePReadError;
1230 case BTEFAIL_TOUT: return xpcBteTimeOutError;
1231 case BTEFAIL_XTERR: return xpcBteXtalkError;
1232 case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable;
1233 default: return xpcBteUnmappedError;
1234 }
1235}
1236
1237
1238
1239/*
1240 * Check to see if there is any channel activity to/from the specified
1241 * partition.
1242 */
1243static inline void
1244xpc_check_for_channel_activity(struct xpc_partition *part)
1245{
1246 u64 IPI_amo;
1247 unsigned long irq_flags;
1248
1249
1250 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
1251 if (IPI_amo == 0) {
1252 return;
1253 }
1254
1255 spin_lock_irqsave(&part->IPI_lock, irq_flags);
1256 part->local_IPI_amo |= IPI_amo;
1257 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
1258
1259 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
1260 XPC_PARTID(part), IPI_amo);
1261
1262 xpc_wakeup_channel_mgr(part);
1263}
1264
1265
1266#endif /* _ASM_IA64_SN_XPC_H */
1267
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 595112bca3cc..26e250bfb912 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -146,23 +146,23 @@ do { \
146 146
147# define local_irq_save(x) \ 147# define local_irq_save(x) \
148do { \ 148do { \
149 unsigned long psr; \ 149 unsigned long __psr; \
150 \ 150 \
151 __local_irq_save(psr); \ 151 __local_irq_save(__psr); \
152 if (psr & IA64_PSR_I) \ 152 if (__psr & IA64_PSR_I) \
153 __save_ip(); \ 153 __save_ip(); \
154 (x) = psr; \ 154 (x) = __psr; \
155} while (0) 155} while (0)
156 156
157# define local_irq_disable() do { unsigned long x; local_irq_save(x); } while (0) 157# define local_irq_disable() do { unsigned long __x; local_irq_save(__x); } while (0)
158 158
159# define local_irq_restore(x) \ 159# define local_irq_restore(x) \
160do { \ 160do { \
161 unsigned long old_psr, psr = (x); \ 161 unsigned long __old_psr, __psr = (x); \
162 \ 162 \
163 local_save_flags(old_psr); \ 163 local_save_flags(__old_psr); \
164 __local_irq_restore(psr); \ 164 __local_irq_restore(__psr); \
165 if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) \ 165 if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I)) \
166 __save_ip(); \ 166 __save_ip(); \
167} while (0) 167} while (0)
168 168
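
The __-prefixed temporaries above are plain macro hygiene: with the old
spelling, a caller whose own flags variable happened to be named psr would
have it shadowed by the macro's local, and the saved PSR would never reach
it. A self-contained user-space analog of the bug (BAD_SAVE/GOOD_SAVE are
illustrative names, not kernel macros):

        #include <stdio.h>

        #define BAD_SAVE(x)  do { unsigned long psr = 42;   (x) = psr;   } while (0)
        #define GOOD_SAVE(x) do { unsigned long __psr = 42; (x) = __psr; } while (0)

        int main(void)
        {
                unsigned long psr = 0;

                BAD_SAVE(psr);          /* inner psr shadows ours ... */
                printf("%lu\n", psr);   /* ... so this prints 0 */

                GOOD_SAVE(psr);         /* no capture */
                printf("%lu\n", psr);   /* prints 42 */
                return 0;
        }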
@@ -210,6 +210,13 @@ struct task_struct;
210extern void ia64_save_extra (struct task_struct *task); 210extern void ia64_save_extra (struct task_struct *task);
211extern void ia64_load_extra (struct task_struct *task); 211extern void ia64_load_extra (struct task_struct *task);
212 212
213#ifdef CONFIG_VIRT_CPU_ACCOUNTING
214extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
215# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
216#else
217# define IA64_ACCOUNT_ON_SWITCH(p,n)
218#endif
219
213#ifdef CONFIG_PERFMON 220#ifdef CONFIG_PERFMON
214 DECLARE_PER_CPU(unsigned long, pfm_syst_info); 221 DECLARE_PER_CPU(unsigned long, pfm_syst_info);
215# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) 222# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
@@ -222,6 +229,7 @@ extern void ia64_load_extra (struct task_struct *task);
222 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE()) 229 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
223 230
224#define __switch_to(prev,next,last) do { \ 231#define __switch_to(prev,next,last) do { \
232 IA64_ACCOUNT_ON_SWITCH(prev, next); \
225 if (IA64_HAS_EXTRA_STATE(prev)) \ 233 if (IA64_HAS_EXTRA_STATE(prev)) \
226 ia64_save_extra(prev); \ 234 ia64_save_extra(prev); \
227 if (IA64_HAS_EXTRA_STATE(next)) \ 235 if (IA64_HAS_EXTRA_STATE(next)) \
@@ -266,6 +274,10 @@ void cpu_idle_wait(void);
266 274
267void default_idle(void); 275void default_idle(void);
268 276
277#ifdef CONFIG_VIRT_CPU_ACCOUNTING
278extern void account_system_vtime(struct task_struct *);
279#endif
280
269#endif /* __KERNEL__ */ 281#endif /* __KERNEL__ */
270 282
271#endif /* __ASSEMBLY__ */ 283#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 93d83cbe0c8c..f30e05583869 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -31,6 +31,12 @@ struct thread_info {
31 mm_segment_t addr_limit; /* user-level address space limit */ 31 mm_segment_t addr_limit; /* user-level address space limit */
32 int preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */ 32 int preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */
33 struct restart_block restart_block; 33 struct restart_block restart_block;
34#ifdef CONFIG_VIRT_CPU_ACCOUNTING
35 __u64 ac_stamp;
36 __u64 ac_leave;
37 __u64 ac_stime;
38 __u64 ac_utime;
39#endif
34}; 40};
35 41
36#define THREAD_SIZE KERNEL_STACK_SIZE 42#define THREAD_SIZE KERNEL_STACK_SIZE
@@ -62,9 +68,17 @@ struct thread_info {
62#define task_stack_page(tsk) ((void *)(tsk)) 68#define task_stack_page(tsk) ((void *)(tsk))
63 69
64#define __HAVE_THREAD_FUNCTIONS 70#define __HAVE_THREAD_FUNCTIONS
71#ifdef CONFIG_VIRT_CPU_ACCOUNTING
72#define setup_thread_stack(p, org) \
73 *task_thread_info(p) = *task_thread_info(org); \
74 task_thread_info(p)->ac_stime = 0; \
75 task_thread_info(p)->ac_utime = 0; \
76 task_thread_info(p)->task = (p);
77#else
65#define setup_thread_stack(p, org) \ 78#define setup_thread_stack(p, org) \
66 *task_thread_info(p) = *task_thread_info(org); \ 79 *task_thread_info(p) = *task_thread_info(org); \
67 task_thread_info(p)->task = (p); 80 task_thread_info(p)->task = (p);
81#endif
68#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) 82#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
69 83
70#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 84#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -87,7 +101,6 @@ extern void tsk_clear_notify_resume(struct task_struct *tsk);
87#define TIF_SYSCALL_TRACE 2 /* syscall trace active */ 101#define TIF_SYSCALL_TRACE 2 /* syscall trace active */
88#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ 102#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */
89#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ 103#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
90#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
91#define TIF_NOTIFY_RESUME 6 /* resumption notification requested */ 104#define TIF_NOTIFY_RESUME 6 /* resumption notification requested */
92#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 105#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
93#define TIF_MEMDIE 17 106#define TIF_MEMDIE 17
@@ -95,6 +108,7 @@ extern void tsk_clear_notify_resume(struct task_struct *tsk);
95#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */ 108#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
96#define TIF_FREEZE 20 /* is freezing for suspend */ 109#define TIF_FREEZE 20 /* is freezing for suspend */
97#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */ 110#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
111#define TIF_RESTORE_SIGMASK 22 /* restore signal mask in do_signal() */
98 112
99#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 113#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
100#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 114#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
@@ -112,8 +126,7 @@ extern void tsk_clear_notify_resume(struct task_struct *tsk);
112 126
113/* "work to do on user-return" bits */ 127/* "work to do on user-return" bits */
114#define TIF_ALLWORK_MASK (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\ 128#define TIF_ALLWORK_MASK (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
115 _TIF_NEED_RESCHED| _TIF_SYSCALL_TRACE|\ 129 _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE)
116 _TIF_RESTORE_SIGMASK)
117/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ 130/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
118#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) 131#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
119 132
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 26edcb750f9f..20d8a39680c2 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -64,6 +64,32 @@ struct mmu_gather {
64 struct page *pages[FREE_PTE_NR]; 64 struct page *pages[FREE_PTE_NR];
65}; 65};
66 66
67struct ia64_tr_entry {
68 u64 ifa;
69 u64 itir;
70 u64 pte;
71 u64 rr;
72}; /* record for a TR entry */
73
74extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
75extern void ia64_ptr_entry(u64 target_mask, int slot);
76
77extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
78
79/*
80 * Region register macros
81 */
82#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
83#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
84#define RR_VE_MASK 0x0000000000000001L
85#define RR_VE_SHIFT 0
86#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
87#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
88#define RR_PS_MASK 0x00000000000000fcL
89#define RR_PS_SHIFT 2
90#define RR_RID_MASK 0x00000000ffffff00L
91#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
92
67/* Users of the generic TLB shootdown code must declare this storage space. */ 93/* Users of the generic TLB shootdown code must declare this storage space. */
68DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 94DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
69 95
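
A short sketch of how the new region register accessors fit together (the
rr value here is hypothetical: ve=1, ps=16 for 64KB pages, rid=0x1234; note
the set adds no RR_RID() constructor, hence the manual shift):

        u64 rr = RR_VE(1) | RR_PS(16) | ((u64)0x1234 << 8);

        /* RR_TO_VE(rr)  == 1      */
        /* RR_TO_PS(rr)  == 16     */
        /* RR_TO_RID(rr) == 0x1234 */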
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index 7774a1cac0cc..3be25dfed164 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -17,6 +17,7 @@
17 * Now for some TLB flushing routines. This is the kind of stuff that 17 * Now for some TLB flushing routines. This is the kind of stuff that
18 * can be very expensive, so try to avoid them whenever possible. 18 * can be very expensive, so try to avoid them whenever possible.
19 */ 19 */
20extern void setup_ptcg_sem(int max_purges, int from_palo);
20 21
21/* 22/*
22 * Flush everything (kernel mapping may also have changed due to 23 * Flush everything (kernel mapping may also have changed due to
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 2d67b72b18d0..32863b3bb1d3 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -93,7 +93,7 @@ void build_cpu_to_node_map(void);
93 .cache_nice_tries = 2, \ 93 .cache_nice_tries = 2, \
94 .busy_idx = 3, \ 94 .busy_idx = 3, \
95 .idle_idx = 2, \ 95 .idle_idx = 2, \
96 .newidle_idx = 0, /* unused */ \ 96 .newidle_idx = 2, \
97 .wake_idx = 1, \ 97 .wake_idx = 1, \
98 .forkexec_idx = 1, \ 98 .forkexec_idx = 1, \
99 .flags = SD_LOAD_BALANCE \ 99 .flags = SD_LOAD_BALANCE \
@@ -116,6 +116,13 @@ void build_cpu_to_node_map(void);
116#define smt_capable() (smp_num_siblings > 1) 116#define smt_capable() (smp_num_siblings > 1)
117#endif 117#endif
118 118
119extern void arch_fix_phys_package_id(int num, u32 slot);
120
121#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
122 CPU_MASK_ALL : \
123 node_to_cpumask(pcibus_to_node(bus)) \
124 )
125
119#include <asm-generic/topology.h> 126#include <asm-generic/topology.h>
120 127
121#endif /* _ASM_IA64_TOPOLOGY_H */ 128#endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h
index bb8559888103..7bddc7f58584 100644
--- a/include/asm-ia64/unaligned.h
+++ b/include/asm-ia64/unaligned.h
@@ -1,6 +1,11 @@
1#ifndef _ASM_IA64_UNALIGNED_H 1#ifndef _ASM_IA64_UNALIGNED_H
2#define _ASM_IA64_UNALIGNED_H 2#define _ASM_IA64_UNALIGNED_H
3 3
4#include <asm-generic/unaligned.h> 4#include <linux/unaligned/le_struct.h>
5#include <linux/unaligned/be_byteshift.h>
6#include <linux/unaligned/generic.h>
7
8#define get_unaligned __get_unaligned_le
9#define put_unaligned __put_unaligned_le
5 10
6#endif /* _ASM_IA64_UNALIGNED_H */ 11#endif /* _ASM_IA64_UNALIGNED_H */
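
Since ia64 is little-endian, get_unaligned()/put_unaligned() now resolve to
the packed-struct LE accessors, while the byteshift header supplies the
explicit big-endian helpers. A small usage sketch (buffer contents are
hypothetical; get_unaligned_be32() is assumed to come from be_byteshift.h):

        u8 buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

        u32 le = get_unaligned((u32 *)(buf + 1));   /* 0x12345678 */
        u32 be = get_unaligned_be32(buf + 1);       /* 0x78563412 */
        put_unaligned(0xdeadbeef, (u32 *)(buf + 1)); /* unaligned LE store */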
diff --git a/include/asm-ia64/uncached.h b/include/asm-ia64/uncached.h
index b82d923b73c1..13d7e65ca3cc 100644
--- a/include/asm-ia64/uncached.h
+++ b/include/asm-ia64/uncached.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved. 2 * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License 5 * under the terms of version 2 of the GNU General Public License
@@ -8,5 +8,5 @@
8 * Prototypes for the uncached page allocator 8 * Prototypes for the uncached page allocator
9 */ 9 */
10 10
11extern unsigned long uncached_alloc_page(int nid); 11extern unsigned long uncached_alloc_page(int starting_nid, int n_pages);
12extern void uncached_free_page(unsigned long); 12extern void uncached_free_page(unsigned long uc_addr, int n_pages);
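
The widened uncached-allocator API now deals in runs of pages: a hedged
caller-side sketch (nid and the failure convention are assumptions), which
allocates four contiguous uncached pages starting the node search at a
preferred node and then frees them:

        unsigned long uc_addr = uncached_alloc_page(nid, 4);

        if (uc_addr != 0)       /* assumption: 0 still signals failure */
                uncached_free_page(uc_addr, 4);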