Diffstat (limited to 'arch/ia64/include')
-rw-r--r-- | arch/ia64/include/asm/acpi.h | 1
-rw-r--r-- | arch/ia64/include/asm/atomic.h | 1
-rw-r--r-- | arch/ia64/include/asm/auxvec.h | 2
-rw-r--r-- | arch/ia64/include/asm/barrier.h | 68
-rw-r--r-- | arch/ia64/include/asm/exec.h | 14
-rw-r--r-- | arch/ia64/include/asm/futex.h | 1
-rw-r--r-- | arch/ia64/include/asm/io.h | 1
-rw-r--r-- | arch/ia64/include/asm/irqflags.h | 2
-rw-r--r-- | arch/ia64/include/asm/kexec.h | 1
-rw-r--r-- | arch/ia64/include/asm/mca_asm.h | 2
-rw-r--r-- | arch/ia64/include/asm/page.h | 10
-rw-r--r-- | arch/ia64/include/asm/pci.h | 8
-rw-r--r-- | arch/ia64/include/asm/pgtable.h | 1
-rw-r--r-- | arch/ia64/include/asm/processor.h | 8
-rw-r--r-- | arch/ia64/include/asm/sal.h | 1
-rw-r--r-- | arch/ia64/include/asm/setup.h | 18
-rw-r--r-- | arch/ia64/include/asm/sn/pda.h | 1
-rw-r--r-- | arch/ia64/include/asm/spinlock.h | 1
-rw-r--r-- | arch/ia64/include/asm/switch_to.h | 87
-rw-r--r-- | arch/ia64/include/asm/system.h | 207
-rw-r--r-- | arch/ia64/include/asm/uv/uv.h | 1
21 files changed, 224 insertions, 212 deletions
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index a06dfb13d518..301609c3fcec 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -32,7 +32,6 @@
 
 #include <linux/init.h>
 #include <linux/numa.h>
-#include <asm/system.h>
 #include <asm/numa.h>
 
 #define COMPILER_DEPENDENT_INT64	long
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 3fad89ee01cb..7d9116600a36 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
-#include <asm/system.h>
 
 
 #define ATOMIC_INIT(i)	((atomic_t) { (i) })
diff --git a/arch/ia64/include/asm/auxvec.h b/arch/ia64/include/asm/auxvec.h
index 23cebe5685b9..58277fc650ef 100644
--- a/arch/ia64/include/asm/auxvec.h
+++ b/arch/ia64/include/asm/auxvec.h
@@ -8,4 +8,6 @@
 #define AT_SYSINFO	32
 #define AT_SYSINFO_EHDR	33
 
+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
+
 #endif /* _ASM_IA64_AUXVEC_H */
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
new file mode 100644
index 000000000000..60576e06b6fb
--- /dev/null
+++ b/arch/ia64/include/asm/barrier.h
@@ -0,0 +1,68 @@
+/*
+ * Memory barrier definitions.  This is based on information published
+ * in the Processor Abstraction Layer and the System Abstraction Layer
+ * manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#ifndef _ASM_IA64_BARRIER_H
+#define _ASM_IA64_BARRIER_H
+
+#include <linux/compiler.h>
+
+/*
+ * Macros to force memory ordering.  In these descriptions, "previous"
+ * and "subsequent" refer to program order; "visible" means that all
+ * architecturally visible effects of a memory access have occurred
+ * (at a minimum, this means the memory has been read or written).
+ *
+ *   wmb():	Guarantees that all preceding stores to memory-
+ *		like regions are visible before any subsequent
+ *		stores and that all following stores will be
+ *		visible only after all previous stores.
+ *   rmb():	Like wmb(), but for reads.
+ *   mb():	wmb()/rmb() combo, i.e., all previous memory
+ *		accesses are visible before all subsequent
+ *		accesses and vice versa.  This is also known as
+ *		a "fence."
+ *
+ * Note: "mb()" and its variants cannot be used as a fence to order
+ * accesses to memory mapped I/O registers.  For that, mf.a needs to
+ * be used.  However, we don't want to always use mf.a because (a)
+ * it's (presumably) much slower than mf and (b) mf.a is supported for
+ * sequential memory pages only.
+ */
+#define mb()		ia64_mf()
+#define rmb()		mb()
+#define wmb()		mb()
+#define read_barrier_depends()	do { } while(0)
+
+#ifdef CONFIG_SMP
+# define smp_mb()	mb()
+# define smp_rmb()	rmb()
+# define smp_wmb()	wmb()
+# define smp_read_barrier_depends()	read_barrier_depends()
+#else
+# define smp_mb()	barrier()
+# define smp_rmb()	barrier()
+# define smp_wmb()	barrier()
+# define smp_read_barrier_depends()	do { } while(0)
+#endif
+
+/*
+ * XXX check on this ---I suspect what Linus really wants here is
+ * acquire vs release semantics but we can't discuss this stuff with
+ * Linus just yet.  Grrr...
+ */
+#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
+
+/*
+ * The group barrier in front of the rsm & ssm are necessary to ensure
+ * that none of the previous instructions in the same group are
+ * affected by the rsm/ssm.
+ */
+
+#endif /* _ASM_IA64_BARRIER_H */
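The ordering comments in the new barrier.h boil down to the usual publish/consume idiom: a wmb()-class barrier on the writer side paired with an rmb()-class barrier on the reader side. A minimal sketch, assuming kernel context; producer(), consumer() and the two variables are hypothetical illustrations, not code from this patch:

#include <asm/barrier.h>

static int shared_data;
static int data_ready;

/* hypothetical writer: make the data visible before the flag */
static void producer(void)
{
	shared_data = 42;
	smp_wmb();		/* order the data store before the flag store */
	data_ready = 1;
}

/* hypothetical reader: observe the flag, then read the data it guards */
static int consumer(void)
{
	if (data_ready) {
		smp_rmb();	/* order the flag load before the data load */
		return shared_data;
	}
	return -1;
}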
diff --git a/arch/ia64/include/asm/exec.h b/arch/ia64/include/asm/exec.h
new file mode 100644
index 000000000000..b26242490e36
--- /dev/null
+++ b/arch/ia64/include/asm/exec.h
@@ -0,0 +1,14 @@
+/*
+ * Process execution defines.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#ifndef _ASM_IA64_EXEC_H
+#define _ASM_IA64_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* _ASM_IA64_EXEC_H */
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 8428525ddb22..0ab82cc2dc8f 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -4,7 +4,6 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
-#include <asm/system.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
 do {									\
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index e5a6c3530c6c..2c26321c28c3 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -71,7 +71,6 @@ extern unsigned int num_io_spaces;
 #include <asm/intrinsics.h>
 #include <asm/machvec.h>
 #include <asm/page.h>
-#include <asm/system.h>
 #include <asm-generic/iomap.h>
 
 /*
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
index f82d6be2ecd2..2b68d856dc78 100644
--- a/arch/ia64/include/asm/irqflags.h
+++ b/arch/ia64/include/asm/irqflags.h
@@ -10,6 +10,8 @@
 #ifndef _ASM_IA64_IRQFLAGS_H
 #define _ASM_IA64_IRQFLAGS_H
 
+#include <asm/pal.h>
+
 #ifdef CONFIG_IA64_DEBUG_IRQ
 extern unsigned long last_cli_ip;
 static inline void arch_maybe_save_ip(unsigned long flags)
diff --git a/arch/ia64/include/asm/kexec.h b/arch/ia64/include/asm/kexec.h
index e1d58f819d78..aea2b81b03a3 100644
--- a/arch/ia64/include/asm/kexec.h
+++ b/arch/ia64/include/asm/kexec.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_IA64_KEXEC_H
 #define _ASM_IA64_KEXEC_H
 
+#include <asm/setup.h>
 
 /* Maximum physical address we can use pages from */
 #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
diff --git a/arch/ia64/include/asm/mca_asm.h b/arch/ia64/include/asm/mca_asm.h
index dd2a5b134390..13c1d4994d49 100644
--- a/arch/ia64/include/asm/mca_asm.h
+++ b/arch/ia64/include/asm/mca_asm.h
@@ -15,6 +15,8 @@
 #ifndef _ASM_IA64_MCA_ASM_H
 #define _ASM_IA64_MCA_ASM_H
 
+#include <asm/percpu.h>
+
 #define PSR_IC		13
 #define PSR_I		14
 #define PSR_DT		17
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 961a16f43e6b..f1e1b2e3cdb3 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -221,4 +221,14 @@ get_order (unsigned long size)
   (((current->personality & READ_IMPLIES_EXEC) != 0)	\
    ? VM_EXEC : 0))
 
+#define GATE_ADDR		RGN_BASE(RGN_GATE)
+
+/*
+ * 0xa000000000000000+2*PERCPU_PAGE_SIZE
+ * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
+ */
+#define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
+#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
+#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)
+
 #endif /* _ASM_IA64_PAGE_H */
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 279b38ae74aa..c5e6da95522c 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -11,6 +11,14 @@
 #include <asm/scatterlist.h>
 #include <asm/hw_irq.h>
 
+struct pci_vector_struct {
+	__u16 segment;	/* PCI Segment number */
+	__u16 bus;	/* PCI Bus number */
+	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
+	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
+	__u32 irq;	/* IRQ assigned */
+};
+
 /*
  * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
  * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 1a97af31ef17..815810cbbedc 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -16,7 +16,6 @@
 #include <asm/mman.h>
 #include <asm/page.h>
 #include <asm/processor.h>
-#include <asm/system.h>
 #include <asm/types.h>
 
 #define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 691be0b95c1e..483f6c6a4238 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -19,6 +19,9 @@
 #include <asm/ptrace.h>
 #include <asm/ustack.h>
 
+#define __ARCH_WANT_UNLOCKED_CTXSW
+#define ARCH_HAS_PREFETCH_SWITCH_STACK
+
 #define IA64_NUM_PHYS_STACK_REG	96
 #define IA64_NUM_DBG_REGS	8
 
@@ -720,6 +723,11 @@ extern unsigned long boot_option_idle_override;
 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
 			 IDLE_NOMWAIT, IDLE_POLL};
 
+void cpu_idle_wait(void);
+void default_idle(void);
+
+#define ia64_platform_is(x)	(strcmp(x, platform_name) == 0)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_IA64_PROCESSOR_H */
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
index d19ddba4e327..e504f382115e 100644
--- a/arch/ia64/include/asm/sal.h
+++ b/arch/ia64/include/asm/sal.h
@@ -40,7 +40,6 @@
 #include <linux/efi.h>
 
 #include <asm/pal.h>
-#include <asm/system.h>
 #include <asm/fpu.h>
 
 extern spinlock_t sal_lock;
diff --git a/arch/ia64/include/asm/setup.h b/arch/ia64/include/asm/setup.h
index 4399a44355b3..8d56458310b3 100644
--- a/arch/ia64/include/asm/setup.h
+++ b/arch/ia64/include/asm/setup.h
@@ -3,4 +3,22 @@
 
 #define COMMAND_LINE_SIZE	2048
 
+extern struct ia64_boot_param {
+	__u64 command_line;		/* physical address of command line arguments */
+	__u64 efi_systab;		/* physical address of EFI system table */
+	__u64 efi_memmap;		/* physical address of EFI memory map */
+	__u64 efi_memmap_size;		/* size of EFI memory map */
+	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
+	__u32 efi_memdesc_version;	/* memory descriptor version */
+	struct {
+		__u16 num_cols;	/* number of columns on console output device */
+		__u16 num_rows;	/* number of rows on console output device */
+		__u16 orig_x;	/* cursor's x position */
+		__u16 orig_y;	/* cursor's y position */
+	} console_info;
+	__u64 fpswa;		/* physical address of the fpswa interface */
+	__u64 initrd_start;
+	__u64 initrd_size;
+} *ia64_boot_param;
+
 #endif
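The address fields in ia64_boot_param are physical addresses handed over by the boot loader, so consumers translate them with __va() before dereferencing. A minimal sketch, assuming kernel context; setup_cmdline_sketch() is a hypothetical stand-in, not code from this patch:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/setup.h>

static void __init setup_cmdline_sketch(char **cmdline_p)
{
	/* command_line is a physical address; __va() yields its kernel mapping */
	*cmdline_p = __va(ia64_boot_param->command_line);

	if (ia64_boot_param->initrd_size)
		printk(KERN_INFO "initrd at 0x%lx, %lu bytes\n",
		       (unsigned long) ia64_boot_param->initrd_start,
		       (unsigned long) ia64_boot_param->initrd_size);
}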
diff --git a/arch/ia64/include/asm/sn/pda.h b/arch/ia64/include/asm/sn/pda.h
index 1c5108d44d8b..22ae358c8d16 100644
--- a/arch/ia64/include/asm/sn/pda.h
+++ b/arch/ia64/include/asm/sn/pda.h
@@ -10,7 +10,6 @@
 
 #include <linux/cache.h>
 #include <asm/percpu.h>
-#include <asm/system.h>
 
 
 /*
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index b77768d35f93..54ff557d474e 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -15,7 +15,6 @@
 
 #include <linux/atomic.h>
 #include <asm/intrinsics.h>
-#include <asm/system.h>
 
 #define arch_spin_lock_init(x)			((x)->lock = 0)
 
diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h
new file mode 100644
index 000000000000..cb2412fcd17f
--- /dev/null
+++ b/arch/ia64/include/asm/switch_to.h
@@ -0,0 +1,87 @@
+/*
+ * Low-level task switching. This is based on information published in
+ * the Processor Abstraction Layer and the System Abstraction Layer
+ * manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#ifndef _ASM_IA64_SWITCH_TO_H
+#define _ASM_IA64_SWITCH_TO_H
+
+#include <linux/percpu.h>
+
+struct task_struct;
+
+/*
+ * Context switch from one thread to another.  If the two threads have
+ * different address spaces, schedule() has already taken care of
+ * switching to the new address space by calling switch_mm().
+ *
+ * Disabling access to the fph partition and the debug-register
+ * context switch MUST be done before calling ia64_switch_to() since a
+ * newly created thread returns directly to
+ * ia64_ret_from_syscall_clear_r8.
+ */
+extern struct task_struct *ia64_switch_to (void *next_task);
+
+extern void ia64_save_extra (struct task_struct *task);
+extern void ia64_load_extra (struct task_struct *task);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
+# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
+#else
+# define IA64_ACCOUNT_ON_SWITCH(p,n)
+#endif
+
+#ifdef CONFIG_PERFMON
+  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
+# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
+#else
+# define PERFMON_IS_SYSWIDE() (0)
+#endif
+
+#define IA64_HAS_EXTRA_STATE(t)							\
+	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
+	 || PERFMON_IS_SYSWIDE())
+
+#define __switch_to(prev,next,last) do {					\
+	IA64_ACCOUNT_ON_SWITCH(prev, next);					\
+	if (IA64_HAS_EXTRA_STATE(prev))						\
+		ia64_save_extra(prev);						\
+	if (IA64_HAS_EXTRA_STATE(next))						\
+		ia64_load_extra(next);						\
+	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);	\
+	(last) = ia64_switch_to((next));					\
+} while (0)
+
+#ifdef CONFIG_SMP
+/*
+ * In the SMP case, we save the fph state when context-switching away from a thread that
+ * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
+ * pick up the state from task->thread.fph, avoiding the complication of having to fetch
+ * the latest fph state from another CPU.  In other words: eager save, lazy restore.
+ */
+# define switch_to(prev,next,last) do {						\
+	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
+		ia64_psr(task_pt_regs(prev))->mfh = 0;				\
+		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
+		__ia64_save_fpu((prev)->thread.fph);				\
+	}									\
+	__switch_to(prev, next, last);						\
+	/* "next" in old context is "current" in new context */		\
+	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	\
+		     (task_cpu(current) !=					\
+		      task_thread_info(current)->last_cpu))) {			\
+		platform_migrate(current);					\
+		task_thread_info(current)->last_cpu = task_cpu(current);	\
+	}									\
+} while (0)
#else
+# define switch_to(prev,next,last)	__switch_to(prev, next, last)
+#endif
+
+#endif /* _ASM_IA64_SWITCH_TO_H */
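For orientation, the switch_to()/__switch_to() macros above are invoked by the arch-independent scheduler during a context switch. A simplified sketch of that call site, assuming kernel context; context_switch_sketch() is a hypothetical stand-in that elides the real scheduler bookkeeping:

#include <linux/sched.h>	/* struct task_struct, current */
#include <asm/switch_to.h>	/* switch_to() */

static void context_switch_sketch(struct task_struct *prev, struct task_struct *next)
{
	/* switch_mm() has already moved this CPU to next's address space if needed */
	switch_to(prev, next, prev);	/* expands to the __switch_to() macro above */
	barrier();
	/* on return, "prev" names the task this CPU just switched away from */
}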
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
index 6cca30705d50..5b190b48fcd0 100644
--- a/arch/ia64/include/asm/system.h
+++ b/arch/ia64/include/asm/system.h
@@ -1,203 +1,4 @@
-#ifndef _ASM_IA64_SYSTEM_H
-#define _ASM_IA64_SYSTEM_H
-
-/*
- * System defines. Note that this is included both from .c and .S
- * files, so it does only defines, not any C code.  This is based
- * on information published in the Processor Abstraction Layer
- * and the System Abstraction Layer manual.
- *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
- */
-
-#include <asm/kregs.h>
-#include <asm/page.h>
-#include <asm/pal.h>
-#include <asm/percpu.h>
-
-#define GATE_ADDR		RGN_BASE(RGN_GATE)
-
-/*
- * 0xa000000000000000+2*PERCPU_PAGE_SIZE
- * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
- */
-#define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
-#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
-#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)
-
-#ifndef __ASSEMBLY__
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
-
-struct pci_vector_struct {
-	__u16 segment;	/* PCI Segment number */
-	__u16 bus;	/* PCI Bus number */
-	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
-	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
-	__u32 irq;	/* IRQ assigned */
-};
-
-extern struct ia64_boot_param {
-	__u64 command_line;		/* physical address of command line arguments */
-	__u64 efi_systab;		/* physical address of EFI system table */
-	__u64 efi_memmap;		/* physical address of EFI memory map */
-	__u64 efi_memmap_size;		/* size of EFI memory map */
-	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
-	__u32 efi_memdesc_version;	/* memory descriptor version */
-	struct {
-		__u16 num_cols;	/* number of columns on console output device */
-		__u16 num_rows;	/* number of rows on console output device */
-		__u16 orig_x;	/* cursor's x position */
-		__u16 orig_y;	/* cursor's y position */
-	} console_info;
-	__u64 fpswa;		/* physical address of the fpswa interface */
-	__u64 initrd_start;
-	__u64 initrd_size;
-} *ia64_boot_param;
-
-/*
- * Macros to force memory ordering.  In these descriptions, "previous"
- * and "subsequent" refer to program order; "visible" means that all
- * architecturally visible effects of a memory access have occurred
- * (at a minimum, this means the memory has been read or written).
- *
- *   wmb():	Guarantees that all preceding stores to memory-
- *		like regions are visible before any subsequent
- *		stores and that all following stores will be
- *		visible only after all previous stores.
- *   rmb():	Like wmb(), but for reads.
- *   mb():	wmb()/rmb() combo, i.e., all previous memory
- *		accesses are visible before all subsequent
- *		accesses and vice versa.  This is also known as
- *		a "fence."
- *
- * Note: "mb()" and its variants cannot be used as a fence to order
- * accesses to memory mapped I/O registers.  For that, mf.a needs to
- * be used.  However, we don't want to always use mf.a because (a)
- * it's (presumably) much slower than mf and (b) mf.a is supported for
- * sequential memory pages only.
- */
-#define mb()		ia64_mf()
-#define rmb()		mb()
-#define wmb()		mb()
-#define read_barrier_depends()	do { } while(0)
-
-#ifdef CONFIG_SMP
-# define smp_mb()	mb()
-# define smp_rmb()	rmb()
-# define smp_wmb()	wmb()
-# define smp_read_barrier_depends()	read_barrier_depends()
-#else
-# define smp_mb()	barrier()
-# define smp_rmb()	barrier()
-# define smp_wmb()	barrier()
-# define smp_read_barrier_depends()	do { } while(0)
-#endif
-
-/*
- * XXX check on this ---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet.  Grrr...
- */
-#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
-
-/*
- * The group barrier in front of the rsm & ssm are necessary to ensure
- * that none of the previous instructions in the same group are
- * affected by the rsm/ssm.
- */
-
-#ifdef __KERNEL__
-
-/*
- * Context switch from one thread to another.  If the two threads have
- * different address spaces, schedule() has already taken care of
- * switching to the new address space by calling switch_mm().
- *
- * Disabling access to the fph partition and the debug-register
- * context switch MUST be done before calling ia64_switch_to() since a
- * newly created thread returns directly to
- * ia64_ret_from_syscall_clear_r8.
- */
-extern struct task_struct *ia64_switch_to (void *next_task);
-
-struct task_struct;
-
-extern void ia64_save_extra (struct task_struct *task);
-extern void ia64_load_extra (struct task_struct *task);
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
-# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
-#else
-# define IA64_ACCOUNT_ON_SWITCH(p,n)
-#endif
-
-#ifdef CONFIG_PERFMON
-  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
-# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
-#else
-# define PERFMON_IS_SYSWIDE() (0)
-#endif
-
-#define IA64_HAS_EXTRA_STATE(t)							\
-	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
-	 || PERFMON_IS_SYSWIDE())
-
-#define __switch_to(prev,next,last) do {					\
-	IA64_ACCOUNT_ON_SWITCH(prev, next);					\
-	if (IA64_HAS_EXTRA_STATE(prev))						\
-		ia64_save_extra(prev);						\
-	if (IA64_HAS_EXTRA_STATE(next))						\
-		ia64_load_extra(next);						\
-	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);	\
-	(last) = ia64_switch_to((next));					\
-} while (0)
-
-#ifdef CONFIG_SMP
-/*
- * In the SMP case, we save the fph state when context-switching away from a thread that
- * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
- * pick up the state from task->thread.fph, avoiding the complication of having to fetch
- * the latest fph state from another CPU.  In other words: eager save, lazy restore.
- */
-# define switch_to(prev,next,last) do {						\
-	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
-		ia64_psr(task_pt_regs(prev))->mfh = 0;				\
-		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
-		__ia64_save_fpu((prev)->thread.fph);				\
-	}									\
-	__switch_to(prev, next, last);						\
-	/* "next" in old context is "current" in new context */		\
-	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	\
-		     (task_cpu(current) !=					\
-		      task_thread_info(current)->last_cpu))) {			\
-		platform_migrate(current);					\
-		task_thread_info(current)->last_cpu = task_cpu(current);	\
-	}									\
-} while (0)
-#else
-# define switch_to(prev,next,last)	__switch_to(prev, next, last)
-#endif
-
-#define __ARCH_WANT_UNLOCKED_CTXSW
-#define ARCH_HAS_PREFETCH_SWITCH_STACK
-#define ia64_platform_is(x)	(strcmp(x, platform_name) == 0)
-
-void cpu_idle_wait(void);
-
-#define arch_align_stack(x) (x)
-
-void default_idle(void);
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_SYSTEM_H */
+/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
+#include <asm/barrier.h>
+#include <asm/exec.h>
+#include <asm/switch_to.h>
diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h
index 61b5bdfd980e..8f6cbaa742e9 100644
--- a/arch/ia64/include/asm/uv/uv.h
+++ b/arch/ia64/include/asm/uv/uv.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_IA64_UV_UV_H
 #define _ASM_IA64_UV_UV_H
 
-#include <asm/system.h>
 #include <asm/sn/simulator.h>
 
 static inline int is_uv_system(void)