Diffstat (limited to 'include/asm-ia64')
36 files changed, 429 insertions, 217 deletions
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 874a6f890e75..2fbebf85c31d 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -88,6 +88,18 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 	return new;
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_add_return(i,v)					\
 ({								\
 	int __ia64_aar_i = (i);					\
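The atomic_cmpxchg()/atomic_add_unless() additions above are the usual compare-and-swap retry loop: read the counter, then keep retrying cmpxchg until either the current value equals u (nothing is added) or the swap succeeds; the expression evaluates to true only if the add happened. A minimal caller sketch, illustrative only and not part of this patch (the function name is hypothetical):

#include <asm/atomic.h>

/* Illustrative use of the interfaces added above: take a reference on an
 * object only if its refcount has not already dropped to zero.
 */
static int try_get_object(atomic_t *refcount)
{
	/* atomic_inc_not_zero() returns non-zero iff the increment happened */
	return atomic_inc_not_zero(refcount);
}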
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 6347c9845642..df67d40801de 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -48,12 +48,7 @@ dma_set_mask (struct device *dev, u64 mask)
 	return 0;
 }
 
-static inline int
-dma_get_cache_alignment (void)
-{
-	extern int ia64_max_cacheline_size;
-	return ia64_max_cacheline_size;
-}
+extern int dma_get_cache_alignment(void);
 
 static inline void
 dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index 4d376e1663f7..8b01a083dde6 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -22,6 +22,9 @@
  * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
  *		<anil.s.keshavamurthy@intel.com> adopted from
  *		include/asm-x86_64/kdebug.h
+ *
+ * 2005-Oct	Keith Owens <kaos@sgi.com>.  Expand notify_die to cover more
+ *		events.
  */
 #include <linux/notifier.h>
 
@@ -35,13 +38,36 @@ struct die_args {
 	int signr;
 };
 
-int register_die_notifier(struct notifier_block *nb);
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
 extern struct notifier_block *ia64die_chain;
 
 enum die_val {
 	DIE_BREAK = 1,
-	DIE_SS,
+	DIE_FAULT,
+	DIE_OOPS,
 	DIE_PAGE_FAULT,
+	DIE_MACHINE_HALT,
+	DIE_MACHINE_RESTART,
+	DIE_MCA_MONARCH_ENTER,
+	DIE_MCA_MONARCH_PROCESS,
+	DIE_MCA_MONARCH_LEAVE,
+	DIE_MCA_SLAVE_ENTER,
+	DIE_MCA_SLAVE_PROCESS,
+	DIE_MCA_SLAVE_LEAVE,
+	DIE_MCA_RENDZVOUS_ENTER,
+	DIE_MCA_RENDZVOUS_PROCESS,
+	DIE_MCA_RENDZVOUS_LEAVE,
+	DIE_INIT_MONARCH_ENTER,
+	DIE_INIT_MONARCH_PROCESS,
+	DIE_INIT_MONARCH_LEAVE,
+	DIE_INIT_SLAVE_ENTER,
+	DIE_INIT_SLAVE_PROCESS,
+	DIE_INIT_SLAVE_LEAVE,
+	DIE_KDEBUG_ENTER,
+	DIE_KDEBUG_LEAVE,
+	DIE_KDUMP_ENTER,
+	DIE_KDUMP_LEAVE,
 };
 
 static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
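The expanded die_val enum above lets the MCA/INIT, kdebug and kdump paths report many more events through notify_die(), which consumers receive via the ia64die_chain notifier chain. A hedged sketch of such a consumer follows; the handler name and printk text are illustrative only, not part of the patch:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Illustrative notifier that only reacts to one of the new MCA events. */
static int example_die_handler(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_MCA_MONARCH_ENTER)
		printk("entering MCA monarch, trapnr=%d\n", args->trapnr);
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_die_handler,
};

/* register_die_notifier(&example_nb) would hook this into ia64die_chain. */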
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 573a3574a24f..592abb000e29 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -26,6 +26,7 @@
  */
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/percpu.h>
 #include <asm/break.h>
 
 #define MAX_INSN_SIZE   16
@@ -62,6 +63,18 @@ typedef struct _bundle {
 	} quad1;
 } __attribute__((__aligned__(16)))  bundle_t;
 
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	struct pt_regs jprobe_saved_regs;
+	struct prev_kprobe prev_kprobe;
+};
+
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 
 #define ARCH_SUPPORTS_KRETPROBES
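The new struct kprobe_ctlblk above gives each CPU its own kprobe state (current status, saved jprobe registers, and the preempted kprobe) instead of global variables. A sketch of how such a per-CPU block is typically declared and reached is shown below; this is illustrative only (the variable and helper names are hypothetical, and the real definition lives in the kprobes core, not in this header):

#include <linux/percpu.h>

/* Illustrative only: one control block per CPU, as the "per-cpu kprobe
 * control block" comment in the header suggests.
 */
static DEFINE_PER_CPU(struct kprobe_ctlblk, example_kprobe_ctlblk);

static struct kprobe_ctlblk *example_get_ctlblk(void)
{
	/* must be called with preemption disabled */
	return &__get_cpu_var(example_kprobe_ctlblk);
}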
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index 79e89a7db566..ca5ea994d688 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -26,7 +26,7 @@ typedef void ia64_mv_cpu_init_t (void);
 typedef void ia64_mv_irq_init_t (void);
 typedef void ia64_mv_send_ipi_t (int, int, int, int);
 typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
-typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
+typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
 typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
 typedef unsigned int ia64_mv_local_vector_to_irq (u8);
 typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
@@ -37,7 +37,7 @@ typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
+typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
 typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
 typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
 typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h
index daafe504c5f4..e90daf9ce340 100644
--- a/include/asm-ia64/machvec_hpzx1.h
+++ b/include/asm-ia64/machvec_hpzx1.h
@@ -1,8 +1,7 @@
 #ifndef _ASM_IA64_MACHVEC_HPZX1_h
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_setup_t			sba_setup;
 extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sba_free_coherent;
 extern ia64_mv_dma_map_single		sba_map_single;
@@ -19,15 +18,15 @@ extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
  * platform's machvec structure.  When compiling a non-generic kernel,
  * the macros are used directly.
  */
 #define platform_name				"hpzx1"
-#define platform_setup				sba_setup
+#define platform_setup				dig_setup
 #define platform_dma_init			machvec_noop
 #define platform_dma_alloc_coherent		sba_alloc_coherent
 #define platform_dma_free_coherent		sba_free_coherent
 #define platform_dma_map_single			sba_map_single
 #define platform_dma_unmap_single		sba_unmap_single
 #define platform_dma_map_sg			sba_map_sg
 #define platform_dma_unmap_sg			sba_unmap_sg
 #define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
 #define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
 #define platform_dma_sync_single_for_device	machvec_dma_sync_single
diff --git a/include/asm-ia64/machvec_hpzx1_swiotlb.h b/include/asm-ia64/machvec_hpzx1_swiotlb.h
index 9924b1b00a6c..f00a34a148ff 100644
--- a/include/asm-ia64/machvec_hpzx1_swiotlb.h
+++ b/include/asm-ia64/machvec_hpzx1_swiotlb.h
@@ -2,7 +2,6 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t				dig_setup;
-extern ia64_mv_dma_init				hwsw_init;
 extern ia64_mv_dma_alloc_coherent		hwsw_alloc_coherent;
 extern ia64_mv_dma_free_coherent		hwsw_free_coherent;
 extern ia64_mv_dma_map_single			hwsw_map_single;
@@ -26,7 +25,7 @@ extern ia64_mv_dma_sync_sg_for_device		hwsw_sync_sg_for_device;
 #define platform_name				"hpzx1_swiotlb"
 
 #define platform_setup				dig_setup
-#define platform_dma_init			hwsw_init
+#define platform_dma_init			machvec_noop
 #define platform_dma_alloc_coherent		hwsw_alloc_coherent
 #define platform_dma_free_coherent		hwsw_free_coherent
 #define platform_dma_map_single			hwsw_map_single
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 97a28b8b2ddd..c7d9c9ed38ba 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -80,7 +80,12 @@ struct ia64_sal_os_state {
 	u64			sal_ra;			/* Return address in SAL, physical */
 	u64			sal_gp;			/* GP of the SAL - physical */
 	pal_min_state_area_t	*pal_min_state;		/* from R17.  physical in asm, virtual in C */
+	/* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK).
+	 * Note: if the MCA/INIT recovery code wants to resume to a new context
+	 * then it must change these values to reflect the new kernel stack.
+	 */
 	u64			prev_IA64_KR_CURRENT;	/* previous value of IA64_KR(CURRENT) */
+	u64			prev_IA64_KR_CURRENT_STACK;
 	struct task_struct *prev_task;	/* previous task, NULL if it is not useful */
 	/* Some interrupt registers are not saved in minstate, pt_regs or
 	 * switch_stack.  Because MCA/INIT can occur when interrupts are
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 1590dc65b30b..46501b01a5c5 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -16,10 +16,11 @@
  * 	- initrd (optional)
  * 	- command line string
  * 	- kernel code & data
+ * 	- Kernel memory map built from EFI memory map
  *
  * More could be added if necessary
  */
-#define IA64_MAX_RSVD_REGIONS 5
+#define IA64_MAX_RSVD_REGIONS 6
 
 struct rsvd_region {
 	unsigned long start;	/* virtual address of beginning of element */
@@ -33,6 +34,7 @@ extern void find_memory (void);
 extern void reserve_memory (void);
 extern void find_initrd (void);
 extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern void efi_memmap_init(unsigned long *, unsigned long *);
 
 /*
  * For rounding an address to the next IA64_GRANULE_SIZE or order
@@ -41,7 +43,7 @@ extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg
 #define GRANULEROUNDUP(n)	(((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
 #define ORDERROUNDDOWN(n)	((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
   extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
 #else
 # define call_pernode_memory(start, len, func)	(*func)(start, len, 0)
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 8d6e72f7b08e..b5c65081a3aa 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -7,12 +7,13 @@
  */
 
 /*
- * Routines to manage the allocation of task context numbers.  Task context numbers are
- * used to reduce or eliminate the need to perform TLB flushes due to context switches.
- * Context numbers are implemented using ia-64 region ids.  Since the IA-64 TLB does not
- * consider the region number when performing a TLB lookup, we need to assign a unique
- * region id to each region in a process.  We use the least significant three bits in a
- * region id for this purpose.
+ * Routines to manage the allocation of task context numbers.  Task context
+ * numbers are used to reduce or eliminate the need to perform TLB flushes
+ * due to context switches.  Context numbers are implemented using ia-64
+ * region ids.  Since the IA-64 TLB does not consider the region number when
+ * performing a TLB lookup, we need to assign a unique region id to each
+ * region in a process.  We use the least significant three bits in a region
+ * id for this purpose.
  */
 
 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
@@ -32,13 +33,17 @@
 struct ia64_ctx {
 	spinlock_t lock;
 	unsigned int next;	/* next context number to use */
-	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
+	unsigned int limit;	/* available free range */
 	unsigned int max_ctx;	/* max. context value supported by all CPUs */
+				/* call wrap_mmu_context when next >= max */
+	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
+	unsigned long *flushmap;/* pending rid to be flushed */
 };
 
 extern struct ia64_ctx ia64_ctx;
 DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
 
+extern void mmu_context_init (void);
 extern void wrap_mmu_context (struct mm_struct *mm);
 
 static inline void
@@ -47,10 +52,10 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
 }
 
 /*
- * When the context counter wraps around all TLBs need to be flushed because an old
- * context number might have been reused. This is signalled by the ia64_need_tlb_flush
- * per-CPU variable, which is checked in the routine below. Called by activate_mm().
- * <efocht@ess.nec.de>
+ * When the context counter wraps around all TLBs need to be flushed because
+ * an old context number might have been reused. This is signalled by the
+ * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
+ * below. Called by activate_mm(). <efocht@ess.nec.de>
  */
 static inline void
 delayed_tlb_flush (void)
@@ -60,11 +65,9 @@ delayed_tlb_flush (void)
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
 		spin_lock_irqsave(&ia64_ctx.lock, flags);
-		{
-			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
-				local_flush_tlb_all();
-				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
-			}
+		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+			local_flush_tlb_all();
+			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
 		}
 		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
@@ -76,20 +79,27 @@ get_mmu_context (struct mm_struct *mm)
 	unsigned long flags;
 	nv_mm_context_t context = mm->context;
 
-	if (unlikely(!context)) {
-		spin_lock_irqsave(&ia64_ctx.lock, flags);
-		{
-			/* re-check, now that we've got the lock: */
-			context = mm->context;
-			if (context == 0) {
-				cpus_clear(mm->cpu_vm_mask);
-				if (ia64_ctx.next >= ia64_ctx.limit)
-					wrap_mmu_context(mm);
-				mm->context = context = ia64_ctx.next++;
-			}
+	if (likely(context))
+		goto out;
+
+	spin_lock_irqsave(&ia64_ctx.lock, flags);
+	/* re-check, now that we've got the lock: */
+	context = mm->context;
+	if (context == 0) {
+		cpus_clear(mm->cpu_vm_mask);
+		if (ia64_ctx.next >= ia64_ctx.limit) {
+			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+				ia64_ctx.max_ctx, ia64_ctx.next);
+			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+				ia64_ctx.max_ctx, ia64_ctx.next);
+			if (ia64_ctx.next >= ia64_ctx.max_ctx)
+				wrap_mmu_context(mm);
 		}
-		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+		mm->context = context = ia64_ctx.next++;
+		__set_bit(context, ia64_ctx.bitmap);
 	}
+	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+out:
 	/*
 	 * Ensure we're not starting to use "context" before any old
 	 * uses of it are gone from our TLB.
@@ -100,8 +110,8 @@ get_mmu_context (struct mm_struct *mm)
 }
 
 /*
- * Initialize context number to some sane value.  MM is guaranteed to be a brand-new
- * address-space, so no TLB flushing is needed, ever.
+ * Initialize context number to some sane value.  MM is guaranteed to be a
+ * brand-new address-space, so no TLB flushing is needed, ever.
  */
 static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
@@ -162,7 +172,10 @@ activate_context (struct mm_struct *mm)
 		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
 			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
 		reload_context(context);
-		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
+		/*
+		 * in the unlikely event of a TLB-flush by another thread,
+		 * redo the load.
+		 */
 	} while (unlikely(context != mm->context));
 }
 
@@ -175,8 +188,8 @@ static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
 	/*
-	 * We may get interrupts here, but that's OK because interrupt handlers cannot
-	 * touch user-space.
+	 * We may get interrupts here, but that's OK because interrupt
+	 * handlers cannot touch user-space.
 	 */
 	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
 	activate_context(next);
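The reworked get_mmu_context() above switches region-id allocation to a bitmap: when the cached free range [next, limit) is exhausted, find_next_zero_bit() locates the next free context, find_next_bit() finds where that free run ends, and only when the whole space up to max_ctx is used does wrap_mmu_context() flush and start over. The following is a toy user-space model of that allocator, illustrative only (no spinlock, no flushmap, and a plain byte array instead of the kernel bitmap helpers):

#include <stdio.h>
#include <string.h>

#define MAX_CTX 16				/* stands in for ia64_ctx.max_ctx */

static unsigned char bitmap[MAX_CTX + 1] = { 1 };	/* rid 0 = kernel, in use */
static unsigned int next = 1, limit = MAX_CTX + 1;	/* free range is [next, limit) */

static unsigned int alloc_context(void)
{
	if (next >= limit) {
		/* find the next free id, then the end of that free run */
		while (next <= MAX_CTX && bitmap[next])
			next++;
		limit = next;
		while (limit <= MAX_CTX && !bitmap[limit])
			limit++;
		if (next > MAX_CTX) {
			/* wrap: the kernel flushes TLBs and rebuilds the
			 * bitmap in wrap_mmu_context(); the toy just resets */
			memset(bitmap, 0, sizeof(bitmap));
			bitmap[0] = 1;
			next = 1;
			limit = MAX_CTX + 1;
		}
	}
	bitmap[next] = 1;
	return next++;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("allocated rid %u\n", alloc_context());
	return 0;
}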
diff --git a/include/asm-ia64/mmzone.h b/include/asm-ia64/mmzone.h
index d32f51e3d6c2..34efe88eb849 100644
--- a/include/asm-ia64/mmzone.h
+++ b/include/asm-ia64/mmzone.h
@@ -15,7 +15,7 @@
 #include <asm/page.h>
 #include <asm/meminit.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 
 static inline int pfn_to_nid(unsigned long pfn)
 {
@@ -31,6 +31,10 @@ static inline int pfn_to_nid(unsigned long pfn)
 #endif
 }
 
+#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+extern int early_pfn_to_nid(unsigned long pfn);
+#endif
+
 #ifdef CONFIG_IA64_DIG /* DIG systems are small */
 # define MAX_PHYSNODE_ID	8
 # define NR_NODE_MEMBLKS	(MAX_NUMNODES * 8)
@@ -39,8 +43,8 @@ static inline int pfn_to_nid(unsigned long pfn)
 # define NR_NODE_MEMBLKS	(MAX_NUMNODES * 4)
 #endif
 
-#else /* CONFIG_DISCONTIGMEM */
+#else /* CONFIG_NUMA */
 # define NR_NODE_MEMBLKS	(MAX_NUMNODES * 4)
-#endif /* CONFIG_DISCONTIGMEM */
+#endif /* CONFIG_NUMA */
 
 #endif /* _ASM_IA64_MMZONE_H */
diff --git a/include/asm-ia64/msi.h b/include/asm-ia64/msi.h
index 60f2137f9278..97890f7762b3 100644
--- a/include/asm-ia64/msi.h
+++ b/include/asm-ia64/msi.h
@@ -12,9 +12,6 @@
 static inline void set_intr_gate (int nr, void *func) {}
 #define IO_APIC_VECTOR(irq)	(irq)
 #define ack_APIC_irq		ia64_eoi
-#define cpu_mask_to_apicid(mask) cpu_physical_id(first_cpu(mask))
-#define MSI_DEST_MODE		MSI_PHYSICAL_MODE
-#define MSI_TARGET_CPU		((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
 #define MSI_TARGET_CPU_SHIFT	4
 
 #endif /* ASM_MSI_H */
diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h
index 6b0f3ed89b7e..9978c7ce7549 100644
--- a/include/asm-ia64/nodedata.h
+++ b/include/asm-ia64/nodedata.h
@@ -17,7 +17,7 @@
 #include <asm/percpu.h>
 #include <asm/mmzone.h>
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NUMA
 
 /*
  * Node Data. One of these structures is located on each node of a NUMA system.
@@ -47,6 +47,6 @@ struct ia64_node_data {
  */
 #define NODE_DATA(nid)		(local_node_data->pg_data_ptrs[nid])
 
-#endif /* CONFIG_DISCONTIGMEM */
+#endif /* CONFIG_NUMA */
 
 #endif /* _ASM_IA64_NODEDATA_H */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 9edffad8c28b..5e6362a786b7 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -47,8 +47,6 @@
 #define PERCPU_PAGE_SHIFT	16	/* log2() of max. size of per-CPU area */
 #define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
 
-#define RGN_MAP_LIMIT	((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)	/* per region addr limit */
-
 
 #ifdef CONFIG_HUGETLB_PAGE
 # define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
@@ -102,24 +100,26 @@ do { \
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 extern int ia64_pfn_valid (unsigned long pfn);
-#else
+#elif defined(CONFIG_FLATMEM)
 # define ia64_pfn_valid(pfn) 1
 #endif
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 # define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
 # define page_to_pfn(page)	((unsigned long) (page - mem_map))
 # define pfn_to_page(pfn)	(mem_map + (pfn))
-#else
+#elif defined(CONFIG_DISCONTIGMEM)
 extern struct page *vmem_map;
+extern unsigned long min_low_pfn;
 extern unsigned long max_low_pfn;
-# define pfn_valid(pfn)		(((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+# define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
 # define page_to_pfn(page)	((unsigned long) (page - vmem_map))
 # define pfn_to_page(pfn)	(vmem_map + (pfn))
 #endif
 
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
 typedef union ia64_va {
 	struct {
@@ -174,11 +174,17 @@ get_order (unsigned long size)
  */
   typedef struct { unsigned long pte; } pte_t;
   typedef struct { unsigned long pmd; } pmd_t;
+#ifdef CONFIG_PGTABLE_4
+  typedef struct { unsigned long pud; } pud_t;
+#endif
   typedef struct { unsigned long pgd; } pgd_t;
   typedef struct { unsigned long pgprot; } pgprot_t;
 
 # define pte_val(x)	((x).pte)
 # define pmd_val(x)	((x).pmd)
+#ifdef CONFIG_PGTABLE_4
+# define pud_val(x)	((x).pud)
+#endif
 # define pgd_val(x)	((x).pgd)
 # define pgprot_val(x)	((x).pgprot)
 
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index a5f214554afd..f2f233846476 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -86,6 +86,25 @@ static inline void pgd_free(pgd_t * pgd)
 	pgtable_quicklist_free(pgd);
 }
 
+#ifdef CONFIG_PGTABLE_4
+static inline void
+pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+{
+	pgd_val(*pgd_entry) = __pa(pud);
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return pgtable_quicklist_alloc();
+}
+
+static inline void pud_free(pud_t * pud)
+{
+	pgtable_quicklist_free(pud);
+}
+#define __pud_free_tlb(tlb, pud)	pud_free(pud)
+#endif /* CONFIG_PGTABLE_4 */
+
 static inline void
 pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
 {
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 3339c7b55a6f..e2560c58384b 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -84,32 +84,55 @@
 #define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED
 
 /*
- * Definitions for first level:
- *
- * PGDIR_SHIFT determines what a first-level page table entry can map.
+ * How many pointers will a page table level hold expressed in shift
  */
-#define PGDIR_SHIFT		(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
-#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
-#define PGDIR_MASK		(~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD		(1UL << (PAGE_SHIFT-3))
-#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS	0
+#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)
 
 /*
- * Definitions for second level:
+ * Definitions for fourth level:
+ */
+#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for third level:
  *
- * PMD_SHIFT determines the size of the area a second-level page table
+ * PMD_SHIFT determines the size of the area a third-level page table
  * can map.
  */
-#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
+#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
-#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))
 
+#ifdef CONFIG_PGTABLE_4
 /*
- * Definitions for third level:
+ * Definitions for second level:
+ *
+ * PUD_SHIFT determines the size of the area a second-level page table
+ * can map.
+ */
+#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
+#endif
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
  */
-#define PTRS_PER_PTE	(__IA64_UL(1) << (PAGE_SHIFT-3))
+#ifdef CONFIG_PGTABLE_4
+#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#else
+#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#endif
+#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
+#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
+#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
+#define FIRST_USER_ADDRESS	0
 
 /*
  * All the normal masks have the "page accessed" bits on, as any time
@@ -127,6 +150,7 @@
 
 # ifndef __ASSEMBLY__
 
+#include <linux/sched.h>	/* for mm_struct */
 #include <asm/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
@@ -160,6 +184,9 @@
 #define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
 
 #define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#ifdef CONFIG_PGTABLE_4
+#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
 #define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 
@@ -217,6 +244,9 @@ ia64_phys_addr_valid (unsigned long addr)
 #define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
 #define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
 
+#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
+#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */
+
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
  * table entry (pte).
@@ -236,9 +266,6 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_modify(_pte, newprot) \
 	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
 
-#define page_pte_prot(page,prot)	mk_pte(page, prot)
-#define page_pte(page)			page_pte_prot(page, __pgprot(0))
-
 #define pte_none(pte) 			(!pte_val(pte))
 #define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
 #define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
@@ -256,9 +283,16 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
 #define pud_present(pud)		(pud_val(pud) != 0UL)
 #define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
-
 #define pud_page(pud)			((unsigned long) __va(pud_val(pud) & _PFN_MASK))
 
+#ifdef CONFIG_PGTABLE_4
+#define pgd_none(pgd)			(!pgd_val(pgd))
+#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
+#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
+#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
+#define pgd_page(pgd)			((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+#endif
+
 /*
  * The following have defined behavior only work if pte_present() is true.
  */
@@ -326,7 +360,13 @@ pgd_offset (struct mm_struct *mm, unsigned long address)
 	here.  */
 #define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)
 
+#ifdef CONFIG_PGTABLE_4
 /* Find an entry in the second-level page table.. */
+#define pud_offset(dir,addr) \
+	((pud_t *) pgd_page(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+#endif
+
+/* Find an entry in the third-level page table.. */
 #define pmd_offset(dir,addr) \
 	((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
@@ -559,7 +599,9 @@ do { \
 #define __HAVE_ARCH_PGD_OFFSET_GATE
 #define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
 
+#ifndef CONFIG_PGTABLE_4
 #include <asm-generic/pgtable-nopud.h>
+#endif
 #include <asm-generic/pgtable.h>
 
 #endif /* _ASM_IA64_PGTABLE_H */
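The restructured level definitions above derive every level from PTRS_PER_PTD_SHIFT = PAGE_SHIFT - 3, since each page-table entry is 8 bytes and a page therefore holds 2^(PAGE_SHIFT-3) of them. A small worked example of that arithmetic, assuming 16KB pages (PAGE_SHIFT = 14), is sketched below; it is illustrative only and just recomputes the macros shown in the hunk:

#include <stdio.h>

int main(void)
{
	int page_shift = 14;			/* assumed: 16KB pages */
	int ptd_shift = page_shift - 3;		/* 8-byte entries per page -> 11 */
	int pmd_shift = page_shift + ptd_shift;	/* 25 */
	int pud_shift = pmd_shift + ptd_shift;	/* 36, only with CONFIG_PGTABLE_4 */
	int pgdir3 = pmd_shift + ptd_shift;	/* 3-level PGDIR_SHIFT = 36 */
	int pgdir4 = pud_shift + ptd_shift;	/* 4-level PGDIR_SHIFT = 47 */

	printf("PTRS_PER_PTD_SHIFT = %d\n", ptd_shift);
	printf("PMD_SHIFT = %d, PUD_SHIFT = %d\n", pmd_shift, pud_shift);
	printf("PGDIR_SHIFT: 3-level = %d, 4-level = %d\n", pgdir3, pgdir4);
	/* RGN_MAP_SHIFT = PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3, per pgtable.h */
	printf("RGN_MAP_SHIFT: 3-level = %d, 4-level = %d\n",
	       pgdir3 + ptd_shift - 3, pgdir4 + ptd_shift - 3);
	return 0;
}

With these assumptions the per-region mappable space (RGN_MAP_LIMIT) grows from 2^44 - PAGE_SIZE with three levels to 2^55 - PAGE_SIZE with CONFIG_PGTABLE_4, which is why RGN_MAP_LIMIT moved from page.h into pgtable.h where PGDIR_SHIFT is known.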
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index a79d1a7ecc77..2c703d6e0c86 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -229,6 +229,9 @@ struct switch_stack {
 };
 
 #ifdef __KERNEL__
+
+#define __ARCH_SYS_PTRACE	1
+
 /*
  * We use the ia64_psr(regs)->ri to determine which of the three
  * instructions in bundle (16 bytes) took the sample. Generate
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
index e18b5ab0cb75..1327c91ea39c 100644
--- a/include/asm-ia64/rwsem.h
+++ b/include/asm-ia64/rwsem.h
@@ -186,4 +186,9 @@ __downgrade_write (struct rw_semaphore *sem)
 #define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
 #define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
 
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+	return (sem->count != 0);
+}
+
 #endif /* _ASM_IA64_RWSEM_H */
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index 3a2f0f3f78f3..bb8906285fab 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -24,8 +24,6 @@ struct semaphore {
 	.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)		\
 }
 
-#define __MUTEX_INITIALIZER(name)	__SEMAPHORE_INITIALIZER(name,1)
-
 #define __DECLARE_SEMAPHORE_GENERIC(name,count)				\
 	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
 
diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h
index ab827d298569..1a3831c04af6 100644
--- a/include/asm-ia64/sn/arch.h
+++ b/include/asm-ia64/sn/arch.h
@@ -18,6 +18,32 @@
 #include <asm/sn/sn_cpuid.h>
 
 /*
+ * This is the maximum number of NUMALINK nodes that can be part of a single
+ * SSI kernel. This number includes C-brick, M-bricks, and TIOs. Nodes in
+ * remote partitions are NOT included in this number.
+ * The number of compact nodes cannot exceed size of a coherency domain.
+ * The purpose of this define is to specify a node count that includes
+ * all C/M/TIO nodes in an SSI system.
+ *
+ * SGI system can currently support up to 256 C/M nodes plus additional TIO nodes.
+ *
+ * Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
+ * to ACPI3.0, this limit will be removed. The notion of "compact nodes"
+ * should be deleted and TIOs should be included in MAX_NUMNODES.
+ */
+#define MAX_COMPACT_NODES	512
+
+/*
+ * Maximum number of nodes in all partitions and in all coherency domains.
+ * This is the total number of nodes accessible in the numalink fabric. It
+ * includes all C & M bricks, plus all TIOs.
+ *
+ * This value is also the value of the maximum number of NASIDs in the numalink
+ * fabric.
+ */
+#define MAX_NUMALINK_NODES	16384
+
+/*
  * The following defines attributes of the HUB chip. These attributes are
  * frequently referenced. They are kept in the per-cpu data areas of each cpu.
  * They are kept together in a struct to minimize cache misses.
@@ -41,15 +67,6 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 
 
 /*
- * This is the maximum number of nodes that can be part of a kernel.
- * Effectively, it's the maximum number of compact node ids (cnodeid_t).
- * This is not necessarily the same as MAX_NASIDS.
- */
-#define MAX_COMPACT_NODES	2048
-#define CPUS_PER_NODE		4
-
-
-/*
  * Compact node ID to nasid mappings kept in the per-cpu data areas of each
  * cpu.
  */
@@ -57,7 +74,6 @@ DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
 #define sn_cnodeid_to_nasid	(&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
 
 
-
 extern u8 sn_partition_id;
 extern u8 sn_system_size;
 extern u8 sn_sharing_domain_size;
diff --git a/include/asm-ia64/sn/io.h b/include/asm-ia64/sn/io.h
index 42209733f6b1..41c73a735628 100644
--- a/include/asm-ia64/sn/io.h
+++ b/include/asm-ia64/sn/io.h
@@ -14,7 +14,7 @@
 extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
 extern void __sn_mmiowb(void); /* Forward definition */
 
-extern int numionodes;
+extern int num_cnodes;
 
 #define __sn_mf_a()   ia64_mfa()
 
@@ -36,6 +36,15 @@ extern void sn_dma_flush(unsigned long);
 #define __sn_readq_relaxed ___sn_readq_relaxed
 
 /*
+ * Convenience macros for setting/clearing bits using the above accessors
+ */
+
+#define __sn_setq_relaxed(addr, val) \
+	writeq((__sn_readq_relaxed(addr) | (val)), (addr))
+#define __sn_clrq_relaxed(addr, val) \
+	writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
+
+/*
  * The following routines are SN Platform specific, called when
  * a reference is made to inX/outX set macros.  SN Platform
  * inX set of macros ensures that Posted DMA writes on the
diff --git a/include/asm-ia64/sn/klconfig.h b/include/asm-ia64/sn/klconfig.h
index 9f920c70a62a..bcbf209d63be 100644
--- a/include/asm-ia64/sn/klconfig.h
+++ b/include/asm-ia64/sn/klconfig.h
@@ -208,19 +208,6 @@ typedef struct lboard_s {
 	klconf_off_t 	brd_next_same;    /* Next BOARD with same nasid */
 } lboard_t;
 
-#define KLCF_NUM_COMPS(_brd)	((_brd)->brd_numcompts)
-#define NODE_OFFSET_TO_KLINFO(n,off)	((klinfo_t*) TO_NODE_CAC(n,off))
-#define KLCF_NEXT(_brd)							\
-	((_brd)->brd_next_same ?					\
-	 (NODE_OFFSET_TO_LBOARD((_brd)->brd_next_same_host, (_brd)->brd_next_same)): NULL)
-#define KLCF_NEXT_ANY(_brd)						\
-	((_brd)->brd_next_any ?						\
-	 (NODE_OFFSET_TO_LBOARD(NASID_GET(_brd), (_brd)->brd_next_any)): NULL)
-#define KLCF_COMP(_brd, _ndx)						\
-	((((_brd)->brd_compts[(_ndx)]) == 0) ? 0 :			\
-	 (NODE_OFFSET_TO_KLINFO(NASID_GET(_brd), (_brd)->brd_compts[(_ndx)])))
-
-
 /*
  * Generic info structure. This stores common info about a
  * component.
@@ -249,24 +236,11 @@ typedef struct klinfo_s {                  /* Generic info */
 } klinfo_t ;
 
 
-static inline lboard_t *find_lboard_any(lboard_t * start, unsigned char brd_type)
+static inline lboard_t *find_lboard_next(lboard_t * brd)
 {
-	/* Search all boards stored on this node. */
-
-	while (start) {
-		if (start->brd_type == brd_type)
-			return start;
-		start = KLCF_NEXT_ANY(start);
-	}
-	/* Didn't find it. */
-	return (lboard_t *) NULL;
+	if (brd && brd->brd_next_any)
+		return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
+	return NULL;
 }
 
-
-/* external declarations of Linux kernel functions. */
-
-extern lboard_t *root_lboard[];
-extern klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char type);
-extern klinfo_t *find_first_component(lboard_t *brd, unsigned char type);
-
 #endif /* _ASM_IA64_SN_KLCONFIG_H */
diff --git a/include/asm-ia64/sn/l1.h b/include/asm-ia64/sn/l1.h
index 2e5f0aa38889..e3b819110d47 100644
--- a/include/asm-ia64/sn/l1.h
+++ b/include/asm-ia64/sn/l1.h
@@ -35,4 +35,16 @@
 #define L1_BRICKTYPE_ATHENA	0x2b            /* + */
 #define L1_BRICKTYPE_DAYTONA	0x7a            /* z */
 
+/* board type response codes */
+#define L1_BOARDTYPE_IP69	0x0100		/* CA */
+#define L1_BOARDTYPE_IP63	0x0200		/* CB */
+#define L1_BOARDTYPE_BASEIO	0x0300		/* IB */
+#define L1_BOARDTYPE_PCIE2SLOT	0x0400		/* IC */
+#define L1_BOARDTYPE_PCIX3SLOT	0x0500		/* ID */
+#define L1_BOARDTYPE_PCIXPCIE4SLOT	0x0600	/* IE */
+#define L1_BOARDTYPE_ABACUS	0x0700		/* AB */
+#define L1_BOARDTYPE_DAYTONA	0x0800		/* AD */
+#define L1_BOARDTYPE_INVAL	(-1)		/* invalid brick type */
+
+
 #endif /* _ASM_IA64_SN_L1_H */
diff --git a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h
index 47bb8100fd00..6f6d69e39ff5 100644
--- a/include/asm-ia64/sn/nodepda.h
+++ b/include/asm-ia64/sn/nodepda.h
@@ -55,7 +55,6 @@ struct nodepda_s {
 	 */
 	struct phys_cpuid	phys_cpuid[NR_CPUS];
 	spinlock_t		ptc_lock ____cacheline_aligned_in_smp;
-	spinlock_t		bist_lock;
 };
 
 typedef struct nodepda_s nodepda_t;
diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h
index d2c1d34dcce4..749deb2ca6c1 100644
--- a/include/asm-ia64/sn/sn_cpuid.h
+++ b/include/asm-ia64/sn/sn_cpuid.h
@@ -105,7 +105,6 @@ extern short physical_node_map[];	/* indexed by nasid to get cnode */
 #define cpuid_to_nasid(cpuid)		(sn_nodepda->phys_cpuid[cpuid].nasid)
 #define cpuid_to_subnode(cpuid)		(sn_nodepda->phys_cpuid[cpuid].subnode)
 #define cpuid_to_slice(cpuid)		(sn_nodepda->phys_cpuid[cpuid].slice)
-#define cpuid_to_cnodeid(cpuid)		(physical_node_map[cpuid_to_nasid(cpuid)])
 
 
 /*
@@ -113,8 +112,6 @@ extern short physical_node_map[];	/* indexed by nasid to get cnode */
  * of potentially large tables.
  */
 extern int nasid_slice_to_cpuid(int, int);
-#define nasid_slice_to_cpu_physical_id(nasid, slice)			\
-	cpu_physical_id(nasid_slice_to_cpuid(nasid, slice))
 
 /*
  * cnodeid_to_nasid - convert a cnodeid to a NASID
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index fea35b33d4e4..2a8b0d92a5d6 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -47,6 +47,7 @@
 #define  SN_SAL_CONSOLE_PUTB			   0x02000028
 #define  SN_SAL_CONSOLE_XMIT_CHARS		   0x0200002a
 #define  SN_SAL_CONSOLE_READC			   0x0200002b
+#define  SN_SAL_SYSCTL_OP			   0x02000030
 #define  SN_SAL_SYSCTL_MODID_GET		   0x02000031
 #define  SN_SAL_SYSCTL_GET			   0x02000032
 #define  SN_SAL_SYSCTL_IOBRICK_MODULE_GET	   0x02000033
@@ -55,6 +56,7 @@
 #define  SN_SAL_BUS_CONFIG			   0x02000037
 #define  SN_SAL_SYS_SERIAL_GET			   0x02000038
 #define  SN_SAL_PARTITION_SERIAL_GET		   0x02000039
+#define  SN_SAL_SYSCTL_PARTITION_GET		   0x0200003a
 #define  SN_SAL_SYSTEM_POWER_DOWN		   0x0200003b
 #define  SN_SAL_GET_MASTER_BASEIO_NASID		   0x0200003c
 #define  SN_SAL_COHERENCE			   0x0200003d
@@ -67,7 +69,7 @@
 #define  SN_SAL_IOIF_INTERRUPT			   0x0200004a
 #define  SN_SAL_HWPERF_OP			   0x02000050   // lock
 #define  SN_SAL_IOIF_ERROR_INTERRUPT		   0x02000051
-
+#define  SN_SAL_IOIF_PCI_SAFE			   0x02000052
 #define  SN_SAL_IOIF_SLOT_ENABLE		   0x02000053
 #define  SN_SAL_IOIF_SLOT_DISABLE		   0x02000054
 #define  SN_SAL_IOIF_GET_HUBDEV_INFO		   0x02000055
@@ -101,6 +103,13 @@
 #define SAL_INTR_FREE		2
 
 /*
+ * operations available on the generic SN_SAL_SYSCTL_OP
+ * runtime service
+ */
+#define SAL_SYSCTL_OP_IOBOARD		0x0001  /* retrieve board type */
+#define SAL_SYSCTL_OP_TIO_JLCK_RST	0x0002  /* issue TIO clock reset */
+
+/*
  * IRouter (i.e. generalized system controller) operations
  */
 #define SAL_IROUTER_OPEN	0	/* open a subchannel */
@@ -198,26 +207,16 @@ ia64_sn_get_master_baseio_nasid(void)
 	return ret_stuff.v0;
 }
 
-static inline char *
+static inline void *
ia64_sn_get_klconfig_addr(nasid_t nasid)
 {
 	struct ia64_sal_retval ret_stuff;
-	int cnodeid;
 
-	cnodeid = nasid_to_cnodeid(nasid);
 	ret_stuff.status = 0;
 	ret_stuff.v0 = 0;
 	ret_stuff.v1 = 0;
 	ret_stuff.v2 = 0;
 	SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
-
-	/*
-	 * We should panic if a valid cnode nasid does not produce
-	 * a klconfig address.
-	 */
-	if (ret_stuff.status != 0) {
-		panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", ret_stuff.status);
-	}
 	return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
 }
 
@@ -583,6 +582,21 @@ sn_partition_serial_number_val(void) {
 }
 
 /*
+ * Returns the partition id of the nasid passed in as an argument,
+ * or INVALID_PARTID if the partition id cannot be retrieved.
+ */
+static inline partid_t
+ia64_sn_sysctl_partition_get(nasid_t nasid)
+{
+	struct ia64_sal_retval ret_stuff;
+	SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
+		 0, 0, 0, 0, 0, 0);
+	if (ret_stuff.status != 0)
+		return -1;
+	return ((partid_t)ret_stuff.v0);
+}
+
+/*
  * Returns the physical address of the partition's reserved page through
  * an iterative number of calls.
  *
@@ -694,12 +708,10 @@ sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
 	unsigned long irq_flags;
 
 	cnodeid = nasid_to_cnodeid(get_node_number(paddr));
-	// spin_lock(&NODEPDA(cnodeid)->bist_lock);
 	local_irq_save(irq_flags);
 	ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
 			(u64)nasid_array, perms, 0, 0, 0);
 	local_irq_restore(irq_flags);
-	// spin_unlock(&NODEPDA(cnodeid)->bist_lock);
 	return ret_stuff.status;
 }
 #define SN_MEMPROT_ACCESS_CLASS_0		0x14a080
@@ -873,6 +885,41 @@ ia64_sn_sysctl_event_init(nasid_t nasid)
         return (int) rv.v0;
 }
 
+/*
+ * Ask the system controller on the specified nasid to reset
+ * the CX corelet clock.  Only valid on TIO nodes.
+ */
+static inline int
+ia64_sn_sysctl_tio_clock_reset(nasid_t nasid)
+{
+	struct ia64_sal_retval rv;
896 | SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_TIO_JLCK_RST, | ||
897 | nasid, 0, 0, 0, 0, 0); | ||
898 | if (rv.status != 0) | ||
899 | return (int)rv.status; | ||
900 | if (rv.v0 != 0) | ||
901 | return (int)rv.v0; | ||
902 | |||
903 | return 0; | ||
904 | } | ||
905 | |||
906 | /* | ||
907 | * Get the associated ioboard type for a given nasid. | ||
908 | */ | ||
909 | static inline int | ||
910 | ia64_sn_sysctl_ioboard_get(nasid_t nasid) | ||
911 | { | ||
912 | struct ia64_sal_retval rv; | ||
913 | SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD, | ||
914 | nasid, 0, 0, 0, 0, 0); | ||
915 | if (rv.v0 != 0) | ||
916 | return (int)rv.v0; | ||
917 | if (rv.v1 != 0) | ||
918 | return (int)rv.v1; | ||
919 | |||
920 | return 0; | ||
921 | } | ||
922 | |||
876 | /** | 923 | /** |
877 | * ia64_sn_get_fit_compt - read a FIT entry from the PROM header | 924 | * ia64_sn_get_fit_compt - read a FIT entry from the PROM header |
878 | * @nasid: NASID of node to read | 925 | * @nasid: NASID of node to read |
@@ -987,6 +1034,24 @@ ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift, | |||
987 | ret_stuff.v2 = 0; | 1034 | ret_stuff.v2 = 0; |
988 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0); | 1035 | SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0); |
989 | 1036 | ||
1037 | /***** BEGIN HACK - temp til old proms no longer supported ********/ | ||
1038 | if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) { | ||
1039 | int nasid = get_sapicid() & 0xfff; | ||
1040 | #define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL | ||
1041 | #define SH_SHUB_ID_NODES_PER_BIT_SHFT 48 | ||
1042 | if (shubtype) *shubtype = 0; | ||
1043 | if (nasid_bitmask) *nasid_bitmask = 0x7ff; | ||
1044 | if (nasid_shift) *nasid_shift = 38; | ||
1045 | if (systemsize) *systemsize = 10; | ||
1046 | if (sharing_domain_size) *sharing_domain_size = 8; | ||
1047 | if (partid) *partid = ia64_sn_sysctl_partition_get(nasid); | ||
1048 | if (coher) *coher = nasid >> 9; | ||
1049 | if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >> | ||
1050 | SH_SHUB_ID_NODES_PER_BIT_SHFT; | ||
1051 | return 0; | ||
1052 | } | ||
1053 | /***** END HACK *******/ | ||
1054 | |||
990 | if (ret_stuff.status < 0) | 1055 | if (ret_stuff.status < 0) |
991 | return ret_stuff.status; | 1056 | return ret_stuff.status; |
992 | 1057 | ||
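
As an illustration only, the three new SYSCTL wrappers added above can be used directly by any caller that includes sn_sal.h; the probe helper below is hypothetical (its name, arguments, and error handling are not part of this patch), but the SAL wrappers and their return conventions are exactly the ones defined above.

	static int demo_probe_node(nasid_t nasid, int *bt)
	{
		partid_t partid = ia64_sn_sysctl_partition_get(nasid);

		if (partid < 0)
			return -1;	/* partition id could not be retrieved */

		*bt = ia64_sn_sysctl_ioboard_get(nasid);	/* board type for the caller */

		/* TIO nodes only: reset the CX corelet clock via SAL_SYSCTL_OP_TIO_JLCK_RST. */
		return ia64_sn_sysctl_tio_clock_reset(nasid);
	}
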
diff --git a/include/asm-ia64/sn/tioca_provider.h b/include/asm-ia64/sn/tioca_provider.h index 5ccec608d325..b532ef6148ed 100644 --- a/include/asm-ia64/sn/tioca_provider.h +++ b/include/asm-ia64/sn/tioca_provider.h | |||
@@ -182,11 +182,11 @@ tioca_tlbflush(struct tioca_kernel *tioca_kernel) | |||
182 | * touch every CL aligned GART entry. | 182 | * touch every CL aligned GART entry. |
183 | */ | 183 | */ |
184 | 184 | ||
185 | ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM); | 185 | __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM); |
186 | ca_base->ca_control2 |= CA_GART_FLUSH_TLB; | 186 | __sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB); |
187 | ca_base->ca_control2 |= | 187 | __sn_setq_relaxed(&ca_base->ca_control2, |
188 | (0x2ull << CA_GART_MEM_PARAM_SHFT); | 188 | (0x2ull << CA_GART_MEM_PARAM_SHFT)); |
189 | tmp = ca_base->ca_control2; | 189 | tmp = __sn_readq_relaxed(&ca_base->ca_control2); |
190 | } | 190 | } |
191 | 191 | ||
192 | return; | 192 | return; |
@@ -196,8 +196,8 @@ tioca_tlbflush(struct tioca_kernel *tioca_kernel) | |||
196 | * Gart in uncached mode ... need an explicit flush. | 196 | * Gart in uncached mode ... need an explicit flush. |
197 | */ | 197 | */ |
198 | 198 | ||
199 | ca_base->ca_control2 |= CA_GART_FLUSH_TLB; | 199 | __sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB); |
200 | tmp = ca_base->ca_control2; | 200 | tmp = __sn_readq_relaxed(&ca_base->ca_control2); |
201 | } | 201 | } |
202 | 202 | ||
203 | extern uint32_t tioca_gart_found; | 203 | extern uint32_t tioca_gart_found; |
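
The tioca_tlbflush() hunks above replace open-coded |= and &= sequences on ca_control2 with the SN relaxed MMIO helpers. A minimal sketch of that accessor pattern follows; the register pointer type, register name, and bit masks are hypothetical, and only the __sn_clrq_relaxed/__sn_setq_relaxed/__sn_readq_relaxed calls are taken from the patch.

	#define DEMO_MODE_MASK	(0x3ULL << 4)	/* hypothetical bit field */
	#define DEMO_FLUSH	(0x1ULL << 0)	/* hypothetical flush bit */

	static void demo_kick_register(volatile u64 *reg)
	{
		u64 tmp;

		__sn_clrq_relaxed(reg, DEMO_MODE_MASK);	/* clear the field */
		__sn_setq_relaxed(reg, DEMO_FLUSH);	/* set the flush bit */
		tmp = __sn_readq_relaxed(reg);		/* read back so the write posts */
		(void) tmp;
	}
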
diff --git a/include/asm-ia64/sn/tioce.h b/include/asm-ia64/sn/tioce.h index 22879853e46c..ecaddf960086 100644 --- a/include/asm-ia64/sn/tioce.h +++ b/include/asm-ia64/sn/tioce.h | |||
@@ -1,22 +1,10 @@ | |||
1 | /************************************************************************** | 1 | /* |
2 | * * | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * Unpublished copyright (c) 2005, Silicon Graphics, Inc. * | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * THIS IS UNPUBLISHED CONFIDENTIAL AND PROPRIETARY SOURCE CODE OF SGI. * | 4 | * for more details. |
5 | * * | 5 | * |
6 | * The copyright notice above does not evidence any actual or intended * | 6 | * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved. |
7 | * publication or disclosure of this source code, which includes * | 7 | */ |
8 | * information that is confidential and/or proprietary, and is a trade * | ||
9 | * secret, of Silicon Graphics, Inc. ANY REPRODUCTION, MODIFICATION, * | ||
10 | * DISTRIBUTION, PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH * | ||
11 | * USE OF THIS SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF * | ||
12 | * SILICON GRAPHICS, INC. IS STRICTLY PROHIBITED, AND IN VIOLATION OF * | ||
13 | * APPLICABLE LAWS AND INTERNATIONAL TREATIES. THE RECEIPT OR * | ||
14 | * POSSESSION OF THIS SOURCE CODE AND/OR RELATED INFORMATION DOES NOT * | ||
15 | * CONVEY OR IMPLY ANY RIGHTS TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS * | ||
16 | * CONTENTS, OR TO MANUFACTURE, USE, OR SELL ANYTHING THAT IT MAY * | ||
17 | * DESCRIBE, IN WHOLE OR IN PART. * | ||
18 | * * | ||
19 | **************************************************************************/ | ||
20 | 8 | ||
21 | #ifndef __ASM_IA64_SN_TIOCE_H__ | 9 | #ifndef __ASM_IA64_SN_TIOCE_H__ |
22 | #define __ASM_IA64_SN_TIOCE_H__ | 10 | #define __ASM_IA64_SN_TIOCE_H__ |
diff --git a/include/asm-ia64/sn/tioce_provider.h b/include/asm-ia64/sn/tioce_provider.h index 7f63dec0a79a..cb414908671d 100644 --- a/include/asm-ia64/sn/tioce_provider.h +++ b/include/asm-ia64/sn/tioce_provider.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /************************************************************************** | 1 | /* |
2 | * Copyright (C) 2005, Silicon Graphics, Inc. * | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * * | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * These coded instructions, statements, and computer programs contain * | 4 | * for more details. |
5 | * unpublished proprietary information of Silicon Graphics, Inc., and * | 5 | * |
6 | * are protected by Federal copyright law. They may not be disclosed * | 6 | * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved. |
7 | * to third parties or copied or duplicated in any form, in whole or * | 7 | */ |
8 | * in part, without the prior written consent of Silicon Graphics, Inc. * | ||
9 | * * | ||
10 | **************************************************************************/ | ||
11 | 8 | ||
12 | #ifndef _ASM_IA64_SN_CE_PROVIDER_H | 9 | #ifndef _ASM_IA64_SN_CE_PROVIDER_H |
13 | #define _ASM_IA64_SN_CE_PROVIDER_H | 10 | #define _ASM_IA64_SN_CE_PROVIDER_H |
diff --git a/include/asm-ia64/sn/tiocx.h b/include/asm-ia64/sn/tiocx.h index c5447a504509..5699e75e5024 100644 --- a/include/asm-ia64/sn/tiocx.h +++ b/include/asm-ia64/sn/tiocx.h | |||
@@ -19,6 +19,7 @@ struct cx_id_s { | |||
19 | 19 | ||
20 | struct cx_dev { | 20 | struct cx_dev { |
21 | struct cx_id_s cx_id; | 21 | struct cx_id_s cx_id; |
22 | int bt; /* board/blade type */ | ||
22 | void *soft; /* driver specific */ | 23 | void *soft; /* driver specific */ |
23 | struct hubdev_info *hubdev; | 24 | struct hubdev_info *hubdev; |
24 | struct device dev; | 25 | struct device dev; |
@@ -59,7 +60,7 @@ struct cx_drv { | |||
59 | extern struct sn_irq_info *tiocx_irq_alloc(nasid_t, int, int, nasid_t, int); | 60 | extern struct sn_irq_info *tiocx_irq_alloc(nasid_t, int, int, nasid_t, int); |
60 | extern void tiocx_irq_free(struct sn_irq_info *); | 61 | extern void tiocx_irq_free(struct sn_irq_info *); |
61 | extern int cx_device_unregister(struct cx_dev *); | 62 | extern int cx_device_unregister(struct cx_dev *); |
62 | extern int cx_device_register(nasid_t, int, int, struct hubdev_info *); | 63 | extern int cx_device_register(nasid_t, int, int, struct hubdev_info *, int); |
63 | extern int cx_driver_unregister(struct cx_drv *); | 64 | extern int cx_driver_unregister(struct cx_drv *); |
64 | extern int cx_driver_register(struct cx_drv *); | 65 | extern int cx_driver_register(struct cx_drv *); |
65 | extern uint64_t tiocx_dma_addr(uint64_t addr); | 66 | extern uint64_t tiocx_dma_addr(uint64_t addr); |
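
cx_device_register() now takes a trailing int carrying the board/blade type, which ends up in the new cx_dev->bt field. The call site below is hypothetical (the argument names are illustrative; only the prototype comes from the patch) and assumes the ioboard helper from sn_sal.h above supplies the type.

	static int demo_cx_register(nasid_t nasid, int part_num, int mfg_num,
				    struct hubdev_info *hubdev)
	{
		int bt = ia64_sn_sysctl_ioboard_get(nasid);	/* board/blade type */

		return cx_device_register(nasid, part_num, mfg_num, hubdev, bt);
	}
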
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h index 1df1c9f61a65..49faf8f26430 100644 --- a/include/asm-ia64/sn/xp.h +++ b/include/asm-ia64/sn/xp.h | |||
@@ -49,7 +49,7 @@ | |||
49 | * C-brick nasids, thus the need for bitmaps which don't account for | 49 | * C-brick nasids, thus the need for bitmaps which don't account for |
50 | * odd-numbered (non C-brick) nasids. | 50 | * odd-numbered (non C-brick) nasids. |
51 | */ | 51 | */ |
52 | #define XP_MAX_PHYSNODE_ID (MAX_PHYSNODE_ID / 2) | 52 | #define XP_MAX_PHYSNODE_ID (MAX_NUMALINK_NODES / 2) |
53 | #define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) | 53 | #define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) |
54 | #define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) | 54 | #define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) |
55 | 55 | ||
@@ -217,7 +217,17 @@ enum xpc_retval { | |||
217 | xpcInvalidPartid, /* 42: invalid partition ID */ | 217 | xpcInvalidPartid, /* 42: invalid partition ID */ |
218 | xpcLocalPartid, /* 43: local partition ID */ | 218 | xpcLocalPartid, /* 43: local partition ID */ |
219 | 219 | ||
220 | xpcUnknownReason /* 44: unknown reason -- must be last in list */ | 220 | xpcOtherGoingDown, /* 44: other side going down, reason unknown */ |
221 | xpcSystemGoingDown, /* 45: system is going down, reason unknown */ | ||
222 | xpcSystemHalt, /* 46: system is being halted */ | ||
223 | xpcSystemReboot, /* 47: system is being rebooted */ | ||
224 | xpcSystemPoweroff, /* 48: system is being powered off */ | ||
225 | |||
226 | xpcDisconnecting, /* 49: channel disconnecting (closing) */ | ||
227 | |||
228 | xpcOpenCloseError, /* 50: channel open/close protocol error */ | ||
229 | |||
230 | xpcUnknownReason /* 51: unknown reason -- must be last in list */ | ||
221 | }; | 231 | }; |
222 | 232 | ||
223 | 233 | ||
@@ -342,7 +352,7 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid, | |||
342 | * | 352 | * |
343 | * The 'func' field points to the function to call when asynchronous | 353 | * The 'func' field points to the function to call when asynchronous |
344 | * notification is required for such events as: a connection established/lost, | 354 | * notification is required for such events as: a connection established/lost, |
345 | * or an incomming message received, or an error condition encountered. A | 355 | * or an incoming message received, or an error condition encountered. A |
346 | * non-NULL 'func' field indicates that there is an active registration for | 356 | * non-NULL 'func' field indicates that there is an active registration for |
347 | * the channel. | 357 | * the channel. |
348 | */ | 358 | */ |
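
The enum above grows several distinct disconnect reasons. As a sketch only (the helper name and the grouping are illustrative, not part of XPC), a channel user that includes xp.h could separate "the remote partition is shutting down" from other disconnect reasons like this:

	static int demo_partition_going_down(enum xpc_retval reason)
	{
		switch (reason) {
		case xpcOtherGoingDown:
		case xpcSystemGoingDown:
		case xpcSystemHalt:
		case xpcSystemReboot:
		case xpcSystemPoweroff:
			return 1;	/* remote side is on its way down */
		default:
			return 0;	/* disconnecting, protocol error, etc. */
		}
	}
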
diff --git a/include/asm-ia64/sparsemem.h b/include/asm-ia64/sparsemem.h new file mode 100644 index 000000000000..67a7c40ec27f --- /dev/null +++ b/include/asm-ia64/sparsemem.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _ASM_IA64_SPARSEMEM_H | ||
2 | #define _ASM_IA64_SPARSEMEM_H | ||
3 | |||
4 | #ifdef CONFIG_SPARSEMEM | ||
5 | /* | ||
6 | * SECTION_SIZE_BITS 2^N: how big each section will be | ||
7 | * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space | ||
8 | */ | ||
9 | |||
10 | #define SECTION_SIZE_BITS (30) | ||
11 | #define MAX_PHYSMEM_BITS (50) | ||
12 | #ifdef CONFIG_FORCE_MAX_ZONEORDER | ||
13 | #if ((CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS) | ||
14 | #undef SECTION_SIZE_BITS | ||
15 | #define SECTION_SIZE_BITS (CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT) | ||
16 | #endif | ||
17 | #endif | ||
18 | |||
19 | #endif /* CONFIG_SPARSEMEM */ | ||
20 | #endif /* _ASM_IA64_SPARSEMEM_H */ | ||
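
A worked example of the override above, using hypothetical values (64 KB pages and CONFIG_FORCE_MAX_ZONEORDER=18): the largest buddy allocation then spans 2^(18 - 1 + 16) = 2^33 bytes, which no longer fits in the default 2^30-byte section, so SECTION_SIZE_BITS is raised to 33. The sketch below mirrors the header's #ifdef logic with stand-in names.

	#define DEMO_PAGE_SHIFT			16	/* 64 KB pages (hypothetical) */
	#define DEMO_FORCE_MAX_ZONEORDER	18	/* hypothetical config value */
	#define DEMO_DEFAULT_SECTION_BITS	30

	/* A section must hold the largest buddy allocation,
	 * i.e. 2^(MAX_ZONEORDER - 1 + PAGE_SHIFT) bytes. */
	#if (DEMO_FORCE_MAX_ZONEORDER - 1 + DEMO_PAGE_SHIFT) > DEMO_DEFAULT_SECTION_BITS
	#define DEMO_SECTION_SIZE_BITS	(DEMO_FORCE_MAX_ZONEORDER - 1 + DEMO_PAGE_SHIFT)	/* 33 */
	#else
	#define DEMO_SECTION_SIZE_BITS	DEMO_DEFAULT_SECTION_BITS
	#endif
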
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h index 3a9a6d1be75c..834370b9dea1 100644 --- a/include/asm-ia64/tlb.h +++ b/include/asm-ia64/tlb.h | |||
@@ -60,7 +60,6 @@ struct mmu_gather { | |||
60 | unsigned int nr; /* == ~0U => fast mode */ | 60 | unsigned int nr; /* == ~0U => fast mode */ |
61 | unsigned char fullmm; /* non-zero means full mm flush */ | 61 | unsigned char fullmm; /* non-zero means full mm flush */ |
62 | unsigned char need_flush; /* really unmapped some PTEs? */ | 62 | unsigned char need_flush; /* really unmapped some PTEs? */ |
63 | unsigned long freed; /* number of pages freed */ | ||
64 | unsigned long start_addr; | 63 | unsigned long start_addr; |
65 | unsigned long end_addr; | 64 | unsigned long end_addr; |
66 | struct page *pages[FREE_PTE_NR]; | 65 | struct page *pages[FREE_PTE_NR]; |
@@ -129,7 +128,7 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e | |||
129 | static inline struct mmu_gather * | 128 | static inline struct mmu_gather * |
130 | tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush) | 129 | tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush) |
131 | { | 130 | { |
132 | struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers); | 131 | struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); |
133 | 132 | ||
134 | tlb->mm = mm; | 133 | tlb->mm = mm; |
135 | /* | 134 | /* |
@@ -147,25 +146,17 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush) | |||
147 | */ | 146 | */ |
148 | tlb->nr = (num_online_cpus() == 1) ? ~0U : 0; | 147 | tlb->nr = (num_online_cpus() == 1) ? ~0U : 0; |
149 | tlb->fullmm = full_mm_flush; | 148 | tlb->fullmm = full_mm_flush; |
150 | tlb->freed = 0; | ||
151 | tlb->start_addr = ~0UL; | 149 | tlb->start_addr = ~0UL; |
152 | return tlb; | 150 | return tlb; |
153 | } | 151 | } |
154 | 152 | ||
155 | /* | 153 | /* |
156 | * Called at the end of the shootdown operation to free up any resources that were | 154 | * Called at the end of the shootdown operation to free up any resources that were |
157 | * collected. The page table lock is still held at this point. | 155 | * collected. |
158 | */ | 156 | */ |
159 | static inline void | 157 | static inline void |
160 | tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) | 158 | tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) |
161 | { | 159 | { |
162 | unsigned long freed = tlb->freed; | ||
163 | struct mm_struct *mm = tlb->mm; | ||
164 | unsigned long rss = get_mm_counter(mm, rss); | ||
165 | |||
166 | if (rss < freed) | ||
167 | freed = rss; | ||
168 | add_mm_counter(mm, rss, -freed); | ||
169 | /* | 160 | /* |
170 | * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and | 161 | * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and |
171 | * tlb->end_addr. | 162 | * tlb->end_addr. |
@@ -174,12 +165,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) | |||
174 | 165 | ||
175 | /* keep the page table cache within bounds */ | 166 | /* keep the page table cache within bounds */ |
176 | check_pgt_cache(); | 167 | check_pgt_cache(); |
177 | } | ||
178 | 168 | ||
179 | static inline unsigned int | 169 | put_cpu_var(mmu_gathers); |
180 | tlb_is_full_mm(struct mmu_gather *tlb) | ||
181 | { | ||
182 | return tlb->fullmm; | ||
183 | } | 170 | } |
184 | 171 | ||
185 | /* | 172 | /* |
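
The tlb.h hunks above swap __get_cpu_var() for the get_cpu_var()/put_cpu_var() pair, so the per-CPU mmu_gather is claimed with preemption disabled from tlb_gather_mmu() until tlb_finish_mmu() releases it. A minimal sketch of that per-CPU pattern, using a made-up counter rather than the real mmu_gather:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, demo_count);

	static void demo_bump(void)
	{
		int *p = &get_cpu_var(demo_count);	/* disables preemption, picks this CPU's copy */

		(*p)++;					/* safe: the task cannot migrate here */
		put_cpu_var(demo_count);		/* re-enables preemption */
	}
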
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h index b65c62702724..a35b323bae4c 100644 --- a/include/asm-ia64/tlbflush.h +++ b/include/asm-ia64/tlbflush.h | |||
@@ -51,6 +51,7 @@ flush_tlb_mm (struct mm_struct *mm) | |||
51 | if (!mm) | 51 | if (!mm) |
52 | return; | 52 | return; |
53 | 53 | ||
54 | set_bit(mm->context, ia64_ctx.flushmap); | ||
54 | mm->context = 0; | 55 | mm->context = 0; |
55 | 56 | ||
56 | if (atomic_read(&mm->mm_users) == 0) | 57 | if (atomic_read(&mm->mm_users) == 0) |
diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h index 3a7829bb5954..9adb51211c22 100644 --- a/include/asm-ia64/uaccess.h +++ b/include/asm-ia64/uaccess.h | |||
@@ -187,8 +187,8 @@ extern void __get_user_unknown (void); | |||
187 | ({ \ | 187 | ({ \ |
188 | const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ | 188 | const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ |
189 | __typeof__ (size) __gu_size = (size); \ | 189 | __typeof__ (size) __gu_size = (size); \ |
190 | long __gu_err = -EFAULT, __gu_val = 0; \ | 190 | long __gu_err = -EFAULT; \ |
191 | \ | 191 | unsigned long __gu_val = 0; \ |
192 | if (!check || __access_ok(__gu_ptr, size, segment)) \ | 192 | if (!check || __access_ok(__gu_ptr, size, segment)) \ |
193 | switch (__gu_size) { \ | 193 | switch (__gu_size) { \ |
194 | case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ | 194 | case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ |
@@ -240,13 +240,13 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use | |||
240 | static inline unsigned long | 240 | static inline unsigned long |
241 | __copy_to_user (void __user *to, const void *from, unsigned long count) | 241 | __copy_to_user (void __user *to, const void *from, unsigned long count) |
242 | { | 242 | { |
243 | return __copy_user(to, (void __user *) from, count); | 243 | return __copy_user(to, (__force void __user *) from, count); |
244 | } | 244 | } |
245 | 245 | ||
246 | static inline unsigned long | 246 | static inline unsigned long |
247 | __copy_from_user (void *to, const void __user *from, unsigned long count) | 247 | __copy_from_user (void *to, const void __user *from, unsigned long count) |
248 | { | 248 | { |
249 | return __copy_user((void __user *) to, from, count); | 249 | return __copy_user((__force void __user *) to, from, count); |
250 | } | 250 | } |
251 | 251 | ||
252 | #define __copy_to_user_inatomic __copy_to_user | 252 | #define __copy_to_user_inatomic __copy_to_user |
@@ -258,7 +258,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) | |||
258 | long __cu_len = (n); \ | 258 | long __cu_len = (n); \ |
259 | \ | 259 | \ |
260 | if (__access_ok(__cu_to, __cu_len, get_fs())) \ | 260 | if (__access_ok(__cu_to, __cu_len, get_fs())) \ |
261 | __cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len); \ | 261 | __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ |
262 | __cu_len; \ | 262 | __cu_len; \ |
263 | }) | 263 | }) |
264 | 264 | ||
@@ -270,7 +270,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) | |||
270 | \ | 270 | \ |
271 | __chk_user_ptr(__cu_from); \ | 271 | __chk_user_ptr(__cu_from); \ |
272 | if (__access_ok(__cu_from, __cu_len, get_fs())) \ | 272 | if (__access_ok(__cu_from, __cu_len, get_fs())) \ |
273 | __cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len); \ | 273 | __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ |
274 | __cu_len; \ | 274 | __cu_len; \ |
275 | }) | 275 | }) |
276 | 276 | ||
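
The __force annotations added in the uaccess.h hunks exist only for sparse; the generated code is unchanged. A minimal illustration of why the cast needs __force (the helper is hypothetical, while __user and __force are the kernel's sparse annotations from linux/compiler.h):

	#include <linux/compiler.h>

	/* Deliberately treat a kernel pointer as a user pointer, as a
	 * set_fs(KERNEL_DS) caller does when feeding kernel memory to a
	 * copy routine.  Without __force, sparse warns that the cast adds
	 * an address space to the expression. */
	static void __user *demo_to_user_ptr(void *kptr)
	{
		return (__force void __user *) kptr;
	}
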
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h index 3a0c69524656..6d96a67439be 100644 --- a/include/asm-ia64/unistd.h +++ b/include/asm-ia64/unistd.h | |||
@@ -383,8 +383,6 @@ struct sigaction; | |||
383 | long sys_execve(char __user *filename, char __user * __user *argv, | 383 | long sys_execve(char __user *filename, char __user * __user *argv, |
384 | char __user * __user *envp, struct pt_regs *regs); | 384 | char __user * __user *envp, struct pt_regs *regs); |
385 | asmlinkage long sys_pipe(void); | 385 | asmlinkage long sys_pipe(void); |
386 | asmlinkage long sys_ptrace(long request, pid_t pid, | ||
387 | unsigned long addr, unsigned long data); | ||
388 | asmlinkage long sys_rt_sigaction(int sig, | 386 | asmlinkage long sys_rt_sigaction(int sig, |
389 | const struct sigaction __user *act, | 387 | const struct sigaction __user *act, |
390 | struct sigaction __user *oact, | 388 | struct sigaction __user *oact, |