author    Linus Torvalds <torvalds@g5.osdl.org>  2005-11-18 17:25:40 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-11-18 17:25:40 -0500
commit    62ae144f56b61f541193df4a6465c06ee7eb9096 (patch)
tree      913cc058b162c48e6c72d2b016b9e44201b16cee /include
parent    bcd039b230f738243193ef7dbb03298d967b8370 (diff)
parent    2161558fa5bebfeb272493ae91e836b497029023 (diff)
Merge branch 'parisc' of master.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6
Diffstat (limited to 'include')
-rw-r--r--  include/asm-parisc/irq.h        5
-rw-r--r--  include/asm-parisc/smp.h        7
-rw-r--r--  include/asm-parisc/spinlock.h  19
-rw-r--r--  include/asm-parisc/tlbflush.h  16
-rw-r--r--  include/linux/mm.h              2
5 files changed, 25 insertions, 24 deletions
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf22056..b0a30e2c9813 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
 #define _ASM_PARISC_IRQ_H
 
 #include <linux/config.h>
+#include <linux/cpumask.h>
 #include <asm/types.h>
 
 #define NO_IRQ (-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
 extern int txn_claim_irq(int);
 extern unsigned int txn_alloc_data(unsigned int);
 extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 
 extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
-
-extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
+extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
 
 /* soft power switch support (power.c) */
 extern struct tasklet_struct power_tasklet;
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540b..dbdbd2e9fdf9 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
 #define cpu_logical_map(cpu) (cpu)
 
 extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
 
 #endif /* !ASSEMBLY */
 
@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
 
 #define NO_PROC_ID 0xFF /* No processor magic marker */
 #define ANY_PROC_ID 0xFF /* Any processor magic marker */
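
The smp.h hunk follows the usual compile-out pattern: the SMP build gets a real smp_send_all_nop() declaration, while the uniprocessor build gets an inline no-op, so call sites never need their own #ifdef. A minimal standalone C sketch of that pattern (send_all_nop and the bare CONFIG_SMP macro below are placeholders for illustration, not the kernel's definitions):

/* Build with or without -DCONFIG_SMP; the call site stays the same. */
#include <stdio.h>

#ifdef CONFIG_SMP
void send_all_nop(void);                   /* the real cross-CPU IPI would live elsewhere */
#else
static inline void send_all_nop(void) { }  /* single CPU: nothing to do */
#endif

int main(void)
{
        send_all_nop();                    /* no #ifdef needed at the call site */
        printf("done\n");
        return 0;
}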
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 7c3f406a746a..16c2ac075fc5 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 	return *a == 0;
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+					 unsigned long flags)
 {
 	volatile unsigned int *a;
 
 	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
-		while (*a == 0);
+		while (*a == 0)
+			if (flags & PSW_SM_I) {
+				local_irq_enable();
+				cpu_relax();
+				local_irq_disable();
+			} else
+				cpu_relax();
 	mb();
 }
 
@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 
 static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter++;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter--;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 /* write_lock is less trivial. We optimistically grab the lock and check
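
The rewritten __raw_spin_lock_flags() spins on a contended lock with the caller's interrupt state temporarily restored (when PSW_SM_I was set in flags), so the waiting CPU can still take interrupts rather than busy-waiting with them masked. A standalone C11 sketch of that idea, using an atomic_flag for the lock; local_irq_enable(), local_irq_disable() and cpu_relax() are empty stand-ins, and FLAG_IRQS_ON is an invented bit, not PSW_SM_I:

#include <stdatomic.h>

#define FLAG_IRQS_ON 0x1UL              /* invented: "caller had interrupts enabled" */

static atomic_flag lock = ATOMIC_FLAG_INIT;

static void local_irq_enable(void)  { }  /* stand-in */
static void local_irq_disable(void) { }  /* stand-in */
static void cpu_relax(void)         { }  /* stand-in for a pause hint */

static void spin_lock_flags(unsigned long flags)
{
        /* test-and-set loop: keep trying until we observe the flag clear */
        while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire)) {
                if (flags & FLAG_IRQS_ON) {
                        local_irq_enable();   /* let interrupts in while we wait */
                        cpu_relax();
                        local_irq_disable();  /* re-disable before retrying */
                } else {
                        cpu_relax();
                }
        }
}

static void spin_unlock(void)
{
        atomic_flag_clear_explicit(&lock, memory_order_release);
}

int main(void)
{
        spin_lock_flags(FLAG_IRQS_ON);
        spin_unlock();
        return 0;
}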
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index e97aa8d1eff5..c9ec39c6fc6c 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -12,21 +12,15 @@
  * N class systems, only one PxTLB inter processor broadcast can be
  * active at any one time on the Merced bus. This tlb purge
  * synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
+ * it on all SMP systems not just the N class. We also need to have
+ * preemption disabled on uniprocessor machines, and spin_lock does that
+ * nicely.
+ */
 extern spinlock_t pa_tlb_lock;
 
 #define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
 #define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
 
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
-
-#endif
-
-
 extern void flush_tlb_all(void);
 
 /*
@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
 		flush_tlb_all();
 	else {
-		preempt_disable();
 		mtsp(vma->vm_mm->context,1);
 		purge_tlb_start();
 		if (split_tlb) {
@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 				pdtlb(start);
 				start += PAGE_SIZE;
 			}
-			preempt_enable();
 		}
 		purge_tlb_end();
 	}
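
With purge_tlb_start()/purge_tlb_end() now unconditionally taking pa_tlb_lock, and spin_lock() already implying preempt_disable() as the new comment notes, the explicit preempt_disable()/preempt_enable() pair in flush_tlb_range() becomes redundant. A rough userspace sketch of the resulting shape, with a pthread mutex standing in for pa_tlb_lock and purge_one_page() standing in for the pdtlb/pitlb purges:

#include <pthread.h>

#define PAGE_SIZE 4096UL

static pthread_mutex_t tlb_lock = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for pa_tlb_lock */

static void purge_one_page(unsigned long addr) { (void)addr; }  /* stand-in for pdtlb/pitlb */

static void flush_range(unsigned long start, unsigned long end)
{
        pthread_mutex_lock(&tlb_lock);        /* plays the role of purge_tlb_start() */
        while (start < end) {
                purge_one_page(start);
                start += PAGE_SIZE;
        }
        pthread_mutex_unlock(&tlb_lock);      /* plays the role of purge_tlb_end() */
}

int main(void)
{
        flush_range(0, 16 * PAGE_SIZE);
        return 0;
}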
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1013a42d10b1..0986d19be0b7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+#ifdef CONFIG_IA64
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#endif
 
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);