Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/dma-mapping.h |  2
-rw-r--r--  include/asm-sparc64/pgtable.h     |  3
-rw-r--r--  include/asm-sparc64/rwsem.h       |  5
-rw-r--r--  include/asm-sparc64/semaphore.h   |  3
-rw-r--r--  include/asm-sparc64/tlb.h         | 29
5 files changed, 14 insertions, 28 deletions
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 1c5da41653a4..c7d5804ba76d 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -10,7 +10,7 @@
 struct device;
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, int flag)
+					dma_addr_t *dma_handle, gfp_t flag)
 {
 	BUG();
 	return NULL;
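
The only change in this stub is the type of the allocation-flags argument, from a plain int to the gfp_t typedef, so sparse can type-check the flag; the body still just calls BUG(). A hypothetical caller (illustration only, not part of this diff) passes a gfp_t constant such as GFP_KERNEL:

	/* Hypothetical caller -- for illustration, not part of this diff. */
	dma_addr_t handle;
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);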
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 8c6dfc6c7af6..9a02879b235d 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -231,9 +231,6 @@ extern struct page *mem_map_zero;
 #define pte_pfn(x)		((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT)
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 
-#define page_pte_prot(page, prot)	mk_pte(page, prot)
-#define page_pte(page)			page_pte_prot(page, __pgprot(0))
-
 static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 {
 	pte_t __pte;
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index 4568ee4022df..cef5e8270421 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -56,6 +56,11 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 	atomic_add(delta, (atomic_t *)(&sem->count));
 }
 
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+	return (sem->count != 0);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _SPARC64_RWSEM_H */
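
This hunk adds a rwsem_is_locked() helper that reports whether the semaphore's count is non-zero, i.e. whether anyone currently holds it or is queued on it. A hypothetical debugging assertion (not part of this patch) could use it like so:

	/* Hypothetical assertion -- illustration only, not in this patch. */
	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));	/* caller must hold mmap_sem */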
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
index 7419dd88b49e..093dcc6788db 100644
--- a/include/asm-sparc64/semaphore.h
+++ b/include/asm-sparc64/semaphore.h
@@ -22,9 +22,6 @@ struct semaphore {
 	{ ATOMIC_INIT(count), \
 	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
 
-#define __MUTEX_INITIALIZER(name) \
-	__SEMAPHORE_INITIALIZER(name, 1)
-
 #define __DECLARE_SEMAPHORE_GENERIC(name, count) \
 	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
 
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
index 9baf57db01d2..66138d959df5 100644
--- a/include/asm-sparc64/tlb.h
+++ b/include/asm-sparc64/tlb.h
@@ -25,9 +25,8 @@ struct mmu_gather {
 	struct mm_struct *mm;
 	unsigned int pages_nr;
 	unsigned int need_flush;
-	unsigned int tlb_frozen;
+	unsigned int fullmm;
 	unsigned int tlb_nr;
-	unsigned long freed;
 	unsigned long vaddrs[TLB_BATCH_NR];
 	struct page *pages[FREE_PTE_NR];
 };
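
For reference, a reconstruction of how struct mmu_gather reads after this hunk (derived only from the context above): tlb_frozen is renamed to fullmm and the freed counter is dropped.

	/* Reconstructed from the hunk above -- the structure after this change. */
	struct mmu_gather {
		struct mm_struct *mm;
		unsigned int pages_nr;
		unsigned int need_flush;
		unsigned int fullmm;
		unsigned int tlb_nr;
		unsigned long vaddrs[TLB_BATCH_NR];
		struct page *pages[FREE_PTE_NR];
	};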
@@ -44,14 +43,13 @@ extern void flush_tlb_pending(void);
 
 static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
 
 	BUG_ON(mp->tlb_nr);
 
 	mp->mm = mm;
 	mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
-	mp->tlb_frozen = full_mm_flush;
-	mp->freed = 0;
+	mp->fullmm = full_mm_flush;
 
 	return mp;
 }
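
tlb_gather_mmu() now takes the per-CPU gather with get_cpu_var() instead of __get_cpu_var(); get_cpu_var() also disables preemption, and the matching put_cpu_var() is added to tlb_finish_mmu() in the next hunk. A rough sketch of the pattern (an assumption about usage, not code from this file):

	/* Sketch: get_cpu_var() pins the task to this CPU's mmu_gathers batch. */
	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);	/* preemption off */
	/* ... batch page frees and deferred TLB flushes ... */
	put_cpu_var(mmu_gathers);				/* preemption back on */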
@@ -78,30 +76,19 @@ extern void smp_flush_tlb_mm(struct mm_struct *mm);
 
 static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
 {
-	unsigned long freed = mp->freed;
-	struct mm_struct *mm = mp->mm;
-	unsigned long rss = get_mm_counter(mm, rss);
-
-	if (rss < freed)
-		freed = rss;
-	add_mm_counter(mm, rss, -freed);
-
 	tlb_flush_mmu(mp);
 
-	if (mp->tlb_frozen) {
-		if (CTX_VALID(mm->context))
-			do_flush_tlb_mm(mm);
-		mp->tlb_frozen = 0;
+	if (mp->fullmm) {
+		if (CTX_VALID(mp->mm->context))
+			do_flush_tlb_mm(mp->mm);
+		mp->fullmm = 0;
 	} else
 		flush_tlb_pending();
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-}
 
-static inline unsigned int tlb_is_full_mm(struct mmu_gather *mp)
-{
-	return mp->tlb_frozen;
+	put_cpu_var(mmu_gathers);
 }
 
 static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)