Diffstat (limited to 'arch/csky/include')
 -rw-r--r--  arch/csky/include/asm/io.h        | 25
 -rw-r--r--  arch/csky/include/asm/pgalloc.h   | 43
 -rw-r--r--  arch/csky/include/asm/pgtable.h   |  9
 -rw-r--r--  arch/csky/include/asm/processor.h |  4
 4 files changed, 52 insertions(+), 29 deletions(-)
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index ecae6b358f95..c1dfa9c10e36 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -15,6 +15,31 @@ extern void iounmap(void *addr);
 extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 	size_t size, unsigned long flags);
 
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.
+ *
+ * For CACHEV1 (807, 810), store instruction could fast retire, so we need
+ * another mb() to prevent st fast retire.
+ *
+ * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't
+ * fast retire.
+ */
+#define readb(c)	({ u8  __v = readb_relaxed(c); rmb(); __v; })
+#define readw(c)	({ u16 __v = readw_relaxed(c); rmb(); __v; })
+#define readl(c)	({ u32 __v = readl_relaxed(c); rmb(); __v; })
+
+#ifdef CONFIG_CPU_HAS_CACHEV2
+#define writeb(v,c)	({ wmb(); writeb_relaxed((v),(c)); })
+#define writew(v,c)	({ wmb(); writew_relaxed((v),(c)); })
+#define writel(v,c)	({ wmb(); writel_relaxed((v),(c)); })
+#else
+#define writeb(v,c)	({ wmb(); writeb_relaxed((v),(c)); mb(); })
+#define writew(v,c)	({ wmb(); writew_relaxed((v),(c)); mb(); })
+#define writel(v,c)	({ wmb(); writel_relaxed((v),(c)); mb(); })
+#endif
+
 #define ioremap_nocache(phy, sz)	ioremap(phy, sz)
 #define ioremap_wc ioremap_nocache
 #define ioremap_wt ioremap_nocache
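
To make the ordering rules above concrete, here is a minimal doorbell-style sketch of how a driver would rely on them. It is illustration only: the device, its register offsets, and the descriptor word are invented, not taken from any csky driver.

#include <linux/io.h>

#define DOORBELL	0x10	/* hypothetical register offsets */
#define STATUS		0x14
#define STATUS_DONE	0x1

static void submit(void __iomem *regs, u32 *desc)
{
	desc[0] = 0x1;		/* store to Normal (cacheable) memory */

	/*
	 * writel() runs wmb() first, so the descriptor store above is
	 * visible to the device before the doorbell write arrives.
	 */
	writel(1, regs + DOORBELL);

	/*
	 * readl() runs rmb() after the load, so Normal memory reads that
	 * follow (e.g. of a completion ring) cannot pass the status read.
	 */
	while (!(readl(regs + STATUS) & STATUS_DONE))
		cpu_relax();
}

On CACHEV1 parts the extra trailing mb() in the writeX() variants additionally keeps a fast-retiring store from being reported complete before it reaches the device.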
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bf4f4a0e140e..d213bb47b717 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -24,41 +24,34 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 extern void pgd_init(unsigned long *p);
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-	unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *pte;
-	unsigned long *kaddr, i;
+	unsigned long i;
 
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL,
-					 PTE_ORDER);
-	kaddr = (unsigned long *)pte;
-	if (address & 0x80000000)
-		for (i = 0; i < (PAGE_SIZE/4); i++)
-			*(kaddr + i) = 0x1;
-	else
-		clear_page(kaddr);
+	pte = (pte_t *) __get_free_page(GFP_KERNEL);
+	if (!pte)
+		return NULL;
+
+	for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
+		(pte + i)->pte_low = _PAGE_GLOBAL;
 
 	return pte;
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
-	unsigned long address)
+static inline struct page *pte_alloc_one(struct mm_struct *mm)
 {
 	struct page *pte;
-	unsigned long *kaddr, i;
 
-	pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER);
-	if (pte) {
-		kaddr = kmap_atomic(pte);
-		if (address & 0x80000000) {
-			for (i = 0; i < (PAGE_SIZE/4); i++)
-				*(kaddr + i) = 0x1;
-		} else
-			clear_page(kaddr);
-		kunmap_atomic(kaddr);
-		pgtable_page_ctor(pte);
-	}
+	pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+	if (!pte)
+		return NULL;
+
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
+
 	return pte;
 }
 
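
Two things change here: pte_alloc_one_kernel() now seeds every slot with _PAGE_GLOBAL unconditionally (the old code did so only for kernel addresses above 0x80000000), which relies on pte_none() masking that bit out, as the pgtable.h hunk below arranges; and pte_alloc_one() now checks the pgtable_page_ctor() return value, freeing the page on failure. The matching free path would undo the ctor before releasing the page. The usual pattern looks like the sketch below, which is assumed here, not quoted from the csky tree:

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);		/* undo pgtable_page_ctor() */
	__free_page(pte);
}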
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index edfcbb25fd9f..dcea277c09ae 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -45,8 +45,8 @@
 	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
 #define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
 #define pte_clear(mm, addr, ptep)	set_pte((ptep), \
-	(((unsigned int)addr&0x80000000)?__pte(1):__pte(0)))
-#define pte_none(pte)	(!(pte_val(pte)&0xfffffffe))
+	(((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
+#define pte_none(pte)	(!(pte_val(pte) & ~_PAGE_GLOBAL))
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
 #define pte_pfn(x)	((unsigned long)((x).pte_low >> PAGE_SHIFT))
 #define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
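
pte_none() previously hard-coded the mask 0xfffffffe; spelling it ~_PAGE_GLOBAL states what is actually meant: a PTE carrying nothing but the global bit counts as empty, which is exactly the state pte_alloc_one_kernel() now writes. A standalone check of that invariant, assuming _PAGE_GLOBAL is bit 0 (as the old literals 0x1 and 0xfffffffe imply) and using a made-up _PAGE_PRESENT bit for illustration:

#include <assert.h>

#define _PAGE_GLOBAL	0x1UL	/* assumed: bit 0, per the old literals */
#define _PAGE_PRESENT	0x2UL	/* hypothetical bit, illustration only */
#define pte_none(v)	(!((v) & ~_PAGE_GLOBAL))

int main(void)
{
	assert(pte_none(0x0UL));		/* truly empty */
	assert(pte_none(_PAGE_GLOBAL));		/* freshly seeded slot */
	assert(!pte_none(_PAGE_GLOBAL | _PAGE_PRESENT | (0x1234UL << 12)));
	return 0;
}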
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
 
 #define pgd_index(address)	((address) >> PGDIR_SHIFT)
 
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+
 /*
  * Macro to make mark a page protection value as "uncacheable". Note
  * that "protection" is really a misnomer here as the protection value
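
Defining __HAVE_PHYS_MEM_ACCESS_PROT makes generic mmap paths such as /dev/mem ask the architecture which pgprot to use instead of taking the caller's unchanged. Only the declaration lives in this header; the body belongs in arch/csky/mm. As a rough idea of what such a hook typically does (a sketch under assumptions, not the actual csky implementation), non-RAM physical ranges are usually mapped uncached:

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	/* Device/reserved ranges get an uncached mapping; RAM keeps
	 * whatever protection the caller asked for. */
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);

	return vma_prot;
}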
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h
index 8f454810514f..21e0bd5293dd 100644
--- a/arch/csky/include/asm/processor.h
+++ b/arch/csky/include/asm/processor.h
@@ -49,7 +49,7 @@ struct thread_struct {
 };
 
 #define INIT_THREAD  { \
-	.ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \
+	.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
 	.sr = DEFAULT_PSR_VALUE, \
 }
 
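
The old initializer reached through init_thread_union; the new one uses the generic init_stack array directly. Since the generic headers declare init_stack as an array spanning THREAD_SIZE bytes, the new expression evaluates to the same top-of-stack address as before:

/* Declaration from include/linux/sched/task.h, current at the time
 * of this change: */
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

/* Therefore:
 *   sizeof(init_stack)                            == THREAD_SIZE
 *   sizeof(init_stack) + (unsigned long) &init_stack
 *     == (unsigned long) init_thread_union.stack + THREAD_SIZE
 */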
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->usp)
 
 #define task_pt_regs(p) \
-	((struct pt_regs *)(THREAD_SIZE + p->stack) - 1)
+	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
 #define cpu_relax() barrier()
 
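
task_stack_page() is the generic accessor for a task's kernel stack base and works whether or not the architecture selects THREAD_INFO_IN_TASK, which is why it replaces the open-coded p->stack. The macro still places pt_regs at the very top of the kernel stack; an equivalent open-coded form, for illustration only:

#include <linux/sched/task_stack.h>	/* task_stack_page() */

static inline struct pt_regs *csky_task_pt_regs(struct task_struct *p)
{
	unsigned long top = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* pt_regs occupies the highest sizeof(struct pt_regs) bytes. */
	return (struct pt_regs *)top - 1;
}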