author     Paul Mundt <lethal@linux-sh.org>  2009-12-21 01:44:34 -0500
committer  Paul Mundt <lethal@linux-sh.org>  2009-12-21 01:44:34 -0500
commit     4b4f887fb2780e39383c1f202b72f5a2780b2d4c
tree       e790d89c2612d2464ac00063dac677653f4ac9cb /arch/sh/include/asm
parent     6424db52e24e8cdf89917fa3c10395116440160e
parent     76e7461a21dfe13565b2a323b53c8cc963541126
Merge branch 'master' into sh/hw-breakpoints
Diffstat (limited to 'arch/sh/include/asm')

 arch/sh/include/asm/.gitignore       |  1
 arch/sh/include/asm/asm-offsets.h    |  1
 arch/sh/include/asm/cacheflush.h     |  1
 arch/sh/include/asm/dma.h            |  6
 arch/sh/include/asm/elf.h            |  1
 arch/sh/include/asm/io.h             | 11
 arch/sh/include/asm/machvec.h        |  2
 arch/sh/include/asm/pgalloc.h        | 28
 arch/sh/include/asm/pgalloc_nopmd.h  | 30
 arch/sh/include/asm/pgalloc_pmd.h    | 41
 arch/sh/include/asm/pgtable.h        | 22
 arch/sh/include/asm/pgtable_32.h     |  5
 arch/sh/include/asm/pgtable_nopmd.h  | 22
 arch/sh/include/asm/pgtable_pmd.h    | 55
 arch/sh/include/asm/sh_eth.h         |  1
 arch/sh/include/asm/spinlock.h       | 58
 arch/sh/include/asm/spinlock_types.h |  8
 arch/sh/include/asm/unistd_32.h      |  3
 arch/sh/include/asm/unistd_64.h      |  3
 19 files changed, 217 insertions(+), 82 deletions(-)
diff --git a/arch/sh/include/asm/.gitignore b/arch/sh/include/asm/.gitignore
deleted file mode 100644
index 378db779fb6c..000000000000
--- a/arch/sh/include/asm/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-machtypes.h
diff --git a/arch/sh/include/asm/asm-offsets.h b/arch/sh/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/sh/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index c29918f3c819..dda96eb3e7c0 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -42,6 +42,7 @@ extern void flush_cache_page(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long pfn);
 extern void flush_cache_range(struct vm_area_struct *vma,
                               unsigned long start, unsigned long end);
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *page);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_page(struct vm_area_struct *vma,
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index 04ad0e1e637e..07373a074090 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -19,9 +19,11 @@
 #include <asm-generic/dma.h>
 
 #ifdef CONFIG_NR_DMA_CHANNELS
 # define MAX_DMA_CHANNELS (CONFIG_NR_DMA_CHANNELS)
+#elif defined(CONFIG_NR_ONCHIP_DMA_CHANNELS)
+# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS)
 #else
-# define MAX_DMA_CHANNELS (CONFIG_NR_ONCHIP_DMA_CHANNELS)
+# define MAX_DMA_CHANNELS 0
 #endif
 
 /*
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ccb1d93bb043..ac04255022b6 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
  */
 #define CORE_DUMP_USE_REGSET
 
-#define USE_ELF_CORE_DUMP
 #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
 #define ELF_EXEC_PAGESIZE PAGE_SIZE
 
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 512cd3e9d0ca..026dd659a640 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -233,11 +233,17 @@ unsigned long long poke_real_address_q(unsigned long long addr,
  * doesn't exist, so everything must go through page tables.
  */
 #ifdef CONFIG_MMU
-void __iomem *__ioremap(unsigned long offset, unsigned long size,
-                        unsigned long flags);
+void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+                               unsigned long flags, void *caller);
 void __iounmap(void __iomem *addr);
 
 static inline void __iomem *
+__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
+{
+        return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
+}
+
+static inline void __iomem *
 __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 {
 #if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
@@ -271,6 +277,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
         return __ioremap(offset, size, flags);
 }
 #else
+#define __ioremap(offset, size, flags)      ((void __iomem *)(offset))
 #define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset))
 #define __iounmap(addr)                     do { } while (0)
 #endif /* CONFIG_MMU */
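Illustration (not part of the commit): the new __ioremap() wrapper above exists only to thread __builtin_return_address(0) through to __ioremap_caller(), so a mapping request can be attributed to its call site. A minimal standalone sketch of that pattern, with hypothetical names and no kernel dependencies:

    #include <stdio.h>

    /* stand-in for __ioremap_caller(): records who asked for the mapping */
    static void *remap_caller(unsigned long offset, void *caller)
    {
            printf("mapping 0x%lx requested from %p\n", offset, caller);
            return (void *)offset;          /* placeholder for the real mapping */
    }

    /* stand-in for the __ioremap() wrapper */
    static void *remap(unsigned long offset)
    {
            /* the address remap() returns to, i.e. its call site */
            return remap_caller(offset, __builtin_return_address(0));
    }

    int main(void)
    {
            remap(0xa0000000UL);
            return 0;
    }

__builtin_return_address() is a GCC extension; the kernel wrapper is inline, while the sketch keeps remap() out of line purely so the printed address is easy to interpret.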
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index 84dd37761f56..9c30955630ff 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -12,7 +12,7 @@
 
 #include <linux/types.h>
 #include <linux/time.h>
-#include <asm/machtypes.h>
+#include <generated/machtypes.h>
 
 struct sh_machine_vector {
         void (*mv_setup)(char **cmdline_p);
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 63ca37bd9a95..4ea27855c3b5 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -4,9 +4,14 @@
 #include <linux/quicklist.h>
 #include <asm/page.h>
 
-#define QUICK_PGD 0 /* We preserve special mappings over free */
 #define QUICK_PT 1 /* Other page table pages that are zero on free */
 
+#ifdef CONFIG_PGTABLE_LEVELS_3
+#include <asm/pgalloc_pmd.h>
+#else
+#include <asm/pgalloc_nopmd.h>
+#endif
+
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
                                        pte_t *pte)
 {
@@ -20,28 +25,9 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
-static inline void pgd_ctor(void *x)
-{
-        pgd_t *pgd = x;
-
-        memcpy(pgd + USER_PTRS_PER_PGD,
-               swapper_pg_dir + USER_PTRS_PER_PGD,
-               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-}
-
 /*
  * Allocate and free page tables.
  */
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-        return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-        quicklist_free(QUICK_PGD, NULL, pgd);
-}
-
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
@@ -81,7 +67,7 @@ do { \
 
 static inline void check_pgt_cache(void)
 {
-        quicklist_trim(QUICK_PGD, NULL, 25, 16);
+        __check_pgt_cache();
         quicklist_trim(QUICK_PT, NULL, 25, 16);
 }
 
diff --git a/arch/sh/include/asm/pgalloc_nopmd.h b/arch/sh/include/asm/pgalloc_nopmd.h
new file mode 100644
index 000000000000..e4b344c37e74
--- /dev/null
+++ b/arch/sh/include/asm/pgalloc_nopmd.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_SH_PGALLOC_NOPMD_H
+#define __ASM_SH_PGALLOC_NOPMD_H
+
+#define QUICK_PGD 0 /* We preserve special mappings over free */
+
+static inline void pgd_ctor(void *x)
+{
+        pgd_t *pgd = x;
+
+        memcpy(pgd + USER_PTRS_PER_PGD,
+               swapper_pg_dir + USER_PTRS_PER_PGD,
+               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+        quicklist_free(QUICK_PGD, NULL, pgd);
+}
+
+static inline void __check_pgt_cache(void)
+{
+        quicklist_trim(QUICK_PGD, NULL, 25, 16);
+}
+
+#endif /* __ASM_SH_PGALLOC_NOPMD_H */
diff --git a/arch/sh/include/asm/pgalloc_pmd.h b/arch/sh/include/asm/pgalloc_pmd.h
new file mode 100644
index 000000000000..20f75cc4eb09
--- /dev/null
+++ b/arch/sh/include/asm/pgalloc_pmd.h
@@ -0,0 +1,41 @@
+#ifndef __ASM_SH_PGALLOC_PMD_H
+#define __ASM_SH_PGALLOC_PMD_H
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+        pgd_t *pgd;
+        int i;
+
+        pgd = kzalloc(sizeof(*pgd) * PTRS_PER_PGD, GFP_KERNEL | __GFP_REPEAT);
+
+        for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
+                pgd[i] = swapper_pg_dir[i];
+
+        return pgd;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+        kfree(pgd);
+}
+
+static inline void __check_pgt_cache(void)
+{
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+        set_pud(pud, __pud((unsigned long)pmd));
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+        return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+        quicklist_free(QUICK_PT, NULL, pmd);
+}
+
+#endif /* __ASM_SH_PGALLOC_PMD_H */
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index ba3046e4f06f..9effcc3b0d10 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -12,7 +12,11 @@
 #ifndef __ASM_SH_PGTABLE_H
 #define __ASM_SH_PGTABLE_H
 
-#include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_PGTABLE_LEVELS_3
+#include <asm/pgtable_pmd.h>
+#else
+#include <asm/pgtable_nopmd.h>
+#endif
 #include <asm/page.h>
 
 #ifndef __ASSEMBLY__
@@ -51,28 +55,12 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
 #define NPHYS_SIGN (1LL << (NPHYS - 1))
 #define NPHYS_MASK (-1LL << NPHYS)
 
-/*
- * traditional two-level paging structure
- */
-/* PTE bits */
-#if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64)
-# define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */
-#else
-# define PTE_MAGNITUDE 2 /* 32-bit PTEs */
-#endif
-#define PTE_SHIFT PAGE_SHIFT
-#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE)
-
-/* PGD bits */
-#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 
 /* Entries per level */
 #define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
 
-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS 0
 
 #define PHYS_ADDR_MASK29 0x1fffffff
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index b35435516203..5003ee86f67b 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -344,7 +344,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 #define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
 
 #ifdef CONFIG_X2TLB
-#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
+#define pte_write(pte) \
+        ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
 #else
 #define pte_write(pte) ((pte).pte_low & _PAGE_RW)
 #endif
@@ -358,7 +359,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
  * individually toggled (and user permissions are entirely decoupled from
  * kernel permissions), we attempt to couple them a bit more sanely here.
  */
-PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
+PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
 PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
 PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
 #else
diff --git a/arch/sh/include/asm/pgtable_nopmd.h b/arch/sh/include/asm/pgtable_nopmd.h
new file mode 100644
index 000000000000..f0b525b3cb4a
--- /dev/null
+++ b/arch/sh/include/asm/pgtable_nopmd.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SH_PGTABLE_NOPMD_H
+#define __ASM_SH_PGTABLE_NOPMD_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * traditional two-level paging structure
+ */
+
+/* PTE bits */
+#define PTE_MAGNITUDE 2 /* 32-bit PTEs */
+
+#define PTE_SHIFT PAGE_SHIFT
+#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE)
+
+/* PGD bits */
+#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
+
+#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE))
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+
+#endif /* __ASM_SH_PGTABLE_NOPMD_H */
diff --git a/arch/sh/include/asm/pgtable_pmd.h b/arch/sh/include/asm/pgtable_pmd.h
new file mode 100644
index 000000000000..78dc36e1c2dd
--- /dev/null
+++ b/arch/sh/include/asm/pgtable_pmd.h
@@ -0,0 +1,55 @@
+#ifndef __ASM_SH_PGTABLE_PMD_H
+#define __ASM_SH_PGTABLE_PMD_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+/*
+ * Some cores need a 3-level page table layout, for example when using
+ * 64-bit PTEs and 4K pages.
+ */
+
+#define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */
+
+/* PGD bits */
+#define PGDIR_SHIFT 30
+
+#define PTRS_PER_PGD 4
+#define USER_PTRS_PER_PGD 2
+
+/* PMD bits */
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+
+#define pmd_ERROR(e) \
+        printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
+
+typedef struct { unsigned long long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+        return pud_val(pud);
+}
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+#define pud_none(x) (!pud_val(x))
+#define pud_present(x) (pud_val(x))
+#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0)
+#define pud_bad(x) (pud_val(x) & ~PAGE_MASK)
+
+/*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
+
+#endif /* __ASM_SH_PGTABLE_PMD_H */
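Worked example (my own illustration, not part of the commit, and it assumes 4 KiB pages, i.e. PAGE_SHIFT = 12): the geometry of the two layouts selected by CONFIG_PGTABLE_LEVELS_3 can be checked with a small standalone program.

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            /* 2-level (pgtable_nopmd.h): 4-byte PTEs, PTE_MAGNITUDE = 2 */
            int pte_bits    = PAGE_SHIFT - 2;                /* 10: 1024 PTEs per page  */
            int pgdir_shift = PAGE_SHIFT + pte_bits;         /* 22: 4 MiB per pgd entry */

            /* 3-level (pgtable_pmd.h): 8-byte PTEs, PTE_MAGNITUDE = 3 */
            int pmd_shift   = PAGE_SHIFT + (PAGE_SHIFT - 3); /* 21: 2 MiB per pmd entry */

            printf("2-level: %d PTEs/page, %lu MiB per pgd entry, 1024 entries = 4 GiB\n",
                   1 << pte_bits, (1UL << pgdir_shift) >> 20);
            printf("3-level: 512 PTEs/page, %lu MiB per pmd entry, 512 pmd entries\n"
                   "         = 1 GiB per pgd entry (PGDIR_SHIFT 30), 4 entries = 4 GiB\n",
                   (1UL << pmd_shift) >> 20);
            return 0;
    }

Either way the full 32-bit address space is covered; the 3-level variant pays for the doubled PTE size with an extra level instead of a shallower, wider pgd.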
diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h
index acf99700deed..f739061e2ee4 100644
--- a/arch/sh/include/asm/sh_eth.h
+++ b/arch/sh/include/asm/sh_eth.h
@@ -7,6 +7,7 @@ struct sh_eth_plat_data {
         int phy;
         int edmac_endian;
 
+        unsigned char mac_addr[6];
         unsigned no_ether_link:1;
         unsigned ether_link_active_low:1;
 };
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index a28c9f0053fd..bdc0f3b6c56a 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
-#define __raw_spin_is_locked(x) ((x)->lock <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-        do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+        do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
  *
  * We make no fairness assumptions.  They have a cost.
  */
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         unsigned long tmp;
         unsigned long oldval;
 
         __asm__ __volatile__ (
                 "1: \n\t"
-                "movli.l @%2, %0 ! __raw_spin_lock \n\t"
+                "movli.l @%2, %0 ! arch_spin_lock \n\t"
                 "mov %0, %1 \n\t"
                 "mov #0, %0 \n\t"
                 "movco.l %0, @%2 \n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
         );
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
         unsigned long tmp;
 
         __asm__ __volatile__ (
-                "mov #1, %0 ! __raw_spin_unlock \n\t"
+                "mov #1, %0 ! arch_spin_unlock \n\t"
                 "mov.l %0, @%1 \n\t"
                 : "=&z" (tmp)
                 : "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
         );
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
         unsigned long tmp, oldval;
 
         __asm__ __volatile__ (
                 "1: \n\t"
-                "movli.l @%2, %0 ! __raw_spin_trylock \n\t"
+                "movli.l @%2, %0 ! arch_spin_trylock \n\t"
                 "mov %0, %1 \n\t"
                 "mov #0, %0 \n\t"
                 "movco.l %0, @%2 \n\t"
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((x)->lock > 0)
+#define arch_read_can_lock(x) ((x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp;
 
         __asm__ __volatile__ (
                 "1: \n\t"
-                "movli.l @%1, %0 ! __raw_read_lock \n\t"
+                "movli.l @%1, %0 ! arch_read_lock \n\t"
                 "cmp/pl %0 \n\t"
                 "bf 1b \n\t"
                 "add #-1, %0 \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
         );
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long tmp;
 
         __asm__ __volatile__ (
                 "1: \n\t"
-                "movli.l @%1, %0 ! __raw_read_unlock \n\t"
+                "movli.l @%1, %0 ! arch_read_unlock \n\t"
                 "add #1, %0 \n\t"
                 "movco.l %0, @%1 \n\t"
                 "bf 1b \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
         );
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp;
 
         __asm__ __volatile__ (
                 "1: \n\t"
-                "movli.l @%1, %0 ! __raw_write_lock \n\t"
+                "movli.l @%1, %0 ! arch_write_lock \n\t"
                 "cmp/hs %2, %0 \n\t"
                 "bf 1b \n\t"
                 "sub %2, %0 \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
         );
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         __asm__ __volatile__ (
-                "mov.l %1, @%0 ! __raw_write_unlock \n\t"
+                "mov.l %1, @%0 ! arch_write_unlock \n\t"
                 :
                 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                 : "t", "memory"
         );
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
         unsigned long tmp, oldval;
 
         __asm__ __volatile__ (
                 "1: \n\t"
-                "movli.l @%2, %0 ! __raw_read_trylock \n\t"
+                "movli.l @%2, %0 ! arch_read_trylock \n\t"
                 "mov %0, %1 \n\t"
                 "cmp/pl %0 \n\t"
                 "bf 2f \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
         return (oldval > 0);
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned long tmp, oldval;
 
         __asm__ __volatile__ (
                 "1: \n\t"
-                "movli.l @%2, %0 ! __raw_write_trylock \n\t"
+                "movli.l @%2, %0 ! arch_write_trylock \n\t"
                 "mov %0, %1 \n\t"
                 "cmp/hs %3, %0 \n\t"
                 "bf 2f \n\t"
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
         return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
 
 #endif /* __ASM_SH_SPINLOCK_H */
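A side note, not part of the commit: the movli.l/movco.l pairs above are SH-4A load-locked/store-conditional instructions, and the renamed arch_spin_lock() loop is in effect an atomic exchange that retries until the previous value was positive. A rough standalone C rendering of that behaviour (sketch only, C11 atomics, hypothetical names):

    #include <stdatomic.h>

    struct sketch_spinlock {
            _Atomic int lock;               /* 1 = free, <= 0 = held */
    };

    static void sketch_spin_lock(struct sketch_spinlock *lp)
    {
            int oldval;

            do {
                    /* grab the old value and store 0 ("held") in one atomic step */
                    oldval = atomic_exchange(&lp->lock, 0);
                    /* if it was already <= 0 someone else holds it: spin and retry */
            } while (oldval <= 0);
    }

    static void sketch_spin_unlock(struct sketch_spinlock *lp)
    {
            atomic_store(&lp->lock, 1);     /* mark the lock free again */
    }

    int main(void)
    {
            struct sketch_spinlock l = { 1 };

            sketch_spin_lock(&l);
            sketch_spin_unlock(&l);
            return 0;
    }

The real code also retries when the store-conditional itself fails (the bf 1b after movco.l); atomic_exchange() hides that retry inside the compiler/library.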
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index b4d244e7b60c..9b7560db06ca 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
 
 typedef struct {
         volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 typedef struct {
         volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
 #define RW_LOCK_BIAS 0x01000000
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
 
 #endif
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index f3fd1b9eb6b1..f18c4f9baf27 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -345,8 +345,9 @@
 #define __NR_pwritev 334
 #define __NR_rt_tgsigqueueinfo 335
 #define __NR_perf_event_open 336
+#define __NR_recvmmsg 337
 
-#define NR_syscalls 337
+#define NR_syscalls 338
 
 #ifdef __KERNEL__
 
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 343ce8f073ea..3e7645d11130 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -385,10 +385,11 @@
 #define __NR_pwritev 362
 #define __NR_rt_tgsigqueueinfo 363
 #define __NR_perf_event_open 364
+#define __NR_recvmmsg 365
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 365
+#define NR_syscalls 366
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR