Diffstat (limited to 'include/asm-generic')
32 files changed, 3416 insertions, 0 deletions
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
new file mode 100644
index 000000000000..c20ec257ecc0
--- /dev/null
+++ b/include/asm-generic/4level-fixup.h
@@ -0,0 +1,38 @@
1 | #ifndef _4LEVEL_FIXUP_H | ||
2 | #define _4LEVEL_FIXUP_H | ||
3 | |||
4 | #define __ARCH_HAS_4LEVEL_HACK | ||
5 | #define __PAGETABLE_PUD_FOLDED | ||
6 | |||
7 | #define PUD_SIZE PGDIR_SIZE | ||
8 | #define PUD_MASK PGDIR_MASK | ||
9 | #define PTRS_PER_PUD 1 | ||
10 | |||
11 | #define pud_t pgd_t | ||
12 | |||
13 | #define pmd_alloc(mm, pud, address) \ | ||
14 | ({ pmd_t *ret; \ | ||
15 | if (pgd_none(*pud)) \ | ||
16 | ret = __pmd_alloc(mm, pud, address); \ | ||
17 | else \ | ||
18 | ret = pmd_offset(pud, address); \ | ||
19 | ret; \ | ||
20 | }) | ||
21 | |||
22 | #define pud_alloc(mm, pgd, address) (pgd) | ||
23 | #define pud_offset(pgd, start) (pgd) | ||
24 | #define pud_none(pud) 0 | ||
25 | #define pud_bad(pud) 0 | ||
26 | #define pud_present(pud) 1 | ||
27 | #define pud_ERROR(pud) do { } while (0) | ||
28 | #define pud_clear(pud) pgd_clear(pud) | ||
29 | |||
30 | #undef pud_free_tlb | ||
31 | #define pud_free_tlb(tlb, x) do { } while (0) | ||
32 | #define pud_free(x) do { } while (0) | ||
33 | #define __pud_free_tlb(tlb, x) do { } while (0) | ||
34 | |||
35 | #undef pud_addr_end | ||
36 | #define pud_addr_end(addr, end) (end) | ||
37 | |||
38 | #endif | ||
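An illustrative walk sketch, not part of the patch, showing what these macros do on a three-level architecture that includes this header; struct mm_struct *mm and addr are placeholders and pgd_offset() is the arch's usual macro:

static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);       /* arch-provided as usual */
        pud_t *pud = pud_alloc(mm, pgd, addr);   /* expands to (pgd): no fourth level */

        return pmd_alloc(mm, pud, addr);         /* calls __pmd_alloc() only if the entry is empty */
}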
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
new file mode 100644
index 000000000000..ce31b739fd80
--- /dev/null
+++ b/include/asm-generic/bitops.h
@@ -0,0 +1,81 @@
1 | #ifndef _ASM_GENERIC_BITOPS_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_H_ | ||
3 | |||
4 | /* | ||
5 | * For the benefit of those who are trying to port Linux to another | ||
6 | * architecture, here are some C-language equivalents. You should | ||
7 | * recode these in the native assembly language, if at all possible. | ||
8 | * To guarantee atomicity, these routines call cli() and sti() to | ||
9 | * disable interrupts while they operate. (You have to provide inline | ||
10 | * routines to cli() and sti().) | ||
11 | * | ||
12 | * Also note, these routines assume that you have 32 bit longs. | ||
13 | * You will have to change this if you are trying to port Linux to the | ||
14 | * Alpha architecture or to a Cray. :-) | ||
15 | * | ||
16 | * C language equivalents written by Theodore Ts'o, 9/26/92 | ||
17 | */ | ||
18 | |||
19 | extern __inline__ int set_bit(int nr,long * addr) | ||
20 | { | ||
21 | int mask, retval; | ||
22 | |||
23 | addr += nr >> 5; | ||
24 | mask = 1 << (nr & 0x1f); | ||
25 | cli(); | ||
26 | retval = (mask & *addr) != 0; | ||
27 | *addr |= mask; | ||
28 | sti(); | ||
29 | return retval; | ||
30 | } | ||
31 | |||
32 | extern __inline__ int clear_bit(int nr, long * addr) | ||
33 | { | ||
34 | int mask, retval; | ||
35 | |||
36 | addr += nr >> 5; | ||
37 | mask = 1 << (nr & 0x1f); | ||
38 | cli(); | ||
39 | retval = (mask & *addr) != 0; | ||
40 | *addr &= ~mask; | ||
41 | sti(); | ||
42 | return retval; | ||
43 | } | ||
44 | |||
45 | extern __inline__ int test_bit(int nr, const unsigned long * addr) | ||
46 | { | ||
47 | int mask; | ||
48 | |||
49 | addr += nr >> 5; | ||
50 | mask = 1 << (nr & 0x1f); | ||
51 | return ((mask & *addr) != 0); | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * fls: find last bit set. | ||
56 | */ | ||
57 | |||
58 | #define fls(x) generic_fls(x) | ||
59 | |||
60 | #ifdef __KERNEL__ | ||
61 | |||
62 | /* | ||
63 | * ffs: find first bit set. This is defined the same way as | ||
64 | * the libc and compiler builtin ffs routines, therefore | ||
65 | * differs in spirit from the above ffz (man ffs). | ||
66 | */ | ||
67 | |||
68 | #define ffs(x) generic_ffs(x) | ||
69 | |||
70 | /* | ||
71 | * hweightN: returns the hamming weight (i.e. the number | ||
72 | * of bits set) of a N-bit word | ||
73 | */ | ||
74 | |||
75 | #define hweight32(x) generic_hweight32(x) | ||
76 | #define hweight16(x) generic_hweight16(x) | ||
77 | #define hweight8(x) generic_hweight8(x) | ||
78 | |||
79 | #endif /* __KERNEL__ */ | ||
80 | |||
81 | #endif /* _ASM_GENERIC_BITOPS_H */ | ||
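An illustrative caller for the routines above, not part of the patch. Note that these generic set_bit()/clear_bit() return the previous bit value, and they rely on the arch providing cli()/sti() as the header comment says; dev_flags is a made-up bitmap:

static long dev_flags[1];                         /* one-word bitmap, hypothetical */

static int bitops_example(void)
{
        int was_set = set_bit(0, dev_flags);      /* old value of bit 0 */

        if (test_bit(0, (const unsigned long *)dev_flags))  /* test_bit() wants unsigned long * */
                clear_bit(0, dev_flags);          /* also returns the old value */
        return was_set;
}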
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
new file mode 100644
index 000000000000..e5913c3b715a
--- /dev/null
+++ b/include/asm-generic/bug.h
@@ -0,0 +1,34 @@
1 | #ifndef _ASM_GENERIC_BUG_H | ||
2 | #define _ASM_GENERIC_BUG_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/config.h> | ||
6 | |||
7 | #ifndef HAVE_ARCH_BUG | ||
8 | #define BUG() do { \ | ||
9 | printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ | ||
10 | panic("BUG!"); \ | ||
11 | } while (0) | ||
12 | #endif | ||
13 | |||
14 | #ifndef HAVE_ARCH_PAGE_BUG | ||
15 | #define PAGE_BUG(page) do { \ | ||
16 | printk("page BUG for page at %p\n", page); \ | ||
17 | BUG(); \ | ||
18 | } while (0) | ||
19 | #endif | ||
20 | |||
21 | #ifndef HAVE_ARCH_BUG_ON | ||
22 | #define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0) | ||
23 | #endif | ||
24 | |||
25 | #ifndef HAVE_ARCH_WARN_ON | ||
26 | #define WARN_ON(condition) do { \ | ||
27 | if (unlikely((condition)!=0)) { \ | ||
28 | printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \ | ||
29 | dump_stack(); \ | ||
30 | } \ | ||
31 | } while (0) | ||
32 | #endif | ||
33 | |||
34 | #endif | ||
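A short usage sketch for the fallbacks above, not part of the patch ('len' and the limit are made up): BUG_ON() is for unrecoverable inconsistencies and panics, WARN_ON() only logs and dumps the stack:

static void check_len(unsigned long len)
{
        BUG_ON(len == 0);          /* printk + panic("BUG!") with file/line */
        WARN_ON(len > 4096);       /* "Badness in ..." + dump_stack(), then keep going */
}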
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
new file mode 100644
index 000000000000..6f178563e336
--- /dev/null
+++ b/include/asm-generic/cputime.h
@@ -0,0 +1,66 @@
1 | #ifndef _ASM_GENERIC_CPUTIME_H | ||
2 | #define _ASM_GENERIC_CPUTIME_H | ||
3 | |||
4 | #include <linux/time.h> | ||
5 | #include <linux/jiffies.h> | ||
6 | |||
7 | typedef unsigned long cputime_t; | ||
8 | |||
9 | #define cputime_zero (0UL) | ||
10 | #define cputime_max ((~0UL >> 1) - 1) | ||
11 | #define cputime_add(__a, __b) ((__a) + (__b)) | ||
12 | #define cputime_sub(__a, __b) ((__a) - (__b)) | ||
13 | #define cputime_div(__a, __n) ((__a) / (__n)) | ||
14 | #define cputime_halve(__a) ((__a) >> 1) | ||
15 | #define cputime_eq(__a, __b) ((__a) == (__b)) | ||
16 | #define cputime_gt(__a, __b) ((__a) > (__b)) | ||
17 | #define cputime_ge(__a, __b) ((__a) >= (__b)) | ||
18 | #define cputime_lt(__a, __b) ((__a) < (__b)) | ||
19 | #define cputime_le(__a, __b) ((__a) <= (__b)) | ||
20 | #define cputime_to_jiffies(__ct) (__ct) | ||
21 | #define jiffies_to_cputime(__hz) (__hz) | ||
22 | |||
23 | typedef u64 cputime64_t; | ||
24 | |||
25 | #define cputime64_zero (0ULL) | ||
26 | #define cputime64_add(__a, __b) ((__a) + (__b)) | ||
27 | #define cputime64_to_jiffies64(__ct) (__ct) | ||
28 | #define cputime_to_cputime64(__ct) ((u64) __ct) | ||
29 | |||
30 | |||
31 | /* | ||
32 | * Convert cputime to milliseconds and back. | ||
33 | */ | ||
34 | #define cputime_to_msecs(__ct) jiffies_to_msecs(__ct) | ||
35 | #define msecs_to_cputime(__msecs) msecs_to_jiffies(__msecs) | ||
36 | |||
37 | /* | ||
38 | * Convert cputime to seconds and back. | ||
39 | */ | ||
40 | #define cputime_to_secs(jif) ((jif) / HZ) | ||
41 | #define secs_to_cputime(sec) ((sec) * HZ) | ||
42 | |||
43 | /* | ||
44 | * Convert cputime to timespec and back. | ||
45 | */ | ||
46 | #define timespec_to_cputime(__val) timespec_to_jiffies(__val) | ||
47 | #define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val) | ||
48 | |||
49 | /* | ||
50 | * Convert cputime to timeval and back. | ||
51 | */ | ||
52 | #define timeval_to_cputime(__val) timeval_to_jiffies(__val) | ||
53 | #define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val) | ||
54 | |||
55 | /* | ||
56 | * Convert cputime to clock and back. | ||
57 | */ | ||
58 | #define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct) | ||
59 | #define clock_t_to_cputime(__x) clock_t_to_jiffies(__x) | ||
60 | |||
61 | /* | ||
62 | * Convert cputime64 to clock. | ||
63 | */ | ||
64 | #define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct) | ||
65 | |||
66 | #endif | ||
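A small sketch, not part of the patch, of what the jiffies-backed cputime_t means in practice: every conversion above is a plain jiffies conversion, so the values below follow directly from HZ:

static void cputime_example(void)
{
        cputime_t ct = jiffies_to_cputime(10 * HZ);   /* ten seconds of CPU time */
        unsigned long ms  = cputime_to_msecs(ct);     /* 10000 */
        unsigned long sec = cputime_to_secs(ct);      /* 10 */

        (void)ms;
        (void)sec;
}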
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
new file mode 100644
index 000000000000..8f4e3193342e
--- /dev/null
+++ b/include/asm-generic/div64.h
@@ -0,0 +1,58 @@
1 | #ifndef _ASM_GENERIC_DIV64_H | ||
2 | #define _ASM_GENERIC_DIV64_H | ||
3 | /* | ||
4 | * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com> | ||
5 | * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h | ||
6 | * | ||
7 | * The semantics of do_div() are: | ||
8 | * | ||
9 | * uint32_t do_div(uint64_t *n, uint32_t base) | ||
10 | * { | ||
11 | * uint32_t remainder = *n % base; | ||
12 | * *n = *n / base; | ||
13 | * return remainder; | ||
14 | * } | ||
15 | * | ||
16 | * NOTE: macro parameter n is evaluated multiple times, | ||
17 | * beware of side effects! | ||
18 | */ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/compiler.h> | ||
22 | |||
23 | #if BITS_PER_LONG == 64 | ||
24 | |||
25 | # define do_div(n,base) ({ \ | ||
26 | uint32_t __base = (base); \ | ||
27 | uint32_t __rem; \ | ||
28 | __rem = ((uint64_t)(n)) % __base; \ | ||
29 | (n) = ((uint64_t)(n)) / __base; \ | ||
30 | __rem; \ | ||
31 | }) | ||
32 | |||
33 | #elif BITS_PER_LONG == 32 | ||
34 | |||
35 | extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); | ||
36 | |||
37 | /* The unnecessary pointer compare is there | ||
38 | * to check for type safety (n must be 64bit) | ||
39 | */ | ||
40 | # define do_div(n,base) ({ \ | ||
41 | uint32_t __base = (base); \ | ||
42 | uint32_t __rem; \ | ||
43 | (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ | ||
44 | if (likely(((n) >> 32) == 0)) { \ | ||
45 | __rem = (uint32_t)(n) % __base; \ | ||
46 | (n) = (uint32_t)(n) / __base; \ | ||
47 | } else \ | ||
48 | __rem = __div64_32(&(n), __base); \ | ||
49 | __rem; \ | ||
50 | }) | ||
51 | |||
52 | #else /* BITS_PER_LONG == ?? */ | ||
53 | |||
54 | # error do_div() does not yet support the C64 | ||
55 | |||
56 | #endif /* BITS_PER_LONG */ | ||
57 | |||
58 | #endif /* _ASM_GENERIC_DIV64_H */ | ||
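A usage sketch matching the semantics spelled out in the header comment (not part of the patch): the macro divides its first argument in place and returns the remainder, so it must be handed a 64-bit lvalue:

static u32 do_div_example(void)
{
        u64 ns = 1000000123ULL;
        u32 rem = do_div(ns, 1000000);   /* ns is now 1000, rem is 123 */

        return rem;                      /* never write do_div(a + b, c): n must be an lvalue */
}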
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
new file mode 100644
index 000000000000..fd9de9502dff
--- /dev/null
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -0,0 +1,22 @@
1 | #ifndef _ASM_GENERIC_DMA_MAPPING_H | ||
2 | #define _ASM_GENERIC_DMA_MAPPING_H | ||
3 | |||
4 | /* This is used for archs that do not support DMA */ | ||
5 | |||
6 | |||
7 | static inline void * | ||
8 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
9 | int flag) | ||
10 | { | ||
11 | BUG(); | ||
12 | return NULL; | ||
13 | } | ||
14 | |||
15 | static inline void | ||
16 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | ||
17 | dma_addr_t dma_handle) | ||
18 | { | ||
19 | BUG(); | ||
20 | } | ||
21 | |||
22 | #endif /* _ASM_GENERIC_DMA_MAPPING_H */ | ||
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 000000000000..8cef663c5cd9
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,309 @@
1 | /* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com | ||
2 | * | ||
3 | * Implements the generic device dma API via the existing pci_ one | ||
4 | * for unconverted architectures | ||
5 | */ | ||
6 | |||
7 | #ifndef _ASM_GENERIC_DMA_MAPPING_H | ||
8 | #define _ASM_GENERIC_DMA_MAPPING_H | ||
9 | |||
10 | #include <linux/config.h> | ||
11 | |||
12 | #ifdef CONFIG_PCI | ||
13 | |||
14 | /* we implement the API below in terms of the existing PCI one, | ||
15 | * so include it */ | ||
16 | #include <linux/pci.h> | ||
17 | /* need struct page definitions */ | ||
18 | #include <linux/mm.h> | ||
19 | |||
20 | static inline int | ||
21 | dma_supported(struct device *dev, u64 mask) | ||
22 | { | ||
23 | BUG_ON(dev->bus != &pci_bus_type); | ||
24 | |||
25 | return pci_dma_supported(to_pci_dev(dev), mask); | ||
26 | } | ||
27 | |||
28 | static inline int | ||
29 | dma_set_mask(struct device *dev, u64 dma_mask) | ||
30 | { | ||
31 | BUG_ON(dev->bus != &pci_bus_type); | ||
32 | |||
33 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | ||
34 | } | ||
35 | |||
36 | static inline void * | ||
37 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
38 | unsigned int __nocast flag) | ||
39 | { | ||
40 | BUG_ON(dev->bus != &pci_bus_type); | ||
41 | |||
42 | return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle); | ||
43 | } | ||
44 | |||
45 | static inline void | ||
46 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | ||
47 | dma_addr_t dma_handle) | ||
48 | { | ||
49 | BUG_ON(dev->bus != &pci_bus_type); | ||
50 | |||
51 | pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle); | ||
52 | } | ||
53 | |||
54 | static inline dma_addr_t | ||
55 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, | ||
56 | enum dma_data_direction direction) | ||
57 | { | ||
58 | BUG_ON(dev->bus != &pci_bus_type); | ||
59 | |||
60 | return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction); | ||
61 | } | ||
62 | |||
63 | static inline void | ||
64 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
65 | enum dma_data_direction direction) | ||
66 | { | ||
67 | BUG_ON(dev->bus != &pci_bus_type); | ||
68 | |||
69 | pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction); | ||
70 | } | ||
71 | |||
72 | static inline dma_addr_t | ||
73 | dma_map_page(struct device *dev, struct page *page, | ||
74 | unsigned long offset, size_t size, | ||
75 | enum dma_data_direction direction) | ||
76 | { | ||
77 | BUG_ON(dev->bus != &pci_bus_type); | ||
78 | |||
79 | return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction); | ||
80 | } | ||
81 | |||
82 | static inline void | ||
83 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
84 | enum dma_data_direction direction) | ||
85 | { | ||
86 | BUG_ON(dev->bus != &pci_bus_type); | ||
87 | |||
88 | pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction); | ||
89 | } | ||
90 | |||
91 | static inline int | ||
92 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
93 | enum dma_data_direction direction) | ||
94 | { | ||
95 | BUG_ON(dev->bus != &pci_bus_type); | ||
96 | |||
97 | return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); | ||
98 | } | ||
99 | |||
100 | static inline void | ||
101 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
102 | enum dma_data_direction direction) | ||
103 | { | ||
104 | BUG_ON(dev->bus != &pci_bus_type); | ||
105 | |||
106 | pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction); | ||
107 | } | ||
108 | |||
109 | static inline void | ||
110 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
111 | enum dma_data_direction direction) | ||
112 | { | ||
113 | BUG_ON(dev->bus != &pci_bus_type); | ||
114 | |||
115 | pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, | ||
116 | size, (int)direction); | ||
117 | } | ||
118 | |||
119 | static inline void | ||
120 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
121 | enum dma_data_direction direction) | ||
122 | { | ||
123 | BUG_ON(dev->bus != &pci_bus_type); | ||
124 | |||
125 | pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, | ||
126 | size, (int)direction); | ||
127 | } | ||
128 | |||
129 | static inline void | ||
130 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
131 | enum dma_data_direction direction) | ||
132 | { | ||
133 | BUG_ON(dev->bus != &pci_bus_type); | ||
134 | |||
135 | pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction); | ||
136 | } | ||
137 | |||
138 | static inline void | ||
139 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
140 | enum dma_data_direction direction) | ||
141 | { | ||
142 | BUG_ON(dev->bus != &pci_bus_type); | ||
143 | |||
144 | pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction); | ||
145 | } | ||
146 | |||
147 | static inline int | ||
148 | dma_mapping_error(dma_addr_t dma_addr) | ||
149 | { | ||
150 | return pci_dma_mapping_error(dma_addr); | ||
151 | } | ||
152 | |||
153 | |||
154 | #else | ||
155 | |||
156 | static inline int | ||
157 | dma_supported(struct device *dev, u64 mask) | ||
158 | { | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | static inline int | ||
163 | dma_set_mask(struct device *dev, u64 dma_mask) | ||
164 | { | ||
165 | BUG(); | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static inline void * | ||
170 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
171 | unsigned int __nocast flag) | ||
172 | { | ||
173 | BUG(); | ||
174 | return NULL; | ||
175 | } | ||
176 | |||
177 | static inline void | ||
178 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | ||
179 | dma_addr_t dma_handle) | ||
180 | { | ||
181 | BUG(); | ||
182 | } | ||
183 | |||
184 | static inline dma_addr_t | ||
185 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, | ||
186 | enum dma_data_direction direction) | ||
187 | { | ||
188 | BUG(); | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static inline void | ||
193 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
194 | enum dma_data_direction direction) | ||
195 | { | ||
196 | BUG(); | ||
197 | } | ||
198 | |||
199 | static inline dma_addr_t | ||
200 | dma_map_page(struct device *dev, struct page *page, | ||
201 | unsigned long offset, size_t size, | ||
202 | enum dma_data_direction direction) | ||
203 | { | ||
204 | BUG(); | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static inline void | ||
209 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
210 | enum dma_data_direction direction) | ||
211 | { | ||
212 | BUG(); | ||
213 | } | ||
214 | |||
215 | static inline int | ||
216 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
217 | enum dma_data_direction direction) | ||
218 | { | ||
219 | BUG(); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | static inline void | ||
224 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
225 | enum dma_data_direction direction) | ||
226 | { | ||
227 | BUG(); | ||
228 | } | ||
229 | |||
230 | static inline void | ||
231 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
232 | enum dma_data_direction direction) | ||
233 | { | ||
234 | BUG(); | ||
235 | } | ||
236 | |||
237 | static inline void | ||
238 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
239 | enum dma_data_direction direction) | ||
240 | { | ||
241 | BUG(); | ||
242 | } | ||
243 | |||
244 | static inline void | ||
245 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
246 | enum dma_data_direction direction) | ||
247 | { | ||
248 | BUG(); | ||
249 | } | ||
250 | |||
251 | static inline void | ||
252 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
253 | enum dma_data_direction direction) | ||
254 | { | ||
255 | BUG(); | ||
256 | } | ||
257 | |||
258 | static inline int | ||
259 | dma_error(dma_addr_t dma_addr) | ||
260 | { | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | #endif | ||
265 | |||
266 | /* Now for the API extensions over the pci_ one */ | ||
267 | |||
268 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
269 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
270 | #define dma_is_consistent(d) (1) | ||
271 | |||
272 | static inline int | ||
273 | dma_get_cache_alignment(void) | ||
274 | { | ||
275 | /* no easy way to get cache size on all processors, so return | ||
276 | * the maximum possible, to be safe */ | ||
277 | return (1 << L1_CACHE_SHIFT_MAX); | ||
278 | } | ||
279 | |||
280 | static inline void | ||
281 | dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
282 | unsigned long offset, size_t size, | ||
283 | enum dma_data_direction direction) | ||
284 | { | ||
285 | /* just sync everything, that's all the pci API can do */ | ||
286 | dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction); | ||
287 | } | ||
288 | |||
289 | static inline void | ||
290 | dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
291 | unsigned long offset, size_t size, | ||
292 | enum dma_data_direction direction) | ||
293 | { | ||
294 | /* just sync everything, that's all the pci API can do */ | ||
295 | dma_sync_single_for_device(dev, dma_handle, offset+size, direction); | ||
296 | } | ||
297 | |||
298 | static inline void | ||
299 | dma_cache_sync(void *vaddr, size_t size, | ||
300 | enum dma_data_direction direction) | ||
301 | { | ||
302 | /* could define this in terms of the dma_cache ... operations, | ||
303 | * but if you get this on a platform, you should convert the platform | ||
304 | * to using the generic device DMA API */ | ||
305 | BUG(); | ||
306 | } | ||
307 | |||
308 | #endif | ||
309 | |||
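A minimal driver-side sketch, not part of the patch, of the dma_ API this header layers over the pci_ calls; the device, buffer and length are placeholders and error/cleanup paths are elided:

static int start_xfer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(handle))
                return -EIO;                 /* mapping failed */
        /* ... hand 'handle' to the device and run the transfer ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}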
diff --git a/include/asm-generic/errno-base.h b/include/asm-generic/errno-base.h
new file mode 100644
index 000000000000..65115978510f
--- /dev/null
+++ b/include/asm-generic/errno-base.h
@@ -0,0 +1,39 @@
1 | #ifndef _ASM_GENERIC_ERRNO_BASE_H | ||
2 | #define _ASM_GENERIC_ERRNO_BASE_H | ||
3 | |||
4 | #define EPERM 1 /* Operation not permitted */ | ||
5 | #define ENOENT 2 /* No such file or directory */ | ||
6 | #define ESRCH 3 /* No such process */ | ||
7 | #define EINTR 4 /* Interrupted system call */ | ||
8 | #define EIO 5 /* I/O error */ | ||
9 | #define ENXIO 6 /* No such device or address */ | ||
10 | #define E2BIG 7 /* Argument list too long */ | ||
11 | #define ENOEXEC 8 /* Exec format error */ | ||
12 | #define EBADF 9 /* Bad file number */ | ||
13 | #define ECHILD 10 /* No child processes */ | ||
14 | #define EAGAIN 11 /* Try again */ | ||
15 | #define ENOMEM 12 /* Out of memory */ | ||
16 | #define EACCES 13 /* Permission denied */ | ||
17 | #define EFAULT 14 /* Bad address */ | ||
18 | #define ENOTBLK 15 /* Block device required */ | ||
19 | #define EBUSY 16 /* Device or resource busy */ | ||
20 | #define EEXIST 17 /* File exists */ | ||
21 | #define EXDEV 18 /* Cross-device link */ | ||
22 | #define ENODEV 19 /* No such device */ | ||
23 | #define ENOTDIR 20 /* Not a directory */ | ||
24 | #define EISDIR 21 /* Is a directory */ | ||
25 | #define EINVAL 22 /* Invalid argument */ | ||
26 | #define ENFILE 23 /* File table overflow */ | ||
27 | #define EMFILE 24 /* Too many open files */ | ||
28 | #define ENOTTY 25 /* Not a typewriter */ | ||
29 | #define ETXTBSY 26 /* Text file busy */ | ||
30 | #define EFBIG 27 /* File too large */ | ||
31 | #define ENOSPC 28 /* No space left on device */ | ||
32 | #define ESPIPE 29 /* Illegal seek */ | ||
33 | #define EROFS 30 /* Read-only file system */ | ||
34 | #define EMLINK 31 /* Too many links */ | ||
35 | #define EPIPE 32 /* Broken pipe */ | ||
36 | #define EDOM 33 /* Math argument out of domain of func */ | ||
37 | #define ERANGE 34 /* Math result not representable */ | ||
38 | |||
39 | #endif | ||
diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h
new file mode 100644
index 000000000000..4dd2384bc38d
--- /dev/null
+++ b/include/asm-generic/errno.h
@@ -0,0 +1,105 @@
1 | #ifndef _ASM_GENERIC_ERRNO_H | ||
2 | #define _ASM_GENERIC_ERRNO_H | ||
3 | |||
4 | #include <asm-generic/errno-base.h> | ||
5 | |||
6 | #define EDEADLK 35 /* Resource deadlock would occur */ | ||
7 | #define ENAMETOOLONG 36 /* File name too long */ | ||
8 | #define ENOLCK 37 /* No record locks available */ | ||
9 | #define ENOSYS 38 /* Function not implemented */ | ||
10 | #define ENOTEMPTY 39 /* Directory not empty */ | ||
11 | #define ELOOP 40 /* Too many symbolic links encountered */ | ||
12 | #define EWOULDBLOCK EAGAIN /* Operation would block */ | ||
13 | #define ENOMSG 42 /* No message of desired type */ | ||
14 | #define EIDRM 43 /* Identifier removed */ | ||
15 | #define ECHRNG 44 /* Channel number out of range */ | ||
16 | #define EL2NSYNC 45 /* Level 2 not synchronized */ | ||
17 | #define EL3HLT 46 /* Level 3 halted */ | ||
18 | #define EL3RST 47 /* Level 3 reset */ | ||
19 | #define ELNRNG 48 /* Link number out of range */ | ||
20 | #define EUNATCH 49 /* Protocol driver not attached */ | ||
21 | #define ENOCSI 50 /* No CSI structure available */ | ||
22 | #define EL2HLT 51 /* Level 2 halted */ | ||
23 | #define EBADE 52 /* Invalid exchange */ | ||
24 | #define EBADR 53 /* Invalid request descriptor */ | ||
25 | #define EXFULL 54 /* Exchange full */ | ||
26 | #define ENOANO 55 /* No anode */ | ||
27 | #define EBADRQC 56 /* Invalid request code */ | ||
28 | #define EBADSLT 57 /* Invalid slot */ | ||
29 | |||
30 | #define EDEADLOCK EDEADLK | ||
31 | |||
32 | #define EBFONT 59 /* Bad font file format */ | ||
33 | #define ENOSTR 60 /* Device not a stream */ | ||
34 | #define ENODATA 61 /* No data available */ | ||
35 | #define ETIME 62 /* Timer expired */ | ||
36 | #define ENOSR 63 /* Out of streams resources */ | ||
37 | #define ENONET 64 /* Machine is not on the network */ | ||
38 | #define ENOPKG 65 /* Package not installed */ | ||
39 | #define EREMOTE 66 /* Object is remote */ | ||
40 | #define ENOLINK 67 /* Link has been severed */ | ||
41 | #define EADV 68 /* Advertise error */ | ||
42 | #define ESRMNT 69 /* Srmount error */ | ||
43 | #define ECOMM 70 /* Communication error on send */ | ||
44 | #define EPROTO 71 /* Protocol error */ | ||
45 | #define EMULTIHOP 72 /* Multihop attempted */ | ||
46 | #define EDOTDOT 73 /* RFS specific error */ | ||
47 | #define EBADMSG 74 /* Not a data message */ | ||
48 | #define EOVERFLOW 75 /* Value too large for defined data type */ | ||
49 | #define ENOTUNIQ 76 /* Name not unique on network */ | ||
50 | #define EBADFD 77 /* File descriptor in bad state */ | ||
51 | #define EREMCHG 78 /* Remote address changed */ | ||
52 | #define ELIBACC 79 /* Can not access a needed shared library */ | ||
53 | #define ELIBBAD 80 /* Accessing a corrupted shared library */ | ||
54 | #define ELIBSCN 81 /* .lib section in a.out corrupted */ | ||
55 | #define ELIBMAX 82 /* Attempting to link in too many shared libraries */ | ||
56 | #define ELIBEXEC 83 /* Cannot exec a shared library directly */ | ||
57 | #define EILSEQ 84 /* Illegal byte sequence */ | ||
58 | #define ERESTART 85 /* Interrupted system call should be restarted */ | ||
59 | #define ESTRPIPE 86 /* Streams pipe error */ | ||
60 | #define EUSERS 87 /* Too many users */ | ||
61 | #define ENOTSOCK 88 /* Socket operation on non-socket */ | ||
62 | #define EDESTADDRREQ 89 /* Destination address required */ | ||
63 | #define EMSGSIZE 90 /* Message too long */ | ||
64 | #define EPROTOTYPE 91 /* Protocol wrong type for socket */ | ||
65 | #define ENOPROTOOPT 92 /* Protocol not available */ | ||
66 | #define EPROTONOSUPPORT 93 /* Protocol not supported */ | ||
67 | #define ESOCKTNOSUPPORT 94 /* Socket type not supported */ | ||
68 | #define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ | ||
69 | #define EPFNOSUPPORT 96 /* Protocol family not supported */ | ||
70 | #define EAFNOSUPPORT 97 /* Address family not supported by protocol */ | ||
71 | #define EADDRINUSE 98 /* Address already in use */ | ||
72 | #define EADDRNOTAVAIL 99 /* Cannot assign requested address */ | ||
73 | #define ENETDOWN 100 /* Network is down */ | ||
74 | #define ENETUNREACH 101 /* Network is unreachable */ | ||
75 | #define ENETRESET 102 /* Network dropped connection because of reset */ | ||
76 | #define ECONNABORTED 103 /* Software caused connection abort */ | ||
77 | #define ECONNRESET 104 /* Connection reset by peer */ | ||
78 | #define ENOBUFS 105 /* No buffer space available */ | ||
79 | #define EISCONN 106 /* Transport endpoint is already connected */ | ||
80 | #define ENOTCONN 107 /* Transport endpoint is not connected */ | ||
81 | #define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ | ||
82 | #define ETOOMANYREFS 109 /* Too many references: cannot splice */ | ||
83 | #define ETIMEDOUT 110 /* Connection timed out */ | ||
84 | #define ECONNREFUSED 111 /* Connection refused */ | ||
85 | #define EHOSTDOWN 112 /* Host is down */ | ||
86 | #define EHOSTUNREACH 113 /* No route to host */ | ||
87 | #define EALREADY 114 /* Operation already in progress */ | ||
88 | #define EINPROGRESS 115 /* Operation now in progress */ | ||
89 | #define ESTALE 116 /* Stale NFS file handle */ | ||
90 | #define EUCLEAN 117 /* Structure needs cleaning */ | ||
91 | #define ENOTNAM 118 /* Not a XENIX named type file */ | ||
92 | #define ENAVAIL 119 /* No XENIX semaphores available */ | ||
93 | #define EISNAM 120 /* Is a named type file */ | ||
94 | #define EREMOTEIO 121 /* Remote I/O error */ | ||
95 | #define EDQUOT 122 /* Quota exceeded */ | ||
96 | |||
97 | #define ENOMEDIUM 123 /* No medium found */ | ||
98 | #define EMEDIUMTYPE 124 /* Wrong medium type */ | ||
99 | #define ECANCELED 125 /* Operation Canceled */ | ||
100 | #define ENOKEY 126 /* Required key not available */ | ||
101 | #define EKEYEXPIRED 127 /* Key has expired */ | ||
102 | #define EKEYREVOKED 128 /* Key has been revoked */ | ||
103 | #define EKEYREJECTED 129 /* Key was rejected by service */ | ||
104 | |||
105 | #endif | ||
diff --git a/include/asm-generic/hdreg.h b/include/asm-generic/hdreg.h
new file mode 100644
index 000000000000..7051fba8bcf9
--- /dev/null
+++ b/include/asm-generic/hdreg.h
@@ -0,0 +1,8 @@
1 | #warning <asm/hdreg.h> is obsolete, please do not use it | ||
2 | |||
3 | #ifndef __ASM_GENERIC_HDREG_H | ||
4 | #define __ASM_GENERIC_HDREG_H | ||
5 | |||
6 | typedef unsigned long ide_ioreg_t; | ||
7 | |||
8 | #endif /* __ASM_GENERIC_HDREG_H */ | ||
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
new file mode 100644
index 000000000000..1b91d0681914
--- /dev/null
+++ b/include/asm-generic/ide_iops.h
@@ -0,0 +1,38 @@
1 | /* Generic I/O and MEMIO string operations. */ | ||
2 | |||
3 | #define __ide_insw insw | ||
4 | #define __ide_insl insl | ||
5 | #define __ide_outsw outsw | ||
6 | #define __ide_outsl outsl | ||
7 | |||
8 | static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count) | ||
9 | { | ||
10 | while (count--) { | ||
11 | *(u16 *)addr = readw(port); | ||
12 | addr += 2; | ||
13 | } | ||
14 | } | ||
15 | |||
16 | static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count) | ||
17 | { | ||
18 | while (count--) { | ||
19 | *(u32 *)addr = readl(port); | ||
20 | addr += 4; | ||
21 | } | ||
22 | } | ||
23 | |||
24 | static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) | ||
25 | { | ||
26 | while (count--) { | ||
27 | writew(*(u16 *)addr, port); | ||
28 | addr += 2; | ||
29 | } | ||
30 | } | ||
31 | |||
32 | static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) | ||
33 | { | ||
34 | while (count--) { | ||
35 | writel(*(u32 *)addr, port); | ||
36 | addr += 4; | ||
37 | } | ||
38 | } | ||
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
new file mode 100644
index 000000000000..4991543d44c8
--- /dev/null
+++ b/include/asm-generic/iomap.h
@@ -0,0 +1,63 @@
1 | #ifndef __GENERIC_IO_H | ||
2 | #define __GENERIC_IO_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | /* | ||
7 | * These are the "generic" interfaces for doing new-style | ||
8 | * memory-mapped or PIO accesses. Architectures may do | ||
9 | * their own arch-optimized versions, these just act as | ||
10 | * wrappers around the old-style IO register access functions: | ||
11 | * read[bwl]/write[bwl]/in[bwl]/out[bwl] | ||
12 | * | ||
13 | * Don't include this directly, include it from <asm/io.h>. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * Read/write from/to an (offsettable) iomem cookie. It might be a PIO | ||
18 | * access or a MMIO access, these functions don't care. The info is | ||
19 | * encoded in the hardware mapping set up by the mapping functions | ||
20 | * (or the cookie itself, depending on implementation and hw). | ||
21 | * | ||
22 | * The generic routines just encode the PIO/MMIO as part of the | ||
23 | * cookie, and coldly assume that the MMIO IO mappings are not | ||
24 | * in the low address range. Architectures for which this is not | ||
25 | * true can't use this generic implementation. | ||
26 | */ | ||
27 | extern unsigned int fastcall ioread8(void __iomem *); | ||
28 | extern unsigned int fastcall ioread16(void __iomem *); | ||
29 | extern unsigned int fastcall ioread32(void __iomem *); | ||
30 | |||
31 | extern void fastcall iowrite8(u8, void __iomem *); | ||
32 | extern void fastcall iowrite16(u16, void __iomem *); | ||
33 | extern void fastcall iowrite32(u32, void __iomem *); | ||
34 | |||
35 | /* | ||
36 | * "string" versions of the above. Note that they | ||
37 | * use native byte ordering for the accesses (on | ||
38 | * the assumption that IO and memory agree on a | ||
39 | * byte order, and CPU byteorder is irrelevant). | ||
40 | * | ||
41 | * They do _not_ update the port address. If you | ||
42 | * want MMIO that copies stuff laid out in MMIO | ||
43 | * memory across multiple ports, use "memcpy_toio()" | ||
44 | * and friends. | ||
45 | */ | ||
46 | extern void fastcall ioread8_rep(void __iomem *port, void *buf, unsigned long count); | ||
47 | extern void fastcall ioread16_rep(void __iomem *port, void *buf, unsigned long count); | ||
48 | extern void fastcall ioread32_rep(void __iomem *port, void *buf, unsigned long count); | ||
49 | |||
50 | extern void fastcall iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); | ||
51 | extern void fastcall iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); | ||
52 | extern void fastcall iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); | ||
53 | |||
54 | /* Create a virtual mapping cookie for an IO port range */ | ||
55 | extern void __iomem *ioport_map(unsigned long port, unsigned int nr); | ||
56 | extern void ioport_unmap(void __iomem *); | ||
57 | |||
58 | /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ | ||
59 | struct pci_dev; | ||
60 | extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | ||
61 | extern void pci_iounmap(struct pci_dev *dev, void __iomem *); | ||
62 | |||
63 | #endif | ||
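A calling-sequence sketch for the cookie-based accessors above, not part of the patch; 'pdev' is an already-enabled pci_dev and CTRL/STATUS are hypothetical register offsets:

#define CTRL   0x00    /* made-up register offsets */
#define STATUS 0x04

static int poke_device(struct pci_dev *pdev)
{
        void __iomem *regs = pci_iomap(pdev, 0, 0);   /* map all of BAR 0 */
        u32 status;

        if (!regs)
                return -ENOMEM;
        iowrite32(1, regs + CTRL);                    /* same code path for PIO and MMIO BARs */
        status = ioread32(regs + STATUS);
        pci_iounmap(pdev, regs);
        return status ? 0 : -EIO;
}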
diff --git a/include/asm-generic/ipc.h b/include/asm-generic/ipc.h
new file mode 100644
index 000000000000..a40407a165ce
--- /dev/null
+++ b/include/asm-generic/ipc.h
@@ -0,0 +1,31 @@
1 | #ifndef _ASM_GENERIC_IPC_H | ||
2 | #define _ASM_GENERIC_IPC_H | ||
3 | /* | ||
4 | * These are used to wrap system calls. | ||
5 | * | ||
6 | * See architecture code for ugly details.. | ||
7 | */ | ||
8 | struct ipc_kludge { | ||
9 | struct msgbuf __user *msgp; | ||
10 | long msgtyp; | ||
11 | }; | ||
12 | |||
13 | #define SEMOP 1 | ||
14 | #define SEMGET 2 | ||
15 | #define SEMCTL 3 | ||
16 | #define SEMTIMEDOP 4 | ||
17 | #define MSGSND 11 | ||
18 | #define MSGRCV 12 | ||
19 | #define MSGGET 13 | ||
20 | #define MSGCTL 14 | ||
21 | #define SHMAT 21 | ||
22 | #define SHMDT 22 | ||
23 | #define SHMGET 23 | ||
24 | #define SHMCTL 24 | ||
25 | |||
26 | /* Used by the DIPC package, try and avoid reusing it */ | ||
27 | #define DIPC 25 | ||
28 | |||
29 | #define IPCCALL(version,op) ((version)<<16 | (op)) | ||
30 | |||
31 | #endif /* _ASM_GENERIC_IPC_H */ | ||
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
new file mode 100644
index 000000000000..16fc00360f75
--- /dev/null
+++ b/include/asm-generic/local.h
@@ -0,0 +1,118 @@
1 | #ifndef _ASM_GENERIC_LOCAL_H | ||
2 | #define _ASM_GENERIC_LOCAL_H | ||
3 | |||
4 | #include <linux/config.h> | ||
5 | #include <linux/percpu.h> | ||
6 | #include <linux/hardirq.h> | ||
7 | #include <asm/types.h> | ||
8 | |||
9 | /* An unsigned long type for operations which are atomic for a single | ||
10 | * CPU. Usually used in combination with per-cpu variables. */ | ||
11 | |||
12 | #if BITS_PER_LONG == 32 | ||
13 | /* Implement in terms of atomics. */ | ||
14 | |||
15 | /* Don't use typedef: don't want them to be mixed with atomic_t's. */ | ||
16 | typedef struct | ||
17 | { | ||
18 | atomic_t a; | ||
19 | } local_t; | ||
20 | |||
21 | #define LOCAL_INIT(i) { ATOMIC_INIT(i) } | ||
22 | |||
23 | #define local_read(l) ((unsigned long)atomic_read(&(l)->a)) | ||
24 | #define local_set(l,i) atomic_set((&(l)->a),(i)) | ||
25 | #define local_inc(l) atomic_inc(&(l)->a) | ||
26 | #define local_dec(l) atomic_dec(&(l)->a) | ||
27 | #define local_add(i,l) atomic_add((i),(&(l)->a)) | ||
28 | #define local_sub(i,l) atomic_sub((i),(&(l)->a)) | ||
29 | |||
30 | /* Non-atomic variants, ie. preemption disabled and won't be touched | ||
31 | * in interrupt, etc. Some archs can optimize this case well. */ | ||
32 | #define __local_inc(l) local_set((l), local_read(l) + 1) | ||
33 | #define __local_dec(l) local_set((l), local_read(l) - 1) | ||
34 | #define __local_add(i,l) local_set((l), local_read(l) + (i)) | ||
35 | #define __local_sub(i,l) local_set((l), local_read(l) - (i)) | ||
36 | |||
37 | #else /* ... can't use atomics. */ | ||
38 | /* Implement in terms of three variables. | ||
39 | Another option would be to use local_irq_save/restore. */ | ||
40 | |||
41 | typedef struct | ||
42 | { | ||
43 | /* 0 = in hardirq, 1 = in softirq, 2 = usermode. */ | ||
44 | unsigned long v[3]; | ||
45 | } local_t; | ||
46 | |||
47 | #define _LOCAL_VAR(l) ((l)->v[!in_interrupt() + !in_irq()]) | ||
48 | |||
49 | #define LOCAL_INIT(i) { { (i), 0, 0 } } | ||
50 | |||
51 | static inline unsigned long local_read(local_t *l) | ||
52 | { | ||
53 | return l->v[0] + l->v[1] + l->v[2]; | ||
54 | } | ||
55 | |||
56 | static inline void local_set(local_t *l, unsigned long v) | ||
57 | { | ||
58 | l->v[0] = v; | ||
59 | l->v[1] = l->v[2] = 0; | ||
60 | } | ||
61 | |||
62 | static inline void local_inc(local_t *l) | ||
63 | { | ||
64 | preempt_disable(); | ||
65 | _LOCAL_VAR(l)++; | ||
66 | preempt_enable(); | ||
67 | } | ||
68 | |||
69 | static inline void local_dec(local_t *l) | ||
70 | { | ||
71 | preempt_disable(); | ||
72 | _LOCAL_VAR(l)--; | ||
73 | preempt_enable(); | ||
74 | } | ||
75 | |||
76 | static inline void local_add(unsigned long v, local_t *l) | ||
77 | { | ||
78 | preempt_disable(); | ||
79 | _LOCAL_VAR(l) += v; | ||
80 | preempt_enable(); | ||
81 | } | ||
82 | |||
83 | static inline void local_sub(unsigned long v, local_t *l) | ||
84 | { | ||
85 | preempt_disable(); | ||
86 | _LOCAL_VAR(l) -= v; | ||
87 | preempt_enable(); | ||
88 | } | ||
89 | |||
90 | /* Non-atomic variants, ie. preemption disabled and won't be touched | ||
91 | * in interrupt, etc. Some archs can optimize this case well. */ | ||
92 | #define __local_inc(l) ((l)->v[0]++) | ||
93 | #define __local_dec(l) ((l)->v[0]--) | ||
94 | #define __local_add(i,l) ((l)->v[0] += (i)) | ||
95 | #define __local_sub(i,l) ((l)->v[0] -= (i)) | ||
96 | |||
97 | #endif /* Non-atomic implementation */ | ||
98 | |||
99 | /* Use these for per-cpu local_t variables: on some archs they are | ||
100 | * much more efficient than these naive implementations. Note they take | ||
101 | * a variable (eg. mystruct.foo), not an address. | ||
102 | */ | ||
103 | #define cpu_local_read(v) local_read(&__get_cpu_var(v)) | ||
104 | #define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i)) | ||
105 | #define cpu_local_inc(v) local_inc(&__get_cpu_var(v)) | ||
106 | #define cpu_local_dec(v) local_dec(&__get_cpu_var(v)) | ||
107 | #define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v)) | ||
108 | #define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v)) | ||
109 | |||
110 | /* Non-atomic increments, ie. preemption disabled and won't be touched | ||
111 | * in interrupt, etc. Some archs can optimize this case well. | ||
112 | */ | ||
113 | #define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v)) | ||
114 | #define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v)) | ||
115 | #define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v)) | ||
116 | #define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v)) | ||
117 | |||
118 | #endif /* _ASM_GENERIC_LOCAL_H */ | ||
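A sketch of the intended per-cpu usage, not part of the patch: the cpu_local_* wrappers take the variable itself, as the comment above notes; 'hits' is a hypothetical counter and callers of cpu_local_read()/cpu_local_inc() must not be migrated mid-access:

static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

static void count_hit(void)
{
        cpu_local_inc(hits);               /* bumps this CPU's counter only */
}

static unsigned long hits_on_this_cpu(void)
{
        return cpu_local_read(hits);       /* needs preemption disabled by the caller */
}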
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h
new file mode 100644
index 000000000000..25c10e96b2b7
--- /dev/null
+++ b/include/asm-generic/pci-dma-compat.h
@@ -0,0 +1,107 @@
1 | /* include this file if the platform implements the dma_ DMA Mapping API | ||
2 | * and wants to provide the pci_ DMA Mapping API in terms of it */ | ||
3 | |||
4 | #ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H | ||
5 | #define _ASM_GENERIC_PCI_DMA_COMPAT_H | ||
6 | |||
7 | #include <linux/dma-mapping.h> | ||
8 | |||
9 | /* note pci_set_dma_mask isn't here, since it's a public function | ||
10 | * exported from drivers/pci, use dma_supported instead */ | ||
11 | |||
12 | static inline int | ||
13 | pci_dma_supported(struct pci_dev *hwdev, u64 mask) | ||
14 | { | ||
15 | return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask); | ||
16 | } | ||
17 | |||
18 | static inline void * | ||
19 | pci_alloc_consistent(struct pci_dev *hwdev, size_t size, | ||
20 | dma_addr_t *dma_handle) | ||
21 | { | ||
22 | return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); | ||
23 | } | ||
24 | |||
25 | static inline void | ||
26 | pci_free_consistent(struct pci_dev *hwdev, size_t size, | ||
27 | void *vaddr, dma_addr_t dma_handle) | ||
28 | { | ||
29 | dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); | ||
30 | } | ||
31 | |||
32 | static inline dma_addr_t | ||
33 | pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) | ||
34 | { | ||
35 | return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); | ||
36 | } | ||
37 | |||
38 | static inline void | ||
39 | pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, | ||
40 | size_t size, int direction) | ||
41 | { | ||
42 | dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); | ||
43 | } | ||
44 | |||
45 | static inline dma_addr_t | ||
46 | pci_map_page(struct pci_dev *hwdev, struct page *page, | ||
47 | unsigned long offset, size_t size, int direction) | ||
48 | { | ||
49 | return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); | ||
50 | } | ||
51 | |||
52 | static inline void | ||
53 | pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, | ||
54 | size_t size, int direction) | ||
55 | { | ||
56 | dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); | ||
57 | } | ||
58 | |||
59 | static inline int | ||
60 | pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, | ||
61 | int nents, int direction) | ||
62 | { | ||
63 | return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); | ||
64 | } | ||
65 | |||
66 | static inline void | ||
67 | pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, | ||
68 | int nents, int direction) | ||
69 | { | ||
70 | dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); | ||
71 | } | ||
72 | |||
73 | static inline void | ||
74 | pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, | ||
75 | size_t size, int direction) | ||
76 | { | ||
77 | dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); | ||
78 | } | ||
79 | |||
80 | static inline void | ||
81 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, | ||
82 | size_t size, int direction) | ||
83 | { | ||
84 | dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, | ||
89 | int nelems, int direction) | ||
90 | { | ||
91 | dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); | ||
92 | } | ||
93 | |||
94 | static inline void | ||
95 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, | ||
96 | int nelems, int direction) | ||
97 | { | ||
98 | dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); | ||
99 | } | ||
100 | |||
101 | static inline int | ||
102 | pci_dma_mapping_error(dma_addr_t dma_addr) | ||
103 | { | ||
104 | return dma_mapping_error(dma_addr); | ||
105 | } | ||
106 | |||
107 | #endif | ||
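A sketch, not part of the patch, of how an architecture opts in (the asm-foo/ path is hypothetical): once the arch implements the dma_ API, its asm/pci.h simply pulls in these wrappers and existing pci_* DMA users keep working unchanged:

/* include/asm-foo/pci.h (hypothetical architecture) */
#include <asm-generic/pci-dma-compat.h>

/* drivers may then keep calling, e.g.:
 *      pci_map_single(pdev, ptr, size, PCI_DMA_TODEVICE);
 * which forwards to dma_map_single(&pdev->dev, ptr, size, DMA_TO_DEVICE). */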
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
new file mode 100644
index 000000000000..9d4cc47bde39
--- /dev/null
+++ b/include/asm-generic/pci.h
@@ -0,0 +1,34 @@
1 | /* | ||
2 | * linux/include/asm-generic/pci.h | ||
3 | * | ||
4 | * Copyright (C) 2003 Russell King | ||
5 | */ | ||
6 | #ifndef _ASM_GENERIC_PCI_H | ||
7 | #define _ASM_GENERIC_PCI_H | ||
8 | |||
9 | /** | ||
10 | * pcibios_resource_to_bus - convert resource to PCI bus address | ||
11 | * @dev: device which owns this resource | ||
12 | * @region: converted bus-centric region (start,end) | ||
13 | * @res: resource to convert | ||
14 | * | ||
15 | * Convert a resource to a PCI device bus address or bus window. | ||
16 | */ | ||
17 | static inline void | ||
18 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | ||
19 | struct resource *res) | ||
20 | { | ||
21 | region->start = res->start; | ||
22 | region->end = res->end; | ||
23 | } | ||
24 | |||
25 | #define pcibios_scan_all_fns(a, b) 0 | ||
26 | |||
27 | #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ | ||
28 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | ||
29 | { | ||
30 | return channel ? 15 : 14; | ||
31 | } | ||
32 | #endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ | ||
33 | |||
34 | #endif | ||
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
new file mode 100644
index 000000000000..3b709b84934f
--- /dev/null
+++ b/include/asm-generic/percpu.h
@@ -0,0 +1,42 @@
1 | #ifndef _ASM_GENERIC_PERCPU_H_ | ||
2 | #define _ASM_GENERIC_PERCPU_H_ | ||
3 | #include <linux/compiler.h> | ||
4 | |||
5 | #define __GENERIC_PER_CPU | ||
6 | #ifdef CONFIG_SMP | ||
7 | |||
8 | extern unsigned long __per_cpu_offset[NR_CPUS]; | ||
9 | |||
10 | /* Separate out the type, so (int[3], foo) works. */ | ||
11 | #define DEFINE_PER_CPU(type, name) \ | ||
12 | __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name | ||
13 | |||
14 | /* var is in discarded region: offset to particular copy we want */ | ||
15 | #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) | ||
16 | #define __get_cpu_var(var) per_cpu(var, smp_processor_id()) | ||
17 | |||
18 | /* A macro to avoid #include hell... */ | ||
19 | #define percpu_modcopy(pcpudst, src, size) \ | ||
20 | do { \ | ||
21 | unsigned int __i; \ | ||
22 | for (__i = 0; __i < NR_CPUS; __i++) \ | ||
23 | if (cpu_possible(__i)) \ | ||
24 | memcpy((pcpudst)+__per_cpu_offset[__i], \ | ||
25 | (src), (size)); \ | ||
26 | } while (0) | ||
27 | #else /* ! SMP */ | ||
28 | |||
29 | #define DEFINE_PER_CPU(type, name) \ | ||
30 | __typeof__(type) per_cpu__##name | ||
31 | |||
32 | #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) | ||
33 | #define __get_cpu_var(var) per_cpu__##var | ||
34 | |||
35 | #endif /* SMP */ | ||
36 | |||
37 | #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name | ||
38 | |||
39 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | ||
40 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) | ||
41 | |||
42 | #endif /* _ASM_GENERIC_PERCPU_H_ */ | ||
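A sketch of the define/access pattern these macros implement, not part of the patch; 'nr_events' is a hypothetical counter, and get_cpu_var()/put_cpu_var() come from linux/percpu.h, which wraps this header:

DEFINE_PER_CPU(int, nr_events);

static void bump_events(int cpu)
{
        per_cpu(nr_events, cpu)++;        /* a specific CPU's copy */
        get_cpu_var(nr_events)++;         /* this CPU's copy, preemption-safe */
        put_cpu_var(nr_events);
}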
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
new file mode 100644
index 000000000000..c8d53ba20e19
--- /dev/null
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -0,0 +1,65 @@
1 | #ifndef _PGTABLE_NOPMD_H | ||
2 | #define _PGTABLE_NOPMD_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | #include <asm-generic/pgtable-nopud.h> | ||
7 | |||
8 | #define __PAGETABLE_PMD_FOLDED | ||
9 | |||
10 | /* | ||
11 | * Having the pmd type consist of a pud gets the size right, and allows | ||
12 | * us to conceptually access the pud entry that this pmd is folded into | ||
13 | * without casting. | ||
14 | */ | ||
15 | typedef struct { pud_t pud; } pmd_t; | ||
16 | |||
17 | #define PMD_SHIFT PUD_SHIFT | ||
18 | #define PTRS_PER_PMD 1 | ||
19 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
20 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
21 | |||
22 | /* | ||
23 | * The "pud_xxx()" functions here are trivial for a folded two-level | ||
24 | * setup: the pmd is never bad, and a pmd always exists (as it's folded | ||
25 | * into the pud entry) | ||
26 | */ | ||
27 | static inline int pud_none(pud_t pud) { return 0; } | ||
28 | static inline int pud_bad(pud_t pud) { return 0; } | ||
29 | static inline int pud_present(pud_t pud) { return 1; } | ||
30 | static inline void pud_clear(pud_t *pud) { } | ||
31 | #define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) | ||
32 | |||
33 | #define pud_populate(mm, pmd, pte) do { } while (0) | ||
34 | |||
35 | /* | ||
36 | * (pmds are folded into puds so this doesn't get actually called, | ||
37 | * but the define is needed for a generic inline function.) | ||
38 | */ | ||
39 | #define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval }) | ||
40 | |||
41 | static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) | ||
42 | { | ||
43 | return (pmd_t *)pud; | ||
44 | } | ||
45 | |||
46 | #define pmd_val(x) (pud_val((x).pud)) | ||
47 | #define __pmd(x) ((pmd_t) { __pud(x) } ) | ||
48 | |||
49 | #define pud_page(pud) (pmd_page((pmd_t){ pud })) | ||
50 | #define pud_page_kernel(pud) (pmd_page_kernel((pmd_t){ pud })) | ||
51 | |||
52 | /* | ||
53 | * allocating and freeing a pmd is trivial: the 1-entry pmd is | ||
54 | * inside the pud, so has no extra memory associated with it. | ||
55 | */ | ||
56 | #define pmd_alloc_one(mm, address) NULL | ||
57 | #define pmd_free(x) do { } while (0) | ||
58 | #define __pmd_free_tlb(tlb, x) do { } while (0) | ||
59 | |||
60 | #undef pmd_addr_end | ||
61 | #define pmd_addr_end(addr, end) (end) | ||
62 | |||
63 | #endif /* __ASSEMBLY__ */ | ||
64 | |||
65 | #endif /* _PGTABLE_NOPMD_H */ | ||
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
new file mode 100644
index 000000000000..82e29f0ce467
--- /dev/null
+++ b/include/asm-generic/pgtable-nopud.h
@@ -0,0 +1,61 @@
1 | #ifndef _PGTABLE_NOPUD_H | ||
2 | #define _PGTABLE_NOPUD_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | #define __PAGETABLE_PUD_FOLDED | ||
7 | |||
8 | /* | ||
9 | * Having the pud type consist of a pgd gets the size right, and allows | ||
10 | * us to conceptually access the pgd entry that this pud is folded into | ||
11 | * without casting. | ||
12 | */ | ||
13 | typedef struct { pgd_t pgd; } pud_t; | ||
14 | |||
15 | #define PUD_SHIFT PGDIR_SHIFT | ||
16 | #define PTRS_PER_PUD 1 | ||
17 | #define PUD_SIZE (1UL << PUD_SHIFT) | ||
18 | #define PUD_MASK (~(PUD_SIZE-1)) | ||
19 | |||
20 | /* | ||
21 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
22 | * setup: the pud is never bad, and a pud always exists (as it's folded | ||
23 | * into the pgd entry) | ||
24 | */ | ||
25 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
26 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
27 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
28 | static inline void pgd_clear(pgd_t *pgd) { } | ||
29 | #define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) | ||
30 | |||
31 | #define pgd_populate(mm, pgd, pud) do { } while (0) | ||
32 | /* | ||
33 | * (puds are folded into pgds so this doesn't get actually called, | ||
34 | * but the define is needed for a generic inline function.) | ||
35 | */ | ||
36 | #define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) | ||
37 | |||
38 | static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) | ||
39 | { | ||
40 | return (pud_t *)pgd; | ||
41 | } | ||
42 | |||
43 | #define pud_val(x) (pgd_val((x).pgd)) | ||
44 | #define __pud(x) ((pud_t) { __pgd(x) } ) | ||
45 | |||
46 | #define pgd_page(pgd) (pud_page((pud_t){ pgd })) | ||
47 | #define pgd_page_kernel(pgd) (pud_page_kernel((pud_t){ pgd })) | ||
48 | |||
49 | /* | ||
50 | * allocating and freeing a pud is trivial: the 1-entry pud is | ||
51 | * inside the pgd, so has no extra memory associated with it. | ||
52 | */ | ||
53 | #define pud_alloc_one(mm, address) NULL | ||
54 | #define pud_free(x) do { } while (0) | ||
55 | #define __pud_free_tlb(tlb, x) do { } while (0) | ||
56 | |||
57 | #undef pud_addr_end | ||
58 | #define pud_addr_end(addr, end) (end) | ||
59 | |||
60 | #endif /* __ASSEMBLY__ */ | ||
61 | #endif /* _PGTABLE_NOPUD_H */ | ||
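A walker's-eye sketch of what the fold means, not part of the patch; 'mm' and 'addr' are placeholders and pgd_offset() is the arch's usual macro:

static pud_t *fold_example(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);    /* arch macro, assumed */
        pud_t *pud = pud_offset(pgd, addr);   /* no extra dereference: just (pud_t *)pgd */

        /* pgd_none() is constant 0 and pgd_present() constant 1 here, so generic
         * walk loops compile the pud level away entirely. */
        return pud;
}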
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
new file mode 100644
index 000000000000..a3b28710d56c
--- /dev/null
+++ b/include/asm-generic/pgtable.h
@@ -0,0 +1,213 @@
1 | #ifndef _ASM_GENERIC_PGTABLE_H | ||
2 | #define _ASM_GENERIC_PGTABLE_H | ||
3 | |||
4 | #ifndef __HAVE_ARCH_PTEP_ESTABLISH | ||
5 | /* | ||
6 | * Establish a new mapping: | ||
7 | * - flush the old one | ||
8 | * - update the page tables | ||
9 | * - inform the TLB about the new one | ||
10 | * | ||
11 | * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock. | ||
12 | * | ||
13 | * Note: the old pte is known to not be writable, so we don't need to | ||
14 | * worry about dirty bits etc getting lost. | ||
15 | */ | ||
16 | #ifndef __HAVE_ARCH_SET_PTE_ATOMIC | ||
17 | #define ptep_establish(__vma, __address, __ptep, __entry) \ | ||
18 | do { \ | ||
19 | set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ | ||
20 | flush_tlb_page(__vma, __address); \ | ||
21 | } while (0) | ||
22 | #else /* __HAVE_ARCH_SET_PTE_ATOMIC */ | ||
23 | #define ptep_establish(__vma, __address, __ptep, __entry) \ | ||
24 | do { \ | ||
25 | set_pte_atomic(__ptep, __entry); \ | ||
26 | flush_tlb_page(__vma, __address); \ | ||
27 | } while (0) | ||
28 | #endif /* __HAVE_ARCH_SET_PTE_ATOMIC */ | ||
29 | #endif | ||
30 | |||
31 | #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
32 | /* | ||
33 | * Largely same as above, but only sets the access flags (dirty, | ||
34 | * accessed, and writable). Furthermore, we know it always gets set | ||
35 | * to a "more permissive" setting, which allows most architectures | ||
36 | * to optimize this. | ||
37 | */ | ||
38 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | ||
39 | do { \ | ||
40 | set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \ | ||
41 | flush_tlb_page(__vma, __address); \ | ||
42 | } while (0) | ||
43 | #endif | ||
44 | |||
45 | #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
46 | #define ptep_test_and_clear_young(__vma, __address, __ptep) \ | ||
47 | ({ \ | ||
48 | pte_t __pte = *(__ptep); \ | ||
49 | int r = 1; \ | ||
50 | if (!pte_young(__pte)) \ | ||
51 | r = 0; \ | ||
52 | else \ | ||
53 | set_pte_at((__vma)->vm_mm, (__address), \ | ||
54 | (__ptep), pte_mkold(__pte)); \ | ||
55 | r; \ | ||
56 | }) | ||
57 | #endif | ||
58 | |||
59 | #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | ||
60 | #define ptep_clear_flush_young(__vma, __address, __ptep) \ | ||
61 | ({ \ | ||
62 | int __young; \ | ||
63 | __young = ptep_test_and_clear_young(__vma, __address, __ptep); \ | ||
64 | if (__young) \ | ||
65 | flush_tlb_page(__vma, __address); \ | ||
66 | __young; \ | ||
67 | }) | ||
68 | #endif | ||
69 | |||
70 | #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
71 | #define ptep_test_and_clear_dirty(__vma, __address, __ptep) \ | ||
72 | ({ \ | ||
73 | pte_t __pte = *__ptep; \ | ||
74 | int r = 1; \ | ||
75 | if (!pte_dirty(__pte)) \ | ||
76 | r = 0; \ | ||
77 | else \ | ||
78 | set_pte_at((__vma)->vm_mm, (__address), (__ptep), \ | ||
79 | pte_mkclean(__pte)); \ | ||
80 | r; \ | ||
81 | }) | ||
82 | #endif | ||
83 | |||
84 | #ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH | ||
85 | #define ptep_clear_flush_dirty(__vma, __address, __ptep) \ | ||
86 | ({ \ | ||
87 | int __dirty; \ | ||
88 | __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \ | ||
89 | if (__dirty) \ | ||
90 | flush_tlb_page(__vma, __address); \ | ||
91 | __dirty; \ | ||
92 | }) | ||
93 | #endif | ||
94 | |||
95 | #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
96 | #define ptep_get_and_clear(__mm, __address, __ptep) \ | ||
97 | ({ \ | ||
98 | pte_t __pte = *(__ptep); \ | ||
99 | pte_clear((__mm), (__address), (__ptep)); \ | ||
100 | __pte; \ | ||
101 | }) | ||
102 | #endif | ||
103 | |||
104 | #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH | ||
105 | #define ptep_clear_flush(__vma, __address, __ptep) \ | ||
106 | ({ \ | ||
107 | pte_t __pte; \ | ||
108 | __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \ | ||
109 | flush_tlb_page(__vma, __address); \ | ||
110 | __pte; \ | ||
111 | }) | ||
112 | #endif | ||
113 | |||
114 | #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
115 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) | ||
116 | { | ||
117 | pte_t old_pte = *ptep; | ||
118 | set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); | ||
119 | } | ||
120 | #endif | ||
121 | |||
122 | #ifndef __HAVE_ARCH_PTE_SAME | ||
123 | #define pte_same(A,B) (pte_val(A) == pte_val(B)) | ||
124 | #endif | ||
125 | |||
126 | #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY | ||
127 | #define page_test_and_clear_dirty(page) (0) | ||
128 | #endif | ||
129 | |||
130 | #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG | ||
131 | #define page_test_and_clear_young(page) (0) | ||
132 | #endif | ||
133 | |||
134 | #ifndef __HAVE_ARCH_PGD_OFFSET_GATE | ||
135 | #define pgd_offset_gate(mm, addr) pgd_offset(mm, addr) | ||
136 | #endif | ||
137 | |||
138 | #ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE | ||
139 | #define lazy_mmu_prot_update(pte) do { } while (0) | ||
140 | #endif | ||
141 | |||
142 | /* | ||
143 | * When walking page tables, get the address of the next boundary, or | ||
144 | * the end address of the range if that comes earlier. Although end might | ||
145 | * wrap to 0 only in clear_page_range, __boundary may wrap to 0 throughout. | ||
146 | */ | ||
147 | |||
148 | #ifndef pgd_addr_end | ||
149 | #define pgd_addr_end(addr, end) \ | ||
150 | ({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ | ||
151 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ | ||
152 | }) | ||
153 | #endif | ||
154 | |||
155 | #ifndef pud_addr_end | ||
156 | #define pud_addr_end(addr, end) \ | ||
157 | ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ | ||
158 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ | ||
159 | }) | ||
160 | #endif | ||
161 | |||
162 | #ifndef pmd_addr_end | ||
163 | #define pmd_addr_end(addr, end) \ | ||
164 | ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ | ||
165 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ | ||
166 | }) | ||
167 | #endif | ||
168 | |||
169 | #ifndef __ASSEMBLY__ | ||
170 | /* | ||
171 | * When walking page tables, we usually want to skip any p?d_none entries; | ||
172 | * and any p?d_bad entries - reporting the error before resetting to none. | ||
173 | * Do the tests inline, but report and clear the bad entry in mm/memory.c. | ||
174 | */ | ||
175 | void pgd_clear_bad(pgd_t *); | ||
176 | void pud_clear_bad(pud_t *); | ||
177 | void pmd_clear_bad(pmd_t *); | ||
178 | |||
179 | static inline int pgd_none_or_clear_bad(pgd_t *pgd) | ||
180 | { | ||
181 | if (pgd_none(*pgd)) | ||
182 | return 1; | ||
183 | if (unlikely(pgd_bad(*pgd))) { | ||
184 | pgd_clear_bad(pgd); | ||
185 | return 1; | ||
186 | } | ||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static inline int pud_none_or_clear_bad(pud_t *pud) | ||
191 | { | ||
192 | if (pud_none(*pud)) | ||
193 | return 1; | ||
194 | if (unlikely(pud_bad(*pud))) { | ||
195 | pud_clear_bad(pud); | ||
196 | return 1; | ||
197 | } | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static inline int pmd_none_or_clear_bad(pmd_t *pmd) | ||
202 | { | ||
203 | if (pmd_none(*pmd)) | ||
204 | return 1; | ||
205 | if (unlikely(pmd_bad(*pmd))) { | ||
206 | pmd_clear_bad(pmd); | ||
207 | return 1; | ||
208 | } | ||
209 | return 0; | ||
210 | } | ||
211 | #endif /* !__ASSEMBLY__ */ | ||
212 | |||
213 | #endif /* _ASM_GENERIC_PGTABLE_H */ | ||
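The addr_end macros and the p?d_none_or_clear_bad() helpers above are designed to be combined in range walkers. A minimal sketch of the intended loop shape, assuming a hypothetical walk_pmd_range() helper (the real walkers live in mm/memory.c):

/*
 * Illustrative only: walk the pmds covering [addr, end) below one pud.
 * walk_pmd_range() and the pte visit are placeholders, not kernel APIs.
 */
static void walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);		/* clamp to this pmd or to end */
		if (pmd_none_or_clear_bad(pmd))		/* skip empty or corrupt entries */
			continue;
		/* ... visit the ptes mapped by *pmd for [addr, next) ... */
	} while (pmd++, addr = next, addr != end);
}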
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h new file mode 100644 index 000000000000..b1fcda9eac23 --- /dev/null +++ b/include/asm-generic/resource.h | |||
@@ -0,0 +1,88 @@ | |||
1 | #ifndef _ASM_GENERIC_RESOURCE_H | ||
2 | #define _ASM_GENERIC_RESOURCE_H | ||
3 | |||
4 | /* | ||
5 | * Resource limit IDs | ||
6 | * | ||
7 | * ( Compatibility detail: there are architectures that have | ||
8 | * a different rlimit ID order in the 5-9 range and want | ||
9 | * to keep that order for binary compatibility. The reasons | ||
10 | * are historic and all new rlimits are identical across all | ||
11 | * arches. If an arch has such special order for some rlimits | ||
12 | * then it defines them prior to including asm-generic/resource.h. ) | ||

13 | */ | ||
14 | |||
15 | #define RLIMIT_CPU 0 /* CPU time in ms */ | ||
16 | #define RLIMIT_FSIZE 1 /* Maximum filesize */ | ||
17 | #define RLIMIT_DATA 2 /* max data size */ | ||
18 | #define RLIMIT_STACK 3 /* max stack size */ | ||
19 | #define RLIMIT_CORE 4 /* max core file size */ | ||
20 | |||
21 | #ifndef RLIMIT_RSS | ||
22 | # define RLIMIT_RSS 5 /* max resident set size */ | ||
23 | #endif | ||
24 | |||
25 | #ifndef RLIMIT_NPROC | ||
26 | # define RLIMIT_NPROC 6 /* max number of processes */ | ||
27 | #endif | ||
28 | |||
29 | #ifndef RLIMIT_NOFILE | ||
30 | # define RLIMIT_NOFILE 7 /* max number of open files */ | ||
31 | #endif | ||
32 | |||
33 | #ifndef RLIMIT_MEMLOCK | ||
34 | # define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ | ||
35 | #endif | ||
36 | |||
37 | #ifndef RLIMIT_AS | ||
38 | # define RLIMIT_AS 9 /* address space limit */ | ||
39 | #endif | ||
40 | |||
41 | #define RLIMIT_LOCKS 10 /* maximum file locks held */ | ||
42 | #define RLIMIT_SIGPENDING 11 /* max number of pending signals */ | ||
43 | #define RLIMIT_MSGQUEUE 12 /* maximum bytes in POSIX mqueues */ | ||
44 | |||
45 | #define RLIM_NLIMITS 13 | ||
46 | |||
47 | /* | ||
48 | * SuS says limits have to be unsigned. | ||
49 | * Which makes a ton more sense anyway. | ||
50 | * | ||
51 | * Some architectures override this (for compatibility reasons): | ||
52 | */ | ||
53 | #ifndef RLIM_INFINITY | ||
54 | # define RLIM_INFINITY (~0UL) | ||
55 | #endif | ||
56 | |||
57 | /* | ||
58 | * RLIMIT_STACK default maximum - some architectures override it: | ||
59 | */ | ||
60 | #ifndef _STK_LIM_MAX | ||
61 | # define _STK_LIM_MAX RLIM_INFINITY | ||
62 | #endif | ||
63 | |||
64 | #ifdef __KERNEL__ | ||
65 | |||
66 | /* | ||
67 | * boot-time rlimit defaults for the init task: | ||
68 | */ | ||
69 | #define INIT_RLIMITS \ | ||
70 | { \ | ||
71 | [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \ | ||
72 | [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \ | ||
73 | [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \ | ||
74 | [RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \ | ||
75 | [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ | ||
76 | [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ | ||
77 | [RLIMIT_NPROC] = { 0, 0 }, \ | ||
78 | [RLIMIT_NOFILE] = { INR_OPEN, INR_OPEN }, \ | ||
79 | [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \ | ||
80 | [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \ | ||
81 | [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ | ||
82 | [RLIMIT_SIGPENDING] = { 0, 0 }, \ | ||
83 | [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ | ||
84 | } | ||
85 | |||
86 | #endif /* __KERNEL__ */ | ||
87 | |||
88 | #endif | ||
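The same RLIMIT_* IDs are what userspace ultimately manipulates through getrlimit(2)/setrlimit(2); a small illustrative userspace use (not part of this header):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
		if (rl.rlim_max == RLIM_INFINITY)
			printf("open files: soft %lu, hard unlimited\n",
			       (unsigned long)rl.rlim_cur);
		else
			printf("open files: soft %lu, hard %lu\n",
			       (unsigned long)rl.rlim_cur,
			       (unsigned long)rl.rlim_max);
	}
	return 0;
}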
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h new file mode 100644 index 000000000000..cef08db34ada --- /dev/null +++ b/include/asm-generic/rtc.h | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * include/asm-generic/rtc.h | ||
3 | * | ||
4 | * Author: Tom Rini <trini@mvista.com> | ||
5 | * | ||
6 | * Based on: | ||
7 | * drivers/char/rtc.c | ||
8 | * | ||
9 | * Please read the COPYING file for all license details. | ||
10 | */ | ||
11 | |||
12 | #ifndef __ASM_RTC_H__ | ||
13 | #define __ASM_RTC_H__ | ||
14 | |||
15 | #ifdef __KERNEL__ | ||
16 | |||
17 | #include <linux/mc146818rtc.h> | ||
18 | #include <linux/rtc.h> | ||
19 | #include <linux/bcd.h> | ||
20 | |||
21 | #define RTC_PIE 0x40 /* periodic interrupt enable */ | ||
22 | #define RTC_AIE 0x20 /* alarm interrupt enable */ | ||
23 | #define RTC_UIE 0x10 /* update-finished interrupt enable */ | ||
24 | |||
25 | /* some dummy definitions */ | ||
26 | #define RTC_BATT_BAD 0x100 /* battery bad */ | ||
27 | #define RTC_SQWE 0x08 /* enable square-wave output */ | ||
28 | #define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ | ||
29 | #define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ | ||
30 | #define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ | ||
31 | |||
32 | /* | ||
33 | * Returns true if a clock update is in progress | ||
34 | */ | ||
35 | static inline unsigned char rtc_is_updating(void) | ||
36 | { | ||
37 | unsigned char uip; | ||
38 | |||
39 | spin_lock_irq(&rtc_lock); | ||
40 | uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); | ||
41 | spin_unlock_irq(&rtc_lock); | ||
42 | return uip; | ||
43 | } | ||
44 | |||
45 | static inline unsigned int get_rtc_time(struct rtc_time *time) | ||
46 | { | ||
47 | unsigned long uip_watchdog = jiffies; | ||
48 | unsigned char ctrl; | ||
49 | #ifdef CONFIG_MACH_DECSTATION | ||
50 | unsigned int real_year; | ||
51 | #endif | ||
52 | |||
53 | /* | ||
54 | * read RTC once any update in progress is done. The update | ||
55 | * can take just over 2ms. We wait 10 to 20ms. There is no need to | ||
56 | * poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. | ||
57 | * If you need to know *exactly* when a second has started, enable | ||
58 | * periodic update complete interrupts, (via ioctl) and then | ||
59 | * immediately read /dev/rtc which will block until you get the IRQ. | ||
60 | * Once the read clears, read the RTC time (again via ioctl). Easy. | ||
61 | */ | ||
62 | |||
63 | if (rtc_is_updating() != 0) | ||
64 | while (jiffies - uip_watchdog < 2*HZ/100) { | ||
65 | barrier(); | ||
66 | cpu_relax(); | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Only the values that we read from the RTC are set. We leave | ||
71 | * tm_wday, tm_yday and tm_isdst untouched. Even though the | ||
72 | * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated | ||
73 | * by the RTC when initially set to a non-zero value. | ||
74 | */ | ||
75 | spin_lock_irq(&rtc_lock); | ||
76 | time->tm_sec = CMOS_READ(RTC_SECONDS); | ||
77 | time->tm_min = CMOS_READ(RTC_MINUTES); | ||
78 | time->tm_hour = CMOS_READ(RTC_HOURS); | ||
79 | time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); | ||
80 | time->tm_mon = CMOS_READ(RTC_MONTH); | ||
81 | time->tm_year = CMOS_READ(RTC_YEAR); | ||
82 | #ifdef CONFIG_MACH_DECSTATION | ||
83 | real_year = CMOS_READ(RTC_DEC_YEAR); | ||
84 | #endif | ||
85 | ctrl = CMOS_READ(RTC_CONTROL); | ||
86 | spin_unlock_irq(&rtc_lock); | ||
87 | |||
88 | if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
89 | { | ||
90 | BCD_TO_BIN(time->tm_sec); | ||
91 | BCD_TO_BIN(time->tm_min); | ||
92 | BCD_TO_BIN(time->tm_hour); | ||
93 | BCD_TO_BIN(time->tm_mday); | ||
94 | BCD_TO_BIN(time->tm_mon); | ||
95 | BCD_TO_BIN(time->tm_year); | ||
96 | } | ||
97 | |||
98 | #ifdef CONFIG_MACH_DECSTATION | ||
99 | time->tm_year += real_year - 72; | ||
100 | #endif | ||
101 | |||
102 | /* | ||
103 | * Account for differences between how the RTC uses the values | ||
104 | * and how they are defined in a struct rtc_time; | ||
105 | */ | ||
106 | if (time->tm_year <= 69) | ||
107 | time->tm_year += 100; | ||
108 | |||
109 | time->tm_mon--; | ||
110 | |||
111 | return RTC_24H; | ||
112 | } | ||
113 | |||
114 | /* Set the current date and time in the real time clock. */ | ||
115 | static inline int set_rtc_time(struct rtc_time *time) | ||
116 | { | ||
117 | unsigned char mon, day, hrs, min, sec; | ||
118 | unsigned char save_control, save_freq_select; | ||
119 | unsigned int yrs; | ||
120 | #ifdef CONFIG_MACH_DECSTATION | ||
121 | unsigned int real_yrs, leap_yr; | ||
122 | #endif | ||
123 | |||
124 | yrs = time->tm_year; | ||
125 | mon = time->tm_mon + 1; /* tm_mon starts at zero */ | ||
126 | day = time->tm_mday; | ||
127 | hrs = time->tm_hour; | ||
128 | min = time->tm_min; | ||
129 | sec = time->tm_sec; | ||
130 | |||
131 | if (yrs > 255) /* They are unsigned */ | ||
132 | return -EINVAL; | ||
133 | |||
134 | spin_lock_irq(&rtc_lock); | ||
135 | #ifdef CONFIG_MACH_DECSTATION | ||
136 | real_yrs = yrs; | ||
137 | leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) || | ||
138 | !((yrs + 1900) % 400)); | ||
139 | yrs = 72; | ||
140 | |||
141 | /* | ||
142 | * We want to keep the year set to 73 until March | ||
143 | * for non-leap years, so that Feb, 29th is handled | ||
144 | * correctly. | ||
145 | */ | ||
146 | if (!leap_yr && mon < 3) { | ||
147 | real_yrs--; | ||
148 | yrs = 73; | ||
149 | } | ||
150 | #endif | ||
151 | /* These limits and adjustments are independent of | ||
152 | * whether the chip is in binary mode or not. | ||
153 | */ | ||
154 | if (yrs > 169) { | ||
155 | spin_unlock_irq(&rtc_lock); | ||
156 | return -EINVAL; | ||
157 | } | ||
158 | |||
159 | if (yrs >= 100) | ||
160 | yrs -= 100; | ||
161 | |||
162 | if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) | ||
163 | || RTC_ALWAYS_BCD) { | ||
164 | BIN_TO_BCD(sec); | ||
165 | BIN_TO_BCD(min); | ||
166 | BIN_TO_BCD(hrs); | ||
167 | BIN_TO_BCD(day); | ||
168 | BIN_TO_BCD(mon); | ||
169 | BIN_TO_BCD(yrs); | ||
170 | } | ||
171 | |||
172 | save_control = CMOS_READ(RTC_CONTROL); | ||
173 | CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); | ||
174 | save_freq_select = CMOS_READ(RTC_FREQ_SELECT); | ||
175 | CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); | ||
176 | |||
177 | #ifdef CONFIG_MACH_DECSTATION | ||
178 | CMOS_WRITE(real_yrs, RTC_DEC_YEAR); | ||
179 | #endif | ||
180 | CMOS_WRITE(yrs, RTC_YEAR); | ||
181 | CMOS_WRITE(mon, RTC_MONTH); | ||
182 | CMOS_WRITE(day, RTC_DAY_OF_MONTH); | ||
183 | CMOS_WRITE(hrs, RTC_HOURS); | ||
184 | CMOS_WRITE(min, RTC_MINUTES); | ||
185 | CMOS_WRITE(sec, RTC_SECONDS); | ||
186 | |||
187 | CMOS_WRITE(save_control, RTC_CONTROL); | ||
188 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | ||
189 | |||
190 | spin_unlock_irq(&rtc_lock); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static inline unsigned int get_rtc_ss(void) | ||
196 | { | ||
197 | struct rtc_time h; | ||
198 | |||
199 | get_rtc_time(&h); | ||
200 | return h.tm_sec; | ||
201 | } | ||
202 | |||
203 | static inline int get_rtc_pll(struct rtc_pll_info *pll) | ||
204 | { | ||
205 | return -EINVAL; | ||
206 | } | ||
207 | static inline int set_rtc_pll(struct rtc_pll_info *pll) | ||
208 | { | ||
209 | return -EINVAL; | ||
210 | } | ||
211 | |||
212 | #endif /* __KERNEL__ */ | ||
213 | #endif /* __ASM_RTC_H__ */ | ||
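get_rtc_time() and set_rtc_time() rely on BCD_TO_BIN()/BIN_TO_BCD() from <linux/bcd.h>; for readers porting this, the conversions amount to the following (a sketch of the equivalent arithmetic, not the kernel macros themselves):

/* Two-digit BCD <-> binary, e.g. 0x59 <-> 59. */
static unsigned char bcd_to_bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin_to_bcd(unsigned char val)
{
	return ((val / 10) << 4) | (val % 10);
}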
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h new file mode 100644 index 000000000000..976ac29598b7 --- /dev/null +++ b/include/asm-generic/sections.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _ASM_GENERIC_SECTIONS_H_ | ||
2 | #define _ASM_GENERIC_SECTIONS_H_ | ||
3 | |||
4 | /* References to section boundaries */ | ||
5 | |||
6 | extern char _text[], _stext[], _etext[]; | ||
7 | extern char _data[], _sdata[], _edata[]; | ||
8 | extern char __bss_start[], __bss_stop[]; | ||
9 | extern char __init_begin[], __init_end[]; | ||
10 | extern char _sinittext[], _einittext[]; | ||
11 | extern char _end[]; | ||
12 | |||
13 | #endif /* _ASM_GENERIC_SECTIONS_H_ */ | ||
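Only the addresses of these arrays are meaningful; a hedged sketch of typical boot-time use (report_kernel_layout() is hypothetical):

static void report_kernel_layout(void)
{
	unsigned long text = _etext - _stext;			/* kernel text size */
	unsigned long bss  = __bss_stop - __bss_start;		/* zero-initialised data */
	unsigned long init = __init_end - __init_begin;		/* freed after boot */

	printk("text %lukB, bss %lukB, init %lukB\n",
	       text >> 10, bss >> 10, init >> 10);
}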
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h new file mode 100644 index 000000000000..9cac8e8dde51 --- /dev/null +++ b/include/asm-generic/siginfo.h | |||
@@ -0,0 +1,287 @@ | |||
1 | #ifndef _ASM_GENERIC_SIGINFO_H | ||
2 | #define _ASM_GENERIC_SIGINFO_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | |||
7 | typedef union sigval { | ||
8 | int sival_int; | ||
9 | void __user *sival_ptr; | ||
10 | } sigval_t; | ||
11 | |||
12 | /* | ||
13 | * This is the size (including padding) of the part of the | ||
14 | * struct siginfo that is before the union. | ||
15 | */ | ||
16 | #ifndef __ARCH_SI_PREAMBLE_SIZE | ||
17 | #define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) | ||
18 | #endif | ||
19 | |||
20 | #define SI_MAX_SIZE 128 | ||
21 | #ifndef SI_PAD_SIZE | ||
22 | #define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int)) | ||
23 | #endif | ||
24 | |||
25 | #ifndef __ARCH_SI_UID_T | ||
26 | #define __ARCH_SI_UID_T uid_t | ||
27 | #endif | ||
28 | |||
29 | /* | ||
30 | * The default "si_band" type is "long", as specified by POSIX. | ||
31 | * However, some architectures want to override this to "int" | ||
32 | * for historical compatibility reasons, so we allow that. | ||
33 | */ | ||
34 | #ifndef __ARCH_SI_BAND_T | ||
35 | #define __ARCH_SI_BAND_T long | ||
36 | #endif | ||
37 | |||
38 | #ifndef HAVE_ARCH_SIGINFO_T | ||
39 | |||
40 | typedef struct siginfo { | ||
41 | int si_signo; | ||
42 | int si_errno; | ||
43 | int si_code; | ||
44 | |||
45 | union { | ||
46 | int _pad[SI_PAD_SIZE]; | ||
47 | |||
48 | /* kill() */ | ||
49 | struct { | ||
50 | pid_t _pid; /* sender's pid */ | ||
51 | __ARCH_SI_UID_T _uid; /* sender's uid */ | ||
52 | } _kill; | ||
53 | |||
54 | /* POSIX.1b timers */ | ||
55 | struct { | ||
56 | timer_t _tid; /* timer id */ | ||
57 | int _overrun; /* overrun count */ | ||
58 | char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; | ||
59 | sigval_t _sigval; /* same as below */ | ||
60 | int _sys_private; /* not to be passed to user */ | ||
61 | } _timer; | ||
62 | |||
63 | /* POSIX.1b signals */ | ||
64 | struct { | ||
65 | pid_t _pid; /* sender's pid */ | ||
66 | __ARCH_SI_UID_T _uid; /* sender's uid */ | ||
67 | sigval_t _sigval; | ||
68 | } _rt; | ||
69 | |||
70 | /* SIGCHLD */ | ||
71 | struct { | ||
72 | pid_t _pid; /* which child */ | ||
73 | __ARCH_SI_UID_T _uid; /* sender's uid */ | ||
74 | int _status; /* exit code */ | ||
75 | clock_t _utime; | ||
76 | clock_t _stime; | ||
77 | } _sigchld; | ||
78 | |||
79 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
80 | struct { | ||
81 | void __user *_addr; /* faulting insn/memory ref. */ | ||
82 | #ifdef __ARCH_SI_TRAPNO | ||
83 | int _trapno; /* TRAP # which caused the signal */ | ||
84 | #endif | ||
85 | } _sigfault; | ||
86 | |||
87 | /* SIGPOLL */ | ||
88 | struct { | ||
89 | __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
90 | int _fd; | ||
91 | } _sigpoll; | ||
92 | } _sifields; | ||
93 | } siginfo_t; | ||
94 | |||
95 | #endif | ||
96 | |||
97 | /* | ||
98 | * How these fields are to be accessed. | ||
99 | */ | ||
100 | #define si_pid _sifields._kill._pid | ||
101 | #define si_uid _sifields._kill._uid | ||
102 | #define si_tid _sifields._timer._tid | ||
103 | #define si_overrun _sifields._timer._overrun | ||
104 | #define si_sys_private _sifields._timer._sys_private | ||
105 | #define si_status _sifields._sigchld._status | ||
106 | #define si_utime _sifields._sigchld._utime | ||
107 | #define si_stime _sifields._sigchld._stime | ||
108 | #define si_value _sifields._rt._sigval | ||
109 | #define si_int _sifields._rt._sigval.sival_int | ||
110 | #define si_ptr _sifields._rt._sigval.sival_ptr | ||
111 | #define si_addr _sifields._sigfault._addr | ||
112 | #ifdef __ARCH_SI_TRAPNO | ||
113 | #define si_trapno _sifields._sigfault._trapno | ||
114 | #endif | ||
115 | #define si_band _sifields._sigpoll._band | ||
116 | #define si_fd _sifields._sigpoll._fd | ||
117 | |||
118 | #ifdef __KERNEL__ | ||
119 | #define __SI_MASK 0xffff0000u | ||
120 | #define __SI_KILL (0 << 16) | ||
121 | #define __SI_TIMER (1 << 16) | ||
122 | #define __SI_POLL (2 << 16) | ||
123 | #define __SI_FAULT (3 << 16) | ||
124 | #define __SI_CHLD (4 << 16) | ||
125 | #define __SI_RT (5 << 16) | ||
126 | #define __SI_MESGQ (6 << 16) | ||
127 | #define __SI_CODE(T,N) ((T) | ((N) & 0xffff)) | ||
128 | #else | ||
129 | #define __SI_KILL 0 | ||
130 | #define __SI_TIMER 0 | ||
131 | #define __SI_POLL 0 | ||
132 | #define __SI_FAULT 0 | ||
133 | #define __SI_CHLD 0 | ||
134 | #define __SI_RT 0 | ||
135 | #define __SI_MESGQ 0 | ||
136 | #define __SI_CODE(T,N) (N) | ||
137 | #endif | ||
138 | |||
139 | /* | ||
140 | * si_code values | ||
141 | * Digital reserves positive values for kernel-generated signals. | ||
142 | */ | ||
143 | #define SI_USER 0 /* sent by kill, sigsend, raise */ | ||
144 | #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */ | ||
145 | #define SI_QUEUE -1 /* sent by sigqueue */ | ||
146 | #define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */ | ||
147 | #define SI_MESGQ __SI_CODE(__SI_MESGQ,-3) /* sent by real time mesq state change */ | ||
148 | #define SI_ASYNCIO -4 /* sent by AIO completion */ | ||
149 | #define SI_SIGIO -5 /* sent by queued SIGIO */ | ||
150 | #define SI_TKILL -6 /* sent by tkill system call */ | ||
151 | #define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */ | ||
152 | |||
153 | #define SI_FROMUSER(siptr) ((siptr)->si_code <= 0) | ||
154 | #define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0) | ||
155 | |||
156 | /* | ||
157 | * SIGILL si_codes | ||
158 | */ | ||
159 | #define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */ | ||
160 | #define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */ | ||
161 | #define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */ | ||
162 | #define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */ | ||
163 | #define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */ | ||
164 | #define ILL_PRVREG (__SI_FAULT|6) /* privileged register */ | ||
165 | #define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */ | ||
166 | #define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */ | ||
167 | #define NSIGILL 8 | ||
168 | |||
169 | /* | ||
170 | * SIGFPE si_codes | ||
171 | */ | ||
172 | #define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */ | ||
173 | #define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */ | ||
174 | #define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */ | ||
175 | #define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */ | ||
176 | #define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */ | ||
177 | #define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */ | ||
178 | #define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */ | ||
179 | #define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */ | ||
180 | #define NSIGFPE 8 | ||
181 | |||
182 | /* | ||
183 | * SIGSEGV si_codes | ||
184 | */ | ||
185 | #define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */ | ||
186 | #define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */ | ||
187 | #define NSIGSEGV 2 | ||
188 | |||
189 | /* | ||
190 | * SIGBUS si_codes | ||
191 | */ | ||
192 | #define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */ | ||
193 | #define BUS_ADRERR (__SI_FAULT|2) /* non-existent physical address */ | ||
194 | #define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */ | ||
195 | #define NSIGBUS 3 | ||
196 | |||
197 | /* | ||
198 | * SIGTRAP si_codes | ||
199 | */ | ||
200 | #define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */ | ||
201 | #define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */ | ||
202 | #define NSIGTRAP 2 | ||
203 | |||
204 | /* | ||
205 | * SIGCHLD si_codes | ||
206 | */ | ||
207 | #define CLD_EXITED (__SI_CHLD|1) /* child has exited */ | ||
208 | #define CLD_KILLED (__SI_CHLD|2) /* child was killed */ | ||
209 | #define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */ | ||
210 | #define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */ | ||
211 | #define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */ | ||
212 | #define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */ | ||
213 | #define NSIGCHLD 6 | ||
214 | |||
215 | /* | ||
216 | * SIGPOLL si_codes | ||
217 | */ | ||
218 | #define POLL_IN (__SI_POLL|1) /* data input available */ | ||
219 | #define POLL_OUT (__SI_POLL|2) /* output buffers available */ | ||
220 | #define POLL_MSG (__SI_POLL|3) /* input message available */ | ||
221 | #define POLL_ERR (__SI_POLL|4) /* i/o error */ | ||
222 | #define POLL_PRI (__SI_POLL|5) /* high priority input available */ | ||
223 | #define POLL_HUP (__SI_POLL|6) /* device disconnected */ | ||
224 | #define NSIGPOLL 6 | ||
225 | |||
226 | /* | ||
227 | * sigevent definitions | ||
228 | * | ||
229 | * It seems likely that SIGEV_THREAD will have to be handled from | ||
230 | * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the | ||
231 | * thread manager then catches and does the appropriate nonsense. | ||
232 | * However, everything is written out here so as to not get lost. | ||
233 | */ | ||
234 | #define SIGEV_SIGNAL 0 /* notify via signal */ | ||
235 | #define SIGEV_NONE 1 /* other notification: meaningless */ | ||
236 | #define SIGEV_THREAD 2 /* deliver via thread creation */ | ||
237 | #define SIGEV_THREAD_ID 4 /* deliver to thread */ | ||
238 | |||
239 | #define SIGEV_MAX_SIZE 64 | ||
240 | #ifndef SIGEV_PAD_SIZE | ||
241 | #define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) | ||
242 | #endif | ||
243 | |||
244 | typedef struct sigevent { | ||
245 | sigval_t sigev_value; | ||
246 | int sigev_signo; | ||
247 | int sigev_notify; | ||
248 | union { | ||
249 | int _pad[SIGEV_PAD_SIZE]; | ||
250 | int _tid; | ||
251 | |||
252 | struct { | ||
253 | void (*_function)(sigval_t); | ||
254 | void *_attribute; /* really pthread_attr_t */ | ||
255 | } _sigev_thread; | ||
256 | } _sigev_un; | ||
257 | } sigevent_t; | ||
258 | |||
259 | #define sigev_notify_function _sigev_un._sigev_thread._function | ||
260 | #define sigev_notify_attributes _sigev_un._sigev_thread._attribute | ||
261 | #define sigev_notify_thread_id _sigev_un._tid | ||
262 | |||
263 | #ifdef __KERNEL__ | ||
264 | |||
265 | struct siginfo; | ||
266 | void do_schedule_next_timer(struct siginfo *info); | ||
267 | |||
268 | #ifndef HAVE_ARCH_COPY_SIGINFO | ||
269 | |||
270 | #include <linux/string.h> | ||
271 | |||
272 | static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) | ||
273 | { | ||
274 | if (from->si_code < 0) | ||
275 | memcpy(to, from, sizeof(*to)); | ||
276 | else | ||
277 | /* _sigchld is currently the largest known union member */ | ||
278 | memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); | ||
279 | } | ||
280 | |||
281 | #endif | ||
282 | |||
283 | extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from); | ||
284 | |||
285 | #endif /* __KERNEL__ */ | ||
286 | |||
287 | #endif | ||
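Inside the kernel, si_code carries the union discriminator in its upper 16 bits (the __SI_* values), while userspace only ever sees the low 16 bits. A minimal sketch of how kernel code can test which _sifields member is live (the helper name is illustrative):

static int siginfo_is_sigchld_layout(const siginfo_t *info)
{
	/* __SI_MASK/__SI_CHLD are the kernel-internal class bits defined above. */
	return (info->si_code & __SI_MASK) == __SI_CHLD;
}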
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h new file mode 100644 index 000000000000..1d01043e797d --- /dev/null +++ b/include/asm-generic/statfs.h | |||
@@ -0,0 +1,51 @@ | |||
1 | #ifndef _GENERIC_STATFS_H | ||
2 | #define _GENERIC_STATFS_H | ||
3 | |||
4 | #ifndef __KERNEL_STRICT_NAMES | ||
5 | # include <linux/types.h> | ||
6 | typedef __kernel_fsid_t fsid_t; | ||
7 | #endif | ||
8 | |||
9 | struct statfs { | ||
10 | __u32 f_type; | ||
11 | __u32 f_bsize; | ||
12 | __u32 f_blocks; | ||
13 | __u32 f_bfree; | ||
14 | __u32 f_bavail; | ||
15 | __u32 f_files; | ||
16 | __u32 f_ffree; | ||
17 | __kernel_fsid_t f_fsid; | ||
18 | __u32 f_namelen; | ||
19 | __u32 f_frsize; | ||
20 | __u32 f_spare[5]; | ||
21 | }; | ||
22 | |||
23 | struct statfs64 { | ||
24 | __u32 f_type; | ||
25 | __u32 f_bsize; | ||
26 | __u64 f_blocks; | ||
27 | __u64 f_bfree; | ||
28 | __u64 f_bavail; | ||
29 | __u64 f_files; | ||
30 | __u64 f_ffree; | ||
31 | __kernel_fsid_t f_fsid; | ||
32 | __u32 f_namelen; | ||
33 | __u32 f_frsize; | ||
34 | __u32 f_spare[5]; | ||
35 | }; | ||
36 | |||
37 | struct compat_statfs64 { | ||
38 | __u32 f_type; | ||
39 | __u32 f_bsize; | ||
40 | __u64 f_blocks; | ||
41 | __u64 f_bfree; | ||
42 | __u64 f_bavail; | ||
43 | __u64 f_files; | ||
44 | __u64 f_ffree; | ||
45 | __kernel_fsid_t f_fsid; | ||
46 | __u32 f_namelen; | ||
47 | __u32 f_frsize; | ||
48 | __u32 f_spare[5]; | ||
49 | }; | ||
50 | |||
51 | #endif | ||
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h new file mode 100644 index 000000000000..1e58ca39592c --- /dev/null +++ b/include/asm-generic/termios.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* termios.h: generic termios/termio user copying/translation | ||
2 | */ | ||
3 | |||
4 | #ifndef _ASM_GENERIC_TERMIOS_H | ||
5 | #define _ASM_GENERIC_TERMIOS_H | ||
6 | |||
7 | #include <asm/uaccess.h> | ||
8 | |||
9 | #ifndef __ARCH_TERMIO_GETPUT | ||
10 | |||
11 | /* | ||
12 | * Translate a "termio" structure into a "termios". Ugh. | ||
13 | */ | ||
14 | static inline int user_termio_to_kernel_termios(struct termios *termios, | ||
15 | struct termio __user *termio) | ||
16 | { | ||
17 | unsigned short tmp; | ||
18 | |||
19 | if (get_user(tmp, &termio->c_iflag) < 0) | ||
20 | goto fault; | ||
21 | termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; | ||
22 | |||
23 | if (get_user(tmp, &termio->c_oflag) < 0) | ||
24 | goto fault; | ||
25 | termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; | ||
26 | |||
27 | if (get_user(tmp, &termio->c_cflag) < 0) | ||
28 | goto fault; | ||
29 | termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; | ||
30 | |||
31 | if (get_user(tmp, &termio->c_lflag) < 0) | ||
32 | goto fault; | ||
33 | termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; | ||
34 | |||
35 | if (get_user(termios->c_line, &termio->c_line) < 0) | ||
36 | goto fault; | ||
37 | |||
38 | if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) | ||
39 | goto fault; | ||
40 | |||
41 | return 0; | ||
42 | |||
43 | fault: | ||
44 | return -EFAULT; | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Translate a "termios" structure into a "termio". Ugh. | ||
49 | */ | ||
50 | static inline int kernel_termios_to_user_termio(struct termio __user *termio, | ||
51 | struct termios *termios) | ||
52 | { | ||
53 | if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || | ||
54 | put_user(termios->c_oflag, &termio->c_oflag) < 0 || | ||
55 | put_user(termios->c_cflag, &termio->c_cflag) < 0 || | ||
56 | put_user(termios->c_lflag, &termio->c_lflag) < 0 || | ||
57 | put_user(termios->c_line, &termio->c_line) < 0 || | ||
58 | copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) | ||
59 | return -EFAULT; | ||
60 | |||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) | ||
65 | #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) | ||
66 | |||
67 | #endif /* __ARCH_TERMIO_GETPUT */ | ||
68 | |||
69 | #endif /* _ASM_GENERIC_TERMIOS_H */ | ||
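The repeated (0xffff0000 & termios->c_Xflag) | tmp pattern above merges a 16-bit termio flag word into the low half of the kernel's 32-bit flags while preserving any flag bits that only exist above bit 15. A minimal illustration of that merge:

/* Keep the high 16 bits of the kernel flags, replace the low 16 from termio. */
static unsigned int merge_termio_flags(unsigned int kernel_flags,
				       unsigned short termio_flags)
{
	return (kernel_flags & 0xffff0000u) | termio_flags;
}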
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h new file mode 100644 index 000000000000..faff403e1061 --- /dev/null +++ b/include/asm-generic/tlb.h | |||
@@ -0,0 +1,160 @@ | |||
1 | /* asm-generic/tlb.h | ||
2 | * | ||
3 | * Generic TLB shootdown code | ||
4 | * | ||
5 | * Copyright 2001 Red Hat, Inc. | ||
6 | * Based on code from mm/memory.c Copyright Linus Torvalds and others. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | */ | ||
13 | #ifndef _ASM_GENERIC__TLB_H | ||
14 | #define _ASM_GENERIC__TLB_H | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/swap.h> | ||
18 | #include <asm/pgalloc.h> | ||
19 | #include <asm/tlbflush.h> | ||
20 | |||
21 | /* | ||
22 | * For UP we don't need to worry about TLB flush | ||
23 | * and page free order so much.. | ||
24 | */ | ||
25 | #ifdef CONFIG_SMP | ||
26 | #define FREE_PTE_NR 506 | ||
27 | #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) | ||
28 | #else | ||
29 | #define FREE_PTE_NR 1 | ||
30 | #define tlb_fast_mode(tlb) 1 | ||
31 | #endif | ||
32 | |||
33 | /* struct mmu_gather is an opaque type used by the mm code for passing around | ||
34 | * any data needed by arch specific code for tlb_remove_page. This structure | ||
35 | * can be per-CPU or per-MM as the page table lock is held for the duration of | ||
36 | * TLB shootdown. | ||
37 | */ | ||
38 | struct mmu_gather { | ||
39 | struct mm_struct *mm; | ||
40 | unsigned int nr; /* set to ~0U means fast mode */ | ||
41 | unsigned int need_flush;/* Really unmapped some ptes? */ | ||
42 | unsigned int fullmm; /* non-zero means full mm flush */ | ||
43 | unsigned long freed; | ||
44 | struct page * pages[FREE_PTE_NR]; | ||
45 | }; | ||
46 | |||
47 | /* Users of the generic TLB shootdown code must declare this storage space. */ | ||
48 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
49 | |||
50 | /* tlb_gather_mmu | ||
51 | * Return a pointer to an initialized struct mmu_gather. | ||
52 | */ | ||
53 | static inline struct mmu_gather * | ||
54 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | ||
55 | { | ||
56 | struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id()); | ||
57 | |||
58 | tlb->mm = mm; | ||
59 | |||
60 | /* Use fast mode if only one CPU is online */ | ||
61 | tlb->nr = num_online_cpus() > 1 ? 0U : ~0U; | ||
62 | |||
63 | tlb->fullmm = full_mm_flush; | ||
64 | tlb->freed = 0; | ||
65 | |||
66 | return tlb; | ||
67 | } | ||
68 | |||
69 | static inline void | ||
70 | tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | ||
71 | { | ||
72 | if (!tlb->need_flush) | ||
73 | return; | ||
74 | tlb->need_flush = 0; | ||
75 | tlb_flush(tlb); | ||
76 | if (!tlb_fast_mode(tlb)) { | ||
77 | free_pages_and_swap_cache(tlb->pages, tlb->nr); | ||
78 | tlb->nr = 0; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | /* tlb_finish_mmu | ||
83 | * Called at the end of the shootdown operation to free up any resources | ||
84 | * that were required. The page table lock is still held at this point. | ||
85 | */ | ||
86 | static inline void | ||
87 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | ||
88 | { | ||
89 | int freed = tlb->freed; | ||
90 | struct mm_struct *mm = tlb->mm; | ||
91 | int rss = get_mm_counter(mm, rss); | ||
92 | |||
93 | if (rss < freed) | ||
94 | freed = rss; | ||
95 | add_mm_counter(mm, rss, -freed); | ||
96 | tlb_flush_mmu(tlb, start, end); | ||
97 | |||
98 | /* keep the page table cache within bounds */ | ||
99 | check_pgt_cache(); | ||
100 | } | ||
101 | |||
102 | static inline unsigned int | ||
103 | tlb_is_full_mm(struct mmu_gather *tlb) | ||
104 | { | ||
105 | return tlb->fullmm; | ||
106 | } | ||
107 | |||
108 | /* tlb_remove_page | ||
109 | * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while | ||
110 | * handling the additional races in SMP caused by other CPUs caching valid | ||
111 | * mappings in their TLBs. | ||
112 | */ | ||
113 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
114 | { | ||
115 | tlb->need_flush = 1; | ||
116 | if (tlb_fast_mode(tlb)) { | ||
117 | free_page_and_swap_cache(page); | ||
118 | return; | ||
119 | } | ||
120 | tlb->pages[tlb->nr++] = page; | ||
121 | if (tlb->nr >= FREE_PTE_NR) | ||
122 | tlb_flush_mmu(tlb, 0, 0); | ||
123 | } | ||
124 | |||
125 | /** | ||
126 | * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. | ||
127 | * | ||
128 | * Record the fact that ptes were really unmapped in ->need_flush, so we can | ||
129 | * later optimise away the tlb invalidate. This helps when userspace is | ||
130 | * unmapping already-unmapped pages, which happens quite a lot. | ||
131 | */ | ||
132 | #define tlb_remove_tlb_entry(tlb, ptep, address) \ | ||
133 | do { \ | ||
134 | tlb->need_flush = 1; \ | ||
135 | __tlb_remove_tlb_entry(tlb, ptep, address); \ | ||
136 | } while (0) | ||
137 | |||
138 | #define pte_free_tlb(tlb, ptep) \ | ||
139 | do { \ | ||
140 | tlb->need_flush = 1; \ | ||
141 | __pte_free_tlb(tlb, ptep); \ | ||
142 | } while (0) | ||
143 | |||
144 | #ifndef __ARCH_HAS_4LEVEL_HACK | ||
145 | #define pud_free_tlb(tlb, pudp) \ | ||
146 | do { \ | ||
147 | tlb->need_flush = 1; \ | ||
148 | __pud_free_tlb(tlb, pudp); \ | ||
149 | } while (0) | ||
150 | #endif | ||
151 | |||
152 | #define pmd_free_tlb(tlb, pmdp) \ | ||
153 | do { \ | ||
154 | tlb->need_flush = 1; \ | ||
155 | __pmd_free_tlb(tlb, pmdp); \ | ||
156 | } while (0) | ||
157 | |||
158 | #define tlb_migrate_finish(mm) do {} while (0) | ||
159 | |||
160 | #endif /* _ASM_GENERIC__TLB_H */ | ||
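The intended calling sequence for the gather API is: take the page table lock, tlb_gather_mmu(), clear ptes and feed their old pages to tlb_remove_page(), then tlb_finish_mmu(). A sketch of that shape, assuming a hypothetical unmap_range() (the real callers are the unmap paths in mm/memory.c):

static void unmap_range(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb;

	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);		/* 0: not a full-mm teardown */

	/*
	 * For each pte in [start, end): clear it, then hand the old page to
	 * tlb_remove_page(tlb, page), which batches frees until the next
	 * TLB flush (or frees immediately in fast mode).
	 */

	tlb_finish_mmu(tlb, start, end);	/* flush the TLB, free batched pages */
	spin_unlock(&mm->page_table_lock);
}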
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h new file mode 100644 index 000000000000..ec96e8b0f190 --- /dev/null +++ b/include/asm-generic/topology.h | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * linux/include/asm-generic/topology.h | ||
3 | * | ||
4 | * Written by: Matthew Dobson, IBM Corporation | ||
5 | * | ||
6 | * Copyright (C) 2002, IBM Corp. | ||
7 | * | ||
8 | * All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
18 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
19 | * details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | * Send feedback to <colpatch@us.ibm.com> | ||
26 | */ | ||
27 | #ifndef _ASM_GENERIC_TOPOLOGY_H | ||
28 | #define _ASM_GENERIC_TOPOLOGY_H | ||
29 | |||
30 | /* Other architectures wishing to use this simple topology API should fill | ||
31 | in the below functions as appropriate in their own <asm/topology.h> file. */ | ||
32 | #ifndef cpu_to_node | ||
33 | #define cpu_to_node(cpu) (0) | ||
34 | #endif | ||
35 | #ifndef parent_node | ||
36 | #define parent_node(node) (0) | ||
37 | #endif | ||
38 | #ifndef node_to_cpumask | ||
39 | #define node_to_cpumask(node) (cpu_online_map) | ||
40 | #endif | ||
41 | #ifndef node_to_first_cpu | ||
42 | #define node_to_first_cpu(node) (0) | ||
43 | #endif | ||
44 | #ifndef pcibus_to_cpumask | ||
45 | #define pcibus_to_cpumask(bus) (cpu_online_map) | ||
46 | #endif | ||
47 | |||
48 | #endif /* _ASM_GENERIC_TOPOLOGY_H */ | ||
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h new file mode 100644 index 000000000000..549cb3a1640a --- /dev/null +++ b/include/asm-generic/uaccess.h | |||
@@ -0,0 +1,26 @@ | |||
1 | #ifndef _ASM_GENERIC_UACCESS_H_ | ||
2 | #define _ASM_GENERIC_UACCESS_H_ | ||
3 | |||
4 | /* | ||
5 | * This macro should be used instead of __get_user() when accessing | ||
6 | * values at locations that are not known to be aligned. | ||
7 | */ | ||
8 | #define __get_user_unaligned(x, ptr) \ | ||
9 | ({ \ | ||
10 | __typeof__ (*(ptr)) __x; \ | ||
11 | __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \ | ||
12 | (x) = __x; \ | ||
13 | }) | ||
14 | |||
15 | |||
16 | /* | ||
17 | * This macro should be used instead of __put_user() when accessing | ||
18 | * values at locations that are not known to be aligned. | ||
19 | */ | ||
20 | #define __put_user_unaligned(x, ptr) \ | ||
21 | ({ \ | ||
22 | __typeof__ (*(ptr)) __x = (x); \ | ||
23 | __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \ | ||
24 | }) | ||
25 | |||
26 | #endif /* _ASM_GENERIC_UACCESS_H */ | ||
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h new file mode 100644 index 000000000000..c856a43e3b45 --- /dev/null +++ b/include/asm-generic/unaligned.h | |||
@@ -0,0 +1,121 @@ | |||
1 | #ifndef _ASM_GENERIC_UNALIGNED_H_ | ||
2 | #define _ASM_GENERIC_UNALIGNED_H_ | ||
3 | |||
4 | /* | ||
5 | * For the benefit of those who are trying to port Linux to another | ||
6 | * architecture, here are some C-language equivalents. | ||
7 | * | ||
8 | * This is based almost entirely upon Richard Henderson's | ||
9 | * asm-alpha/unaligned.h implementation. Some comments were | ||
10 | * taken from David Mosberger's asm-ia64/unaligned.h header. | ||
11 | */ | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | |||
15 | /* | ||
16 | * The main single-value unaligned transfer routines. | ||
17 | */ | ||
18 | #define get_unaligned(ptr) \ | ||
19 | ((__typeof__(*(ptr)))__get_unaligned((ptr), sizeof(*(ptr)))) | ||
20 | #define put_unaligned(x,ptr) \ | ||
21 | __put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr))) | ||
22 | |||
23 | /* | ||
24 | * This function doesn't actually exist. The idea is that when | ||
25 | * someone uses the macros below with an unsupported size (datatype), | ||
26 | * the linker will alert us to the problem via an unresolved reference | ||
27 | * error. | ||
28 | */ | ||
29 | extern void bad_unaligned_access_length(void) __attribute__((noreturn)); | ||
30 | |||
31 | struct __una_u64 { __u64 x __attribute__((packed)); }; | ||
32 | struct __una_u32 { __u32 x __attribute__((packed)); }; | ||
33 | struct __una_u16 { __u16 x __attribute__((packed)); }; | ||
34 | |||
35 | /* | ||
36 | * Elemental unaligned loads | ||
37 | */ | ||
38 | |||
39 | static inline unsigned long __uldq(const __u64 *addr) | ||
40 | { | ||
41 | const struct __una_u64 *ptr = (const struct __una_u64 *) addr; | ||
42 | return ptr->x; | ||
43 | } | ||
44 | |||
45 | static inline unsigned long __uldl(const __u32 *addr) | ||
46 | { | ||
47 | const struct __una_u32 *ptr = (const struct __una_u32 *) addr; | ||
48 | return ptr->x; | ||
49 | } | ||
50 | |||
51 | static inline unsigned long __uldw(const __u16 *addr) | ||
52 | { | ||
53 | const struct __una_u16 *ptr = (const struct __una_u16 *) addr; | ||
54 | return ptr->x; | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * Elemental unaligned stores | ||
59 | */ | ||
60 | |||
61 | static inline void __ustq(__u64 val, __u64 *addr) | ||
62 | { | ||
63 | struct __una_u64 *ptr = (struct __una_u64 *) addr; | ||
64 | ptr->x = val; | ||
65 | } | ||
66 | |||
67 | static inline void __ustl(__u32 val, __u32 *addr) | ||
68 | { | ||
69 | struct __una_u32 *ptr = (struct __una_u32 *) addr; | ||
70 | ptr->x = val; | ||
71 | } | ||
72 | |||
73 | static inline void __ustw(__u16 val, __u16 *addr) | ||
74 | { | ||
75 | struct __una_u16 *ptr = (struct __una_u16 *) addr; | ||
76 | ptr->x = val; | ||
77 | } | ||
78 | |||
79 | static inline unsigned long __get_unaligned(const void *ptr, size_t size) | ||
80 | { | ||
81 | unsigned long val; | ||
82 | switch (size) { | ||
83 | case 1: | ||
84 | val = *(const __u8 *)ptr; | ||
85 | break; | ||
86 | case 2: | ||
87 | val = __uldw((const __u16 *)ptr); | ||
88 | break; | ||
89 | case 4: | ||
90 | val = __uldl((const __u32 *)ptr); | ||
91 | break; | ||
92 | case 8: | ||
93 | val = __uldq((const __u64 *)ptr); | ||
94 | break; | ||
95 | default: | ||
96 | bad_unaligned_access_length(); | ||
97 | } | ||
98 | return val; | ||
99 | } | ||
100 | |||
101 | static inline void __put_unaligned(unsigned long val, void *ptr, size_t size) | ||
102 | { | ||
103 | switch (size) { | ||
104 | case 1: | ||
105 | *(__u8 *)ptr = val; | ||
106 | break; | ||
107 | case 2: | ||
108 | __ustw(val, (__u16 *)ptr); | ||
109 | break; | ||
110 | case 4: | ||
111 | __ustl(val, (__u32 *)ptr); | ||
112 | break; | ||
113 | case 8: | ||
114 | __ustq(val, (__u64 *)ptr); | ||
115 | break; | ||
116 | default: | ||
117 | bad_unaligned_access_length(); | ||
118 | } | ||
119 | } | ||
120 | |||
121 | #endif /* _ASM_GENERIC_UNALIGNED_H */ | ||
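A hedged example of the intended use: pulling a 32-bit field out of a byte buffer at an offset that may not be naturally aligned (the function and the offset are illustrative):

static __u32 read_len_field(const unsigned char *buf)
{
	/* get_unaligned() avoids alignment traps on strict-alignment CPUs. */
	return get_unaligned((const __u32 *)(buf + 6));
}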
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h new file mode 100644 index 000000000000..99cef06a364a --- /dev/null +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -0,0 +1,90 @@ | |||
1 | #ifndef LOAD_OFFSET | ||
2 | #define LOAD_OFFSET 0 | ||
3 | #endif | ||
4 | |||
5 | #ifndef VMLINUX_SYMBOL | ||
6 | #define VMLINUX_SYMBOL(_sym_) _sym_ | ||
7 | #endif | ||
8 | |||
9 | #define RODATA \ | ||
10 | .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ | ||
11 | *(.rodata) *(.rodata.*) \ | ||
12 | *(__vermagic) /* Kernel version magic */ \ | ||
13 | } \ | ||
14 | \ | ||
15 | .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ | ||
16 | *(.rodata1) \ | ||
17 | } \ | ||
18 | \ | ||
19 | /* PCI quirks */ \ | ||
20 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ | ||
21 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ | ||
22 | *(.pci_fixup_early) \ | ||
23 | VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \ | ||
24 | VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \ | ||
25 | *(.pci_fixup_header) \ | ||
26 | VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \ | ||
27 | VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \ | ||
28 | *(.pci_fixup_final) \ | ||
29 | VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \ | ||
30 | VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \ | ||
31 | *(.pci_fixup_enable) \ | ||
32 | VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \ | ||
33 | } \ | ||
34 | \ | ||
35 | /* Kernel symbol table: Normal symbols */ \ | ||
36 | __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ | ||
37 | VMLINUX_SYMBOL(__start___ksymtab) = .; \ | ||
38 | *(__ksymtab) \ | ||
39 | VMLINUX_SYMBOL(__stop___ksymtab) = .; \ | ||
40 | } \ | ||
41 | \ | ||
42 | /* Kernel symbol table: GPL-only symbols */ \ | ||
43 | __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ | ||
44 | VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ | ||
45 | *(__ksymtab_gpl) \ | ||
46 | VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ | ||
47 | } \ | ||
48 | \ | ||
49 | /* Kernel symbol table: Normal symbols */ \ | ||
50 | __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ | ||
51 | VMLINUX_SYMBOL(__start___kcrctab) = .; \ | ||
52 | *(__kcrctab) \ | ||
53 | VMLINUX_SYMBOL(__stop___kcrctab) = .; \ | ||
54 | } \ | ||
55 | \ | ||
56 | /* Kernel symbol table: GPL-only symbols */ \ | ||
57 | __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ | ||
58 | VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ | ||
59 | *(__kcrctab_gpl) \ | ||
60 | VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ | ||
61 | } \ | ||
62 | \ | ||
63 | /* Kernel symbol table: strings */ \ | ||
64 | __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ | ||
65 | *(__ksymtab_strings) \ | ||
66 | } \ | ||
67 | \ | ||
68 | /* Built-in module parameters. */ \ | ||
69 | __param : AT(ADDR(__param) - LOAD_OFFSET) { \ | ||
70 | VMLINUX_SYMBOL(__start___param) = .; \ | ||
71 | *(__param) \ | ||
72 | VMLINUX_SYMBOL(__stop___param) = .; \ | ||
73 | } | ||
74 | |||
75 | #define SECURITY_INIT \ | ||
76 | .security_initcall.init : { \ | ||
77 | VMLINUX_SYMBOL(__security_initcall_start) = .; \ | ||
78 | *(.security_initcall.init) \ | ||
79 | VMLINUX_SYMBOL(__security_initcall_end) = .; \ | ||
80 | } | ||
81 | |||
82 | #define SCHED_TEXT \ | ||
83 | VMLINUX_SYMBOL(__sched_text_start) = .; \ | ||
84 | *(.sched.text) \ | ||
85 | VMLINUX_SYMBOL(__sched_text_end) = .; | ||
86 | |||
87 | #define LOCK_TEXT \ | ||
88 | VMLINUX_SYMBOL(__lock_text_start) = .; \ | ||
89 | *(.spinlock.text) \ | ||
90 | VMLINUX_SYMBOL(__lock_text_end) = .; | ||
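These macros are meant to be dropped into an architecture's vmlinux.lds.S; a minimal, hypothetical excerpt showing where they slot in (not taken from any particular architecture):

SECTIONS
{
	.text : {
		SCHED_TEXT	/* scheduler text, bracketed by __sched_text_* */
		LOCK_TEXT	/* out-of-line spinlock text */
		*(.text)
	}

	RODATA			/* read-only data, ksymtabs, PCI quirks, __param */

	SECURITY_INIT		/* security_initcall table */
}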
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h new file mode 100644 index 000000000000..aaab875e1a35 --- /dev/null +++ b/include/asm-generic/xor.h | |||
@@ -0,0 +1,718 @@ | |||
1 | /* | ||
2 | * include/asm-generic/xor.h | ||
3 | * | ||
4 | * Generic optimized RAID-5 checksumming functions. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * (for example /usr/src/linux/COPYING); if not, write to the Free | ||
13 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
14 | */ | ||
15 | |||
16 | #include <asm/processor.h> | ||
17 | |||
18 | static void | ||
19 | xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
20 | { | ||
21 | long lines = bytes / (sizeof (long)) / 8; | ||
22 | |||
23 | do { | ||
24 | p1[0] ^= p2[0]; | ||
25 | p1[1] ^= p2[1]; | ||
26 | p1[2] ^= p2[2]; | ||
27 | p1[3] ^= p2[3]; | ||
28 | p1[4] ^= p2[4]; | ||
29 | p1[5] ^= p2[5]; | ||
30 | p1[6] ^= p2[6]; | ||
31 | p1[7] ^= p2[7]; | ||
32 | p1 += 8; | ||
33 | p2 += 8; | ||
34 | } while (--lines > 0); | ||
35 | } | ||
36 | |||
37 | static void | ||
38 | xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
39 | unsigned long *p3) | ||
40 | { | ||
41 | long lines = bytes / (sizeof (long)) / 8; | ||
42 | |||
43 | do { | ||
44 | p1[0] ^= p2[0] ^ p3[0]; | ||
45 | p1[1] ^= p2[1] ^ p3[1]; | ||
46 | p1[2] ^= p2[2] ^ p3[2]; | ||
47 | p1[3] ^= p2[3] ^ p3[3]; | ||
48 | p1[4] ^= p2[4] ^ p3[4]; | ||
49 | p1[5] ^= p2[5] ^ p3[5]; | ||
50 | p1[6] ^= p2[6] ^ p3[6]; | ||
51 | p1[7] ^= p2[7] ^ p3[7]; | ||
52 | p1 += 8; | ||
53 | p2 += 8; | ||
54 | p3 += 8; | ||
55 | } while (--lines > 0); | ||
56 | } | ||
57 | |||
58 | static void | ||
59 | xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
60 | unsigned long *p3, unsigned long *p4) | ||
61 | { | ||
62 | long lines = bytes / (sizeof (long)) / 8; | ||
63 | |||
64 | do { | ||
65 | p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; | ||
66 | p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; | ||
67 | p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; | ||
68 | p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; | ||
69 | p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; | ||
70 | p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; | ||
71 | p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; | ||
72 | p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; | ||
73 | p1 += 8; | ||
74 | p2 += 8; | ||
75 | p3 += 8; | ||
76 | p4 += 8; | ||
77 | } while (--lines > 0); | ||
78 | } | ||
79 | |||
80 | static void | ||
81 | xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
82 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
83 | { | ||
84 | long lines = bytes / (sizeof (long)) / 8; | ||
85 | |||
86 | do { | ||
87 | p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; | ||
88 | p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; | ||
89 | p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; | ||
90 | p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; | ||
91 | p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; | ||
92 | p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; | ||
93 | p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; | ||
94 | p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; | ||
95 | p1 += 8; | ||
96 | p2 += 8; | ||
97 | p3 += 8; | ||
98 | p4 += 8; | ||
99 | p5 += 8; | ||
100 | } while (--lines > 0); | ||
101 | } | ||
102 | |||
103 | static void | ||
104 | xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
105 | { | ||
106 | long lines = bytes / (sizeof (long)) / 8; | ||
107 | |||
108 | do { | ||
109 | register long d0, d1, d2, d3, d4, d5, d6, d7; | ||
110 | d0 = p1[0]; /* Pull the stuff into registers */ | ||
111 | d1 = p1[1]; /* ... in bursts, if possible. */ | ||
112 | d2 = p1[2]; | ||
113 | d3 = p1[3]; | ||
114 | d4 = p1[4]; | ||
115 | d5 = p1[5]; | ||
116 | d6 = p1[6]; | ||
117 | d7 = p1[7]; | ||
118 | d0 ^= p2[0]; | ||
119 | d1 ^= p2[1]; | ||
120 | d2 ^= p2[2]; | ||
121 | d3 ^= p2[3]; | ||
122 | d4 ^= p2[4]; | ||
123 | d5 ^= p2[5]; | ||
124 | d6 ^= p2[6]; | ||
125 | d7 ^= p2[7]; | ||
126 | p1[0] = d0; /* Store the result (in bursts) */ | ||
127 | p1[1] = d1; | ||
128 | p1[2] = d2; | ||
129 | p1[3] = d3; | ||
130 | p1[4] = d4; | ||
131 | p1[5] = d5; | ||
132 | p1[6] = d6; | ||
133 | p1[7] = d7; | ||
134 | p1 += 8; | ||
135 | p2 += 8; | ||
136 | } while (--lines > 0); | ||
137 | } | ||
138 | |||
139 | static void | ||
140 | xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
141 | unsigned long *p3) | ||
142 | { | ||
143 | long lines = bytes / (sizeof (long)) / 8; | ||
144 | |||
145 | do { | ||
146 | register long d0, d1, d2, d3, d4, d5, d6, d7; | ||
147 | d0 = p1[0]; /* Pull the stuff into registers */ | ||
148 | d1 = p1[1]; /* ... in bursts, if possible. */ | ||
149 | d2 = p1[2]; | ||
150 | d3 = p1[3]; | ||
151 | d4 = p1[4]; | ||
152 | d5 = p1[5]; | ||
153 | d6 = p1[6]; | ||
154 | d7 = p1[7]; | ||
155 | d0 ^= p2[0]; | ||
156 | d1 ^= p2[1]; | ||
157 | d2 ^= p2[2]; | ||
158 | d3 ^= p2[3]; | ||
159 | d4 ^= p2[4]; | ||
160 | d5 ^= p2[5]; | ||
161 | d6 ^= p2[6]; | ||
162 | d7 ^= p2[7]; | ||
163 | d0 ^= p3[0]; | ||
164 | d1 ^= p3[1]; | ||
165 | d2 ^= p3[2]; | ||
166 | d3 ^= p3[3]; | ||
167 | d4 ^= p3[4]; | ||
168 | d5 ^= p3[5]; | ||
169 | d6 ^= p3[6]; | ||
170 | d7 ^= p3[7]; | ||
171 | p1[0] = d0; /* Store the result (in bursts) */ | ||
172 | p1[1] = d1; | ||
173 | p1[2] = d2; | ||
174 | p1[3] = d3; | ||
175 | p1[4] = d4; | ||
176 | p1[5] = d5; | ||
177 | p1[6] = d6; | ||
178 | p1[7] = d7; | ||
179 | p1 += 8; | ||
180 | p2 += 8; | ||
181 | p3 += 8; | ||
182 | } while (--lines > 0); | ||
183 | } | ||
184 | |||
185 | static void | ||
186 | xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
187 | unsigned long *p3, unsigned long *p4) | ||
188 | { | ||
189 | long lines = bytes / (sizeof (long)) / 8; | ||
190 | |||
191 | do { | ||
192 | register long d0, d1, d2, d3, d4, d5, d6, d7; | ||
193 | d0 = p1[0]; /* Pull the stuff into registers */ | ||
194 | d1 = p1[1]; /* ... in bursts, if possible. */ | ||
195 | d2 = p1[2]; | ||
196 | d3 = p1[3]; | ||
197 | d4 = p1[4]; | ||
198 | d5 = p1[5]; | ||
199 | d6 = p1[6]; | ||
200 | d7 = p1[7]; | ||
201 | d0 ^= p2[0]; | ||
202 | d1 ^= p2[1]; | ||
203 | d2 ^= p2[2]; | ||
204 | d3 ^= p2[3]; | ||
205 | d4 ^= p2[4]; | ||
206 | d5 ^= p2[5]; | ||
207 | d6 ^= p2[6]; | ||
208 | d7 ^= p2[7]; | ||
209 | d0 ^= p3[0]; | ||
210 | d1 ^= p3[1]; | ||
211 | d2 ^= p3[2]; | ||
212 | d3 ^= p3[3]; | ||
213 | d4 ^= p3[4]; | ||
214 | d5 ^= p3[5]; | ||
215 | d6 ^= p3[6]; | ||
216 | d7 ^= p3[7]; | ||
217 | d0 ^= p4[0]; | ||
218 | d1 ^= p4[1]; | ||
219 | d2 ^= p4[2]; | ||
220 | d3 ^= p4[3]; | ||
221 | d4 ^= p4[4]; | ||
222 | d5 ^= p4[5]; | ||
223 | d6 ^= p4[6]; | ||
224 | d7 ^= p4[7]; | ||
225 | p1[0] = d0; /* Store the result (in bursts) */ | ||
226 | p1[1] = d1; | ||
227 | p1[2] = d2; | ||
228 | p1[3] = d3; | ||
229 | p1[4] = d4; | ||
230 | p1[5] = d5; | ||
231 | p1[6] = d6; | ||
232 | p1[7] = d7; | ||
233 | p1 += 8; | ||
234 | p2 += 8; | ||
235 | p3 += 8; | ||
236 | p4 += 8; | ||
237 | } while (--lines > 0); | ||
238 | } | ||
239 | |||
240 | static void | ||
241 | xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
242 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
243 | { | ||
244 | long lines = bytes / (sizeof (long)) / 8; | ||
245 | |||
246 | do { | ||
247 | register long d0, d1, d2, d3, d4, d5, d6, d7; | ||
248 | d0 = p1[0]; /* Pull the stuff into registers */ | ||
249 | d1 = p1[1]; /* ... in bursts, if possible. */ | ||
250 | d2 = p1[2]; | ||
251 | d3 = p1[3]; | ||
252 | d4 = p1[4]; | ||
253 | d5 = p1[5]; | ||
254 | d6 = p1[6]; | ||
255 | d7 = p1[7]; | ||
256 | d0 ^= p2[0]; | ||
257 | d1 ^= p2[1]; | ||
258 | d2 ^= p2[2]; | ||
259 | d3 ^= p2[3]; | ||
260 | d4 ^= p2[4]; | ||
261 | d5 ^= p2[5]; | ||
262 | d6 ^= p2[6]; | ||
263 | d7 ^= p2[7]; | ||
264 | d0 ^= p3[0]; | ||
265 | d1 ^= p3[1]; | ||
266 | d2 ^= p3[2]; | ||
267 | d3 ^= p3[3]; | ||
268 | d4 ^= p3[4]; | ||
269 | d5 ^= p3[5]; | ||
270 | d6 ^= p3[6]; | ||
271 | d7 ^= p3[7]; | ||
272 | d0 ^= p4[0]; | ||
273 | d1 ^= p4[1]; | ||
274 | d2 ^= p4[2]; | ||
275 | d3 ^= p4[3]; | ||
276 | d4 ^= p4[4]; | ||
277 | d5 ^= p4[5]; | ||
278 | d6 ^= p4[6]; | ||
279 | d7 ^= p4[7]; | ||
280 | d0 ^= p5[0]; | ||
281 | d1 ^= p5[1]; | ||
282 | d2 ^= p5[2]; | ||
283 | d3 ^= p5[3]; | ||
284 | d4 ^= p5[4]; | ||
285 | d5 ^= p5[5]; | ||
286 | d6 ^= p5[6]; | ||
287 | d7 ^= p5[7]; | ||
288 | p1[0] = d0; /* Store the result (in bursts) */ | ||
289 | p1[1] = d1; | ||
290 | p1[2] = d2; | ||
291 | p1[3] = d3; | ||
292 | p1[4] = d4; | ||
293 | p1[5] = d5; | ||
294 | p1[6] = d6; | ||
295 | p1[7] = d7; | ||
296 | p1 += 8; | ||
297 | p2 += 8; | ||
298 | p3 += 8; | ||
299 | p4 += 8; | ||
300 | p5 += 8; | ||
301 | } while (--lines > 0); | ||
302 | } | ||
303 | |||
304 | static void | ||
305 | xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
306 | { | ||
307 | long lines = bytes / (sizeof (long)) / 8 - 1; | ||
308 | prefetchw(p1); | ||
309 | prefetch(p2); | ||
310 | |||
311 | do { | ||
312 | prefetchw(p1+8); | ||
313 | prefetch(p2+8); | ||
314 | once_more: | ||
315 | p1[0] ^= p2[0]; | ||
316 | p1[1] ^= p2[1]; | ||
317 | p1[2] ^= p2[2]; | ||
318 | p1[3] ^= p2[3]; | ||
319 | p1[4] ^= p2[4]; | ||
320 | p1[5] ^= p2[5]; | ||
321 | p1[6] ^= p2[6]; | ||
322 | p1[7] ^= p2[7]; | ||
323 | p1 += 8; | ||
324 | p2 += 8; | ||
325 | } while (--lines > 0); | ||
326 | if (lines == 0) | ||
327 | goto once_more; | ||
328 | } | ||
329 | |||
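xor_8regs_p_2() above and the remaining "_p" (prefetch) variants start with lines set to one less than the number of eight-word blocks: each pass through the do-while prefetches the next block while XORing the current one, and the final block is handled by the trailing "goto once_more", which re-enters the loop body past the prefetch calls so the routine never prefetches beyond the end of the buffers. A rough equivalent without the goto (an illustrative fragment only, assuming at least one eight-word block per call) would be:

	/* Illustrative rewrite of xor_8regs_p_2()'s loop without the goto. */
	long blocks = bytes / sizeof(long) / 8;
	long i;

	for (i = 0; i < blocks; i++) {
		if (i + 1 < blocks) {	/* never prefetch past the end */
			prefetchw(p1 + 8);
			prefetch(p2 + 8);
		}
		p1[0] ^= p2[0];  p1[1] ^= p2[1];
		p1[2] ^= p2[2];  p1[3] ^= p2[3];
		p1[4] ^= p2[4];  p1[5] ^= p2[5];
		p1[6] ^= p2[6];  p1[7] ^= p2[7];
		p1 += 8;
		p2 += 8;
	}
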
330 | static void | ||
331 | xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
332 | unsigned long *p3) | ||
333 | { | ||
334 | long lines = bytes / (sizeof (long)) / 8 - 1; | ||
335 | prefetchw(p1); | ||
336 | prefetch(p2); | ||
337 | prefetch(p3); | ||
338 | |||
339 | do { | ||
340 | prefetchw(p1+8); | ||
341 | prefetch(p2+8); | ||
342 | prefetch(p3+8); | ||
343 | once_more: | ||
344 | p1[0] ^= p2[0] ^ p3[0]; | ||
345 | p1[1] ^= p2[1] ^ p3[1]; | ||
346 | p1[2] ^= p2[2] ^ p3[2]; | ||
347 | p1[3] ^= p2[3] ^ p3[3]; | ||
348 | p1[4] ^= p2[4] ^ p3[4]; | ||
349 | p1[5] ^= p2[5] ^ p3[5]; | ||
350 | p1[6] ^= p2[6] ^ p3[6]; | ||
351 | p1[7] ^= p2[7] ^ p3[7]; | ||
352 | p1 += 8; | ||
353 | p2 += 8; | ||
354 | p3 += 8; | ||
355 | } while (--lines > 0); | ||
356 | if (lines == 0) | ||
357 | goto once_more; | ||
358 | } | ||
359 | |||
360 | static void | ||
361 | xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
362 | unsigned long *p3, unsigned long *p4) | ||
363 | { | ||
364 | long lines = bytes / (sizeof (long)) / 8 - 1; | ||
365 | |||
366 | prefetchw(p1); | ||
367 | prefetch(p2); | ||
368 | prefetch(p3); | ||
369 | prefetch(p4); | ||
370 | |||
371 | do { | ||
372 | prefetchw(p1+8); | ||
373 | prefetch(p2+8); | ||
374 | prefetch(p3+8); | ||
375 | prefetch(p4+8); | ||
376 | once_more: | ||
377 | p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; | ||
378 | p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; | ||
379 | p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; | ||
380 | p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; | ||
381 | p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; | ||
382 | p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; | ||
383 | p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; | ||
384 | p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; | ||
385 | p1 += 8; | ||
386 | p2 += 8; | ||
387 | p3 += 8; | ||
388 | p4 += 8; | ||
389 | } while (--lines > 0); | ||
390 | if (lines == 0) | ||
391 | goto once_more; | ||
392 | } | ||
393 | |||
394 | static void | ||
395 | xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
396 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
397 | { | ||
398 | long lines = bytes / (sizeof (long)) / 8 - 1; | ||
399 | |||
400 | prefetchw(p1); | ||
401 | prefetch(p2); | ||
402 | prefetch(p3); | ||
403 | prefetch(p4); | ||
404 | prefetch(p5); | ||
405 | |||
406 | do { | ||
407 | prefetchw(p1+8); | ||
408 | prefetch(p2+8); | ||
409 | prefetch(p3+8); | ||
410 | prefetch(p4+8); | ||
411 | prefetch(p5+8); | ||
412 | once_more: | ||
413 | p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; | ||
414 | p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; | ||
415 | p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; | ||
416 | p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; | ||
417 | p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; | ||
418 | p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; | ||
419 | p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; | ||
420 | p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; | ||
421 | p1 += 8; | ||
422 | p2 += 8; | ||
423 | p3 += 8; | ||
424 | p4 += 8; | ||
425 | p5 += 8; | ||
426 | } while (--lines > 0); | ||
427 | if (lines == 0) | ||
428 | goto once_more; | ||
429 | } | ||
430 | |||
431 | static void | ||
432 | xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
433 | { | ||
434 | long lines = bytes / (sizeof (long)) / 8 - 1; | ||
435 | |||
436 | prefetchw(p1); | ||
437 | prefetch(p2); | ||
438 | |||
439 | do { | ||
440 | register long d0, d1, d2, d3, d4, d5, d6, d7; | ||
441 | |||
442 | prefetchw(p1+8); | ||
443 | prefetch(p2+8); | ||
444 | once_more: | ||
445 | d0 = p1[0]; /* Pull the stuff into registers */ | ||
446 | d1 = p1[1]; /* ... in bursts, if possible. */ | ||
447 | d2 = p1[2]; | ||
448 | d3 = p1[3]; | ||
449 | d4 = p1[4]; | ||
450 | d5 = p1[5]; | ||
451 | d6 = p1[6]; | ||
452 | d7 = p1[7]; | ||
453 | d0 ^= p2[0]; | ||
454 | d1 ^= p2[1]; | ||
455 | d2 ^= p2[2]; | ||
456 | d3 ^= p2[3]; | ||
457 | d4 ^= p2[4]; | ||
458 | d5 ^= p2[5]; | ||
459 | d6 ^= p2[6]; | ||
460 | d7 ^= p2[7]; | ||
461 | p1[0] = d0; /* Store the result (in bursts) */ | ||
462 | p1[1] = d1; | ||
463 | p1[2] = d2; | ||
464 | p1[3] = d3; | ||
465 | p1[4] = d4; | ||
466 | p1[5] = d5; | ||
467 | p1[6] = d6; | ||
468 | p1[7] = d7; | ||
469 | p1 += 8; | ||
470 | p2 += 8; | ||
471 | } while (--lines > 0); | ||
472 | if (lines == 0) | ||
473 | goto once_more; | ||
474 | } | ||
475 | |||
476 | static void | ||
477 | xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
478 | unsigned long *p3) | ||
479 | { | ||
480 | long lines = bytes / (sizeof (long)) / 8 - 1; | ||
481 | |||
482 | prefetchw(p1); | ||
483 | prefetch(p2); | ||
484 | prefetch(p3); | ||
485 | |||
486 | do { | ||
487 | register long d0, d1, d2, d3, d4, d5, d6, d7; | ||
488 | |||
489 | prefetchw(p1+8); | ||
490 | prefetch(p2+8); | ||
491 | prefetch(p3+8); | ||
492 | once_more: | ||
493 | d0 = p1[0]; /* Pull the stuff into registers */ | ||
494 | d1 = p1[1]; /* ... in bursts, if possible. */ | ||
495 | d2 = p1[2]; | ||
496 | d3 = p1[3]; | ||
497 | d4 = p1[4]; | ||
498 | d5 = p1[5]; | ||
499 | d6 = p1[6]; | ||
500 | d7 = p1[7]; | ||
501 | d0 ^= p2[0]; | ||
502 | d1 ^= p2[1]; | ||
503 | d2 ^= p2[2]; | ||
504 | d3 ^= p2[3]; | ||
505 | d4 ^= p2[4]; | ||
506 | d5 ^= p2[5]; | ||
507 | d6 ^= p2[6]; | ||
508 | d7 ^= p2[7]; | ||
509 | d0 ^= p3[0]; | ||
510 | d1 ^= p3[1]; | ||
511 | d2 ^= p3[2]; | ||
512 | d3 ^= p3[3]; | ||
513 | d4 ^= p3[4]; | ||
514 | d5 ^= p3[5]; | ||
515 | d6 ^= p3[6]; | ||
516 | d7 ^= p3[7]; | ||
517 | p1[0] = d0; /* Store the result (in bursts) */ | ||
518 | p1[1] = d1; | ||
519 | p1[2] = d2; | ||
520 | p1[3] = d3; | ||
521 | p1[4] = d4; | ||
522 | p1[5] = d5; | ||
523 | p1[6] = d6; | ||
524 | p1[7] = d7; | ||
525 | p1 += 8; | ||
526 | p2 += 8; | ||
527 | p3 += 8; | ||
528 | } while (--lines > 0); | ||
529 | if (lines == 0) | ||
530 | goto once_more; | ||
531 | } | ||
532 | |||
533 | static void | ||
534 | xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
535 | unsigned long *p3, unsigned long *p4) | ||
536 | { | ||
537 | long lines = bytes / (sizeof (long)) / 8 - 1; | ||
538 | |||
539 | prefetchw(p1); | ||
540 | prefetch(p2); | ||
541 | prefetch(p3); | ||
542 | prefetch(p4); | ||
543 | |||
544 | do { | ||
545 | register long d0, d1, d2, d3, d4, d5, d6, d7; | ||
546 | |||
547 | prefetchw(p1+8); | ||
548 | prefetch(p2+8); | ||
549 | prefetch(p3+8); | ||
550 | prefetch(p4+8); | ||
551 | once_more: | ||
552 | d0 = p1[0]; /* Pull the stuff into registers */ | ||
553 | d1 = p1[1]; /* ... in bursts, if possible. */ | ||
554 | d2 = p1[2]; | ||
555 | d3 = p1[3]; | ||
556 | d4 = p1[4]; | ||
557 | d5 = p1[5]; | ||
558 | d6 = p1[6]; | ||
559 | d7 = p1[7]; | ||
560 | d0 ^= p2[0]; | ||
561 | d1 ^= p2[1]; | ||
562 | d2 ^= p2[2]; | ||
563 | d3 ^= p2[3]; | ||
564 | d4 ^= p2[4]; | ||
565 | d5 ^= p2[5]; | ||
566 | d6 ^= p2[6]; | ||
567 | d7 ^= p2[7]; | ||
568 | d0 ^= p3[0]; | ||
569 | d1 ^= p3[1]; | ||
570 | d2 ^= p3[2]; | ||
571 | d3 ^= p3[3]; | ||
572 | d4 ^= p3[4]; | ||
573 | d5 ^= p3[5]; | ||
574 | d6 ^= p3[6]; | ||
575 | d7 ^= p3[7]; | ||
576 | d0 ^= p4[0]; | ||
577 | d1 ^= p4[1]; | ||
578 | d2 ^= p4[2]; | ||
579 | d3 ^= p4[3]; | ||
580 | d4 ^= p4[4]; | ||
581 | d5 ^= p4[5]; | ||
582 | d6 ^= p4[6]; | ||
583 | d7 ^= p4[7]; | ||
584 | p1[0] = d0; /* Store the result (in bursts) */ | ||
585 | p1[1] = d1; | ||
586 | p1[2] = d2; | ||
587 | p1[3] = d3; | ||
588 | p1[4] = d4; | ||
589 | p1[5] = d5; | ||
590 | p1[6] = d6; | ||
591 | p1[7] = d7; | ||
592 | p1 += 8; | ||
593 | p2 += 8; | ||
594 | p3 += 8; | ||
595 | p4 += 8; | ||
596 | } while (--lines > 0); | ||
597 | if (lines == 0) | ||
598 | goto once_more; | ||
599 | } | ||
600 | |||
601 | static void | ||
602 | xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
603 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
604 | { | ||
605 | long lines = bytes / (sizeof (long)) / 8 - 1; | ||
606 | |||
607 | prefetchw(p1); | ||
608 | prefetch(p2); | ||
609 | prefetch(p3); | ||
610 | prefetch(p4); | ||
611 | prefetch(p5); | ||
612 | |||
613 | do { | ||
614 | register long d0, d1, d2, d3, d4, d5, d6, d7; | ||
615 | |||
616 | prefetchw(p1+8); | ||
617 | prefetch(p2+8); | ||
618 | prefetch(p3+8); | ||
619 | prefetch(p4+8); | ||
620 | prefetch(p5+8); | ||
621 | once_more: | ||
622 | d0 = p1[0]; /* Pull the stuff into registers */ | ||
623 | d1 = p1[1]; /* ... in bursts, if possible. */ | ||
624 | d2 = p1[2]; | ||
625 | d3 = p1[3]; | ||
626 | d4 = p1[4]; | ||
627 | d5 = p1[5]; | ||
628 | d6 = p1[6]; | ||
629 | d7 = p1[7]; | ||
630 | d0 ^= p2[0]; | ||
631 | d1 ^= p2[1]; | ||
632 | d2 ^= p2[2]; | ||
633 | d3 ^= p2[3]; | ||
634 | d4 ^= p2[4]; | ||
635 | d5 ^= p2[5]; | ||
636 | d6 ^= p2[6]; | ||
637 | d7 ^= p2[7]; | ||
638 | d0 ^= p3[0]; | ||
639 | d1 ^= p3[1]; | ||
640 | d2 ^= p3[2]; | ||
641 | d3 ^= p3[3]; | ||
642 | d4 ^= p3[4]; | ||
643 | d5 ^= p3[5]; | ||
644 | d6 ^= p3[6]; | ||
645 | d7 ^= p3[7]; | ||
646 | d0 ^= p4[0]; | ||
647 | d1 ^= p4[1]; | ||
648 | d2 ^= p4[2]; | ||
649 | d3 ^= p4[3]; | ||
650 | d4 ^= p4[4]; | ||
651 | d5 ^= p4[5]; | ||
652 | d6 ^= p4[6]; | ||
653 | d7 ^= p4[7]; | ||
654 | d0 ^= p5[0]; | ||
655 | d1 ^= p5[1]; | ||
656 | d2 ^= p5[2]; | ||
657 | d3 ^= p5[3]; | ||
658 | d4 ^= p5[4]; | ||
659 | d5 ^= p5[5]; | ||
660 | d6 ^= p5[6]; | ||
661 | d7 ^= p5[7]; | ||
662 | p1[0] = d0; /* Store the result (in bursts) */ | ||
663 | p1[1] = d1; | ||
664 | p1[2] = d2; | ||
665 | p1[3] = d3; | ||
666 | p1[4] = d4; | ||
667 | p1[5] = d5; | ||
668 | p1[6] = d6; | ||
669 | p1[7] = d7; | ||
670 | p1 += 8; | ||
671 | p2 += 8; | ||
672 | p3 += 8; | ||
673 | p4 += 8; | ||
674 | p5 += 8; | ||
675 | } while (--lines > 0); | ||
676 | if (lines == 0) | ||
677 | goto once_more; | ||
678 | } | ||
679 | |||
680 | static struct xor_block_template xor_block_8regs = { | ||
681 | .name = "8regs", | ||
682 | .do_2 = xor_8regs_2, | ||
683 | .do_3 = xor_8regs_3, | ||
684 | .do_4 = xor_8regs_4, | ||
685 | .do_5 = xor_8regs_5, | ||
686 | }; | ||
687 | |||
688 | static struct xor_block_template xor_block_32regs = { | ||
689 | .name = "32regs", | ||
690 | .do_2 = xor_32regs_2, | ||
691 | .do_3 = xor_32regs_3, | ||
692 | .do_4 = xor_32regs_4, | ||
693 | .do_5 = xor_32regs_5, | ||
694 | }; | ||
695 | |||
696 | static struct xor_block_template xor_block_8regs_p = { | ||
697 | .name = "8regs_prefetch", | ||
698 | .do_2 = xor_8regs_p_2, | ||
699 | .do_3 = xor_8regs_p_3, | ||
700 | .do_4 = xor_8regs_p_4, | ||
701 | .do_5 = xor_8regs_p_5, | ||
702 | }; | ||
703 | |||
704 | static struct xor_block_template xor_block_32regs_p = { | ||
705 | .name = "32regs_prefetch", | ||
706 | .do_2 = xor_32regs_p_2, | ||
707 | .do_3 = xor_32regs_p_3, | ||
708 | .do_4 = xor_32regs_p_4, | ||
709 | .do_5 = xor_32regs_p_5, | ||
710 | }; | ||
711 | |||
712 | #define XOR_TRY_TEMPLATES \ | ||
713 | do { \ | ||
714 | xor_speed(&xor_block_8regs); \ | ||
715 | xor_speed(&xor_block_8regs_p); \ | ||
716 | xor_speed(&xor_block_32regs); \ | ||
717 | xor_speed(&xor_block_32regs_p); \ | ||
718 | } while (0) | ||
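Each xor_block_template above bundles a printable name with the 2-, 3-, 4- and 5-source routines of one implementation, and XOR_TRY_TEMPLATES is expanded by the RAID xor calibration code, which times each listed template with xor_speed() and keeps the fastest for checksumming. An architecture header can include this generic file and still benchmark its own tuned code alongside the generic templates; the sketch below shows the shape of such an override (xor_block_myarch and the xor_myarch_* routines are hypothetical names, not part of this patch):

	/* Hypothetical <asm/xor.h> sketch: reuse the generic templates and
	 * benchmark one architecture-specific implementation next to them.
	 * xor_myarch_2..5 are assumed to be defined elsewhere by the arch. */
	#include <asm-generic/xor.h>

	extern void xor_myarch_2(unsigned long, unsigned long *, unsigned long *);
	extern void xor_myarch_3(unsigned long, unsigned long *, unsigned long *,
				 unsigned long *);
	extern void xor_myarch_4(unsigned long, unsigned long *, unsigned long *,
				 unsigned long *, unsigned long *);
	extern void xor_myarch_5(unsigned long, unsigned long *, unsigned long *,
				 unsigned long *, unsigned long *, unsigned long *);

	static struct xor_block_template xor_block_myarch = {
		.name = "myarch",
		.do_2 = xor_myarch_2,
		.do_3 = xor_myarch_3,
		.do_4 = xor_myarch_4,
		.do_5 = xor_myarch_5,
	};

	#undef XOR_TRY_TEMPLATES
	#define XOR_TRY_TEMPLATES			\
		do {					\
			xor_speed(&xor_block_8regs);	\
			xor_speed(&xor_block_32regs);	\
			xor_speed(&xor_block_myarch);	\
		} while (0)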