Diffstat (limited to 'include/asm-i386')

 include/asm-i386/dma-mapping.h           | 12
 include/asm-i386/mach-default/do_timer.h |  2
 include/asm-i386/mach-summit/mach_apic.h |  2
 include/asm-i386/mach-visws/do_timer.h   |  2
 include/asm-i386/mach-voyager/do_timer.h |  2
 include/asm-i386/nmi.h                   |  6
 include/asm-i386/pgtable-2level.h        |  1
 include/asm-i386/pgtable-3level.h        | 16
 include/asm-i386/pgtable.h               | 80
 include/asm-i386/smp.h                   |  4
 include/asm-i386/spinlock.h              |  4

 11 files changed, 107 insertions(+), 24 deletions(-)
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 576ae01d71c8..81999a3ebe7c 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -21,7 +21,7 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
 {
-       BUG_ON(direction == DMA_NONE);
+       BUG_ON(!valid_dma_direction(direction));
        WARN_ON(size == 0);
        flush_write_buffers();
        return virt_to_phys(ptr);
@@ -31,7 +31,7 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
 {
-       BUG_ON(direction == DMA_NONE);
+       BUG_ON(!valid_dma_direction(direction));
 }

 static inline int
@@ -40,7 +40,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
        int i;

-       BUG_ON(direction == DMA_NONE);
+       BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sg[0].length == 0);

        for (i = 0; i < nents; i++ ) {
@@ -57,7 +57,7 @@ static inline dma_addr_t
 dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
 {
-       BUG_ON(direction == DMA_NONE);
+       BUG_ON(!valid_dma_direction(direction));
        return page_to_phys(page) + offset;
 }

@@ -65,7 +65,7 @@ static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
 {
-       BUG_ON(direction == DMA_NONE);
+       BUG_ON(!valid_dma_direction(direction));
 }


@@ -73,7 +73,7 @@ static inline void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
 {
-       BUG_ON(direction == DMA_NONE);
+       BUG_ON(!valid_dma_direction(direction));
 }

 static inline void
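The new checks rely on valid_dma_direction() from the generic DMA headers instead of comparing against DMA_NONE, so out-of-range values are rejected as well. For orientation, the helper is expected to be a simple membership test along these lines (a sketch of the include/linux/dma-mapping.h definition of that era, not part of this diff):

/* Sketch: a direction is valid iff it is one of the three real transfer
 * directions; DMA_NONE and garbage values fail the test. */
static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}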
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 6312c3e79814..4182c347ef85 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -16,7 +16,7 @@

 static inline void do_timer_interrupt_hook(struct pt_regs *regs)
 {
-       do_timer(regs);
+       do_timer(1);
 #ifndef CONFIG_SMP
        update_process_times(user_mode_vm(regs));
 #endif
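The hook stops passing regs because do_timer() now takes a tick count; the regs-dependent accounting stays in the explicit update_process_times(user_mode_vm(regs)) call. The assumed prototype behind this change (declared in include/linux/sched.h, not shown in this diff) is:

/* Assumed signature after the timer rework: advance jiffies by 'ticks'. */
extern void do_timer(unsigned long ticks);

The same one-line substitution is repeated in the visws and voyager variants below.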
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index a81b05961595..254a0fe01c6a 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -88,7 +88,7 @@ static inline void clustered_apic_check(void)

 static inline int apicid_to_node(int logical_apicid)
 {
-       return logical_apicid >> 5;     /* 2 clusterids per CEC */
+       return apicid_2_node[logical_apicid];
 }

 /* Mapping from cpu number to logical apicid */
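apicid_to_node() now consults a boot-time lookup table rather than deriving the node from the APIC ID's cluster bits. The table is declared as extern u8 apicid_2_node[] in smp.h later in this diff; its definition lives in arch boot code and is only sketched here, with the array size, the function name and the fill site being assumptions rather than part of this commit:

#define MAX_APICID 256                  /* assumed size for the sketch */

u8 apicid_2_node[MAX_APICID];           /* APIC ID -> NUMA node, filled at boot */

/* Illustrative fill step, run once per CPU while parsing the platform
 * topology (e.g. Summit/SRAT information). */
static void __init record_apicid_node_sketch(int apicid, int node)
{
        apicid_2_node[apicid] = node;
}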
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h
index 95568e6ca91c..8db618c5a72b 100644
--- a/include/asm-i386/mach-visws/do_timer.h
+++ b/include/asm-i386/mach-visws/do_timer.h
@@ -9,7 +9,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
        /* Clear the interrupt */
        co_cpu_write(CO_CPU_STAT,co_cpu_read(CO_CPU_STAT) & ~CO_STAT_TIMEINTR);

-       do_timer(regs);
+       do_timer(1);
 #ifndef CONFIG_SMP
        update_process_times(user_mode_vm(regs));
 #endif
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index eaf518098981..099fe9f5c1b2 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -3,7 +3,7 @@

 static inline void do_timer_interrupt_hook(struct pt_regs *regs)
 {
-       do_timer(regs);
+       do_timer(1);
 #ifndef CONFIG_SMP
        update_process_times(user_mode_vm(regs));
 #endif
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 303bcd4592bb..269d315719ca 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -36,4 +36,10 @@ extern unsigned int nmi_watchdog;
 #define NMI_LOCAL_APIC  2
 #define NMI_INVALID     3

+struct ctl_table;
+struct file;
+extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
+                       void __user *, size_t *, loff_t *);
+extern int unknown_nmi_panic;
+
 #endif /* ASM_NMI_H */
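proc_nmi_enabled() is a sysctl handler, and the forward declarations let kernel/sysctl.c reference it without pulling in APIC headers. A hedged sketch of the kind of table entry that would route /proc/sys/kernel/nmi_watchdog writes through it (the ctl_name constant, the backing variable and the file mode are assumptions, not taken from this commit):

#include <linux/sysctl.h>

/* Illustrative only: wiring proc_nmi_enabled() into a sysctl table. */
static ctl_table nmi_sysctl_sketch[] = {
        {
                .ctl_name       = KERN_NMI_WATCHDOG,    /* assumed constant */
                .procname       = "nmi_watchdog",
                .data           = &nmi_watchdog,        /* assumed backing variable */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_nmi_enabled,
        },
        { .ctl_name = 0 }
};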
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 201c86a6711e..8d8d3b9ecdb0 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -16,6 +16,7 @@
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

 #define pte_clear(mm,addr,xp)  do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 0d899173232e..c2d701ea35be 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -58,7 +58,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

-#define __HAVE_ARCH_SET_PTE_ATOMIC
+/*
+ * Since this is only called on user PTEs, and the page fault handler
+ * must handle the already racy situation of simultaneous page faults,
+ * we are justified in merely clearing the PTE present bit, followed
+ * by a set.  The ordering here is important.
+ */
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+       ptep->pte_low = 0;
+       smp_wmb();
+       ptep->pte_high = pte.pte_high;
+       smp_wmb();
+       ptep->pte_low = pte.pte_low;
+}
+
 #define set_pte_atomic(pteptr,pteval) \
                set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
 #define set_pmd(pmdptr,pmdval) \
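To make the comment's ordering requirement concrete, here is the set of PTE snapshots another CPU's hardware page-table walker can observe while set_pte_present() runs (a worked illustration, not code from the commit):

/*
 * Stores, in order:  pte_low = 0;  pte_high = new.high;  pte_low = new.low;
 *
 * Possible snapshots on another CPU:
 *   { old.low, old.high }   - before the first store: old and consistent
 *   { 0,       old.high }   - not present, the walker faults and retries
 *   { 0,       new.high }   - not present, the walker faults and retries
 *   { new.low, new.high }   - new and consistent
 *
 * Writing pte_high first without clearing pte_low would instead allow the
 * present snapshot { old.low, new.high }, i.e. a bogus translation.
 */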
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 541b3e234335..7d398f493dde 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -247,6 +247,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 #endif

 /*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces.  It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush.  The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)             do { } while (0)
+#define pte_update_defer(mm, addr, ptep)       do { } while (0)
+
+
+/*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
  * will do the accessed bit for us, and we don't want to
@@ -258,25 +275,54 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 do {                                                                   \
        if (dirty) {                                                    \
                (ptep)->pte_low = (entry).pte_low;                      \
+               pte_update_defer((vma)->vm_mm, (address), (ptep));      \
                flush_tlb_page(vma, address);                           \
        }                                                               \
 } while (0)

+/*
+ * We don't actually have these, but we want to advertise them so that
+ * we can encompass the flush here.
+ */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-       if (!pte_dirty(*ptep))
-               return 0;
-       return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-}
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-       if (!pte_young(*ptep))
-               return 0;
-       return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
-}
+
+/*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+ * must be a present->present transition.
+ */
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(vma, address, ptep, pteval)                     \
+do {                                                                   \
+       set_pte_present((vma)->vm_mm, address, ptep, pteval);           \
+       flush_tlb_page(vma, address);                                   \
+} while (0)
+
+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(vma, address, ptep)                     \
+({                                                                     \
+       int __dirty;                                                    \
+       __dirty = pte_dirty(*(ptep));                                   \
+       if (__dirty) {                                                  \
+               clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);           \
+               pte_update_defer((vma)->vm_mm, (address), (ptep));      \
+               flush_tlb_page(vma, address);                           \
+       }                                                               \
+       __dirty;                                                        \
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep)                     \
+({                                                                     \
+       int __young;                                                    \
+       __young = pte_young(*(ptep));                                   \
+       if (__young) {                                                  \
+               clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);        \
+               pte_update_defer((vma)->vm_mm, (address), (ptep));      \
+               flush_tlb_page(vma, address);                           \
+       }                                                               \
+       __young;                                                        \
+})

 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
@@ -295,6 +341,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+       pte_update(mm, addr, ptep);
 }

 /*
@@ -426,6 +473,13 @@ extern pte_t *lookup_address(unsigned long address);
 #define pte_unmap_nested(pte) do { } while (0)
 #endif

+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr)                                  \
+do {                                                                   \
+       pte_clear(&init_mm, vaddr, ptep);                               \
+       __flush_tlb_one(vaddr);                                         \
+} while (0)
+
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
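On bare hardware the pte_update()/pte_update_defer() hooks added above compile away; the long comment states the contract a shadow-paging hypervisor port would rely on. A hypothetical override honouring that contract might look like the following, where queue_shadow_pte_update() is an invented name used purely for illustration:

/* Hypothetical hypervisor-aware variants: notify the shadow page-table code
 * about every PTE write not done through set_pte, either immediately or
 * batched until the TLB flush that must follow under the same lock. */
#define pte_update(mm, addr, ptep)                                     \
        queue_shadow_pte_update((mm), (addr), (ptep), 0 /* now */)
#define pte_update_defer(mm, addr, ptep)                               \
        queue_shadow_pte_update((mm), (addr), (ptep), 1 /* at flush */)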
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 32ac8c91d5c5..6aa1206f6e2a 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -46,6 +46,8 @@ extern u8 x86_cpu_to_apicid[];

 #define cpu_physical_id(cpu)   x86_cpu_to_apicid[cpu]

+extern u8 apicid_2_node[];
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
@@ -82,6 +84,7 @@ static inline int hard_smp_processor_id(void)
 #endif
 #endif

+extern int safe_smp_processor_id(void);
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern unsigned int num_processors;
@@ -90,6 +93,7 @@ extern unsigned int num_processors;

 #else /* CONFIG_SMP */

+#define safe_smp_processor_id()        0
 #define cpu_physical_id(cpu)           boot_cpu_physical_apicid

 #define NO_PROC_ID             0xFF            /* No processor magic marker */
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index b0b3043f05e1..c18b71fae6b3 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -205,4 +205,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
                                 : "+m" (rw->lock) : : "memory");
 }

+#define _raw_spin_relax(lock)  cpu_relax()
+#define _raw_read_relax(lock)  cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
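The new *_relax() hooks let the generic contended-lock loops ask the architecture how to idle while spinning; on i386 they all map to cpu_relax(), the rep;nop pause hint. A condensed sketch of the kind of slow path in kernel/spinlock.c that invokes them (simplified from the preemptible BUILD_LOCK_OPS loop, not the literal macro):

/* Condensed illustration: back off with the relax hook while the lock
 * is unavailable, then retry the atomic trylock. */
static void spin_lock_slowpath_sketch(spinlock_t *lock)
{
        for (;;) {
                preempt_disable();
                if (likely(_raw_spin_trylock(lock)))
                        return;                         /* acquired */
                preempt_enable();
                while (!spin_can_lock(lock))
                        _raw_spin_relax(&lock->raw_lock); /* i386: cpu_relax() */
        }
}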