diff options
Diffstat (limited to 'include')
406 files changed, 10058 insertions, 10702 deletions
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h index b7bf68d0407b..f44129abc02c 100644 --- a/include/asm-alpha/ide.h +++ b/include/asm-alpha/ide.h | |||
@@ -13,9 +13,6 @@ | |||
13 | 13 | ||
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | 15 | ||
16 | |||
17 | #define IDE_ARCH_OBSOLETE_DEFAULTS | ||
18 | |||
19 | static inline int ide_default_irq(unsigned long base) | 16 | static inline int ide_default_irq(unsigned long base) |
20 | { | 17 | { |
21 | switch (base) { | 18 | switch (base) { |
@@ -40,14 +37,6 @@ static inline unsigned long ide_default_io_base(int index) | |||
40 | } | 37 | } |
41 | } | 38 | } |
42 | 39 | ||
43 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
44 | |||
45 | #ifdef CONFIG_PCI | ||
46 | #define ide_init_default_irq(base) (0) | ||
47 | #else | ||
48 | #define ide_init_default_irq(base) ide_default_irq(base) | ||
49 | #endif | ||
50 | |||
51 | #include <asm-generic/ide_iops.h> | 40 | #include <asm-generic/ide_iops.h> |
52 | 41 | ||
53 | #endif /* __KERNEL__ */ | 42 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h index f1e9278a9fe2..d9b2034ed1d2 100644 --- a/include/asm-alpha/semaphore.h +++ b/include/asm-alpha/semaphore.h | |||
@@ -1,149 +1 @@ | |||
1 | #ifndef _ALPHA_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _ALPHA_SEMAPHORE_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores.. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * (C) Copyright 1996, 2000 Richard Henderson | ||
9 | */ | ||
10 | |||
11 | #include <asm/current.h> | ||
12 | #include <asm/system.h> | ||
13 | #include <asm/atomic.h> | ||
14 | #include <linux/compiler.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/rwsem.h> | ||
17 | |||
18 | struct semaphore { | ||
19 | atomic_t count; | ||
20 | wait_queue_head_t wait; | ||
21 | }; | ||
22 | |||
23 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
24 | { \ | ||
25 | .count = ATOMIC_INIT(n), \ | ||
26 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ | ||
27 | } | ||
28 | |||
29 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
31 | |||
32 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
33 | |||
34 | static inline void sema_init(struct semaphore *sem, int val) | ||
35 | { | ||
36 | /* | ||
37 | * Logically, | ||
38 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
39 | * except that gcc produces better initializing by parts yet. | ||
40 | */ | ||
41 | |||
42 | atomic_set(&sem->count, val); | ||
43 | init_waitqueue_head(&sem->wait); | ||
44 | } | ||
45 | |||
46 | static inline void init_MUTEX (struct semaphore *sem) | ||
47 | { | ||
48 | sema_init(sem, 1); | ||
49 | } | ||
50 | |||
51 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
52 | { | ||
53 | sema_init(sem, 0); | ||
54 | } | ||
55 | |||
56 | extern void down(struct semaphore *); | ||
57 | extern void __down_failed(struct semaphore *); | ||
58 | extern int down_interruptible(struct semaphore *); | ||
59 | extern int __down_failed_interruptible(struct semaphore *); | ||
60 | extern int down_trylock(struct semaphore *); | ||
61 | extern void up(struct semaphore *); | ||
62 | extern void __up_wakeup(struct semaphore *); | ||
63 | |||
64 | /* | ||
65 | * Hidden out of line code is fun, but extremely messy. Rely on newer | ||
66 | * compilers to do a respectable job with this. The contention cases | ||
67 | * are handled out of line in arch/alpha/kernel/semaphore.c. | ||
68 | */ | ||
69 | |||
70 | static inline void __down(struct semaphore *sem) | ||
71 | { | ||
72 | long count; | ||
73 | might_sleep(); | ||
74 | count = atomic_dec_return(&sem->count); | ||
75 | if (unlikely(count < 0)) | ||
76 | __down_failed(sem); | ||
77 | } | ||
78 | |||
79 | static inline int __down_interruptible(struct semaphore *sem) | ||
80 | { | ||
81 | long count; | ||
82 | might_sleep(); | ||
83 | count = atomic_dec_return(&sem->count); | ||
84 | if (unlikely(count < 0)) | ||
85 | return __down_failed_interruptible(sem); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * down_trylock returns 0 on success, 1 if we failed to get the lock. | ||
91 | */ | ||
92 | |||
93 | static inline int __down_trylock(struct semaphore *sem) | ||
94 | { | ||
95 | long ret; | ||
96 | |||
97 | /* "Equivalent" C: | ||
98 | |||
99 | do { | ||
100 | ret = ldl_l; | ||
101 | --ret; | ||
102 | if (ret < 0) | ||
103 | break; | ||
104 | ret = stl_c = ret; | ||
105 | } while (ret == 0); | ||
106 | */ | ||
107 | __asm__ __volatile__( | ||
108 | "1: ldl_l %0,%1\n" | ||
109 | " subl %0,1,%0\n" | ||
110 | " blt %0,2f\n" | ||
111 | " stl_c %0,%1\n" | ||
112 | " beq %0,3f\n" | ||
113 | " mb\n" | ||
114 | "2:\n" | ||
115 | ".subsection 2\n" | ||
116 | "3: br 1b\n" | ||
117 | ".previous" | ||
118 | : "=&r" (ret), "=m" (sem->count) | ||
119 | : "m" (sem->count)); | ||
120 | |||
121 | return ret < 0; | ||
122 | } | ||
123 | |||
124 | static inline void __up(struct semaphore *sem) | ||
125 | { | ||
126 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) | ||
127 | __up_wakeup(sem); | ||
128 | } | ||
129 | |||
130 | #if !defined(CONFIG_DEBUG_SEMAPHORE) | ||
131 | extern inline void down(struct semaphore *sem) | ||
132 | { | ||
133 | __down(sem); | ||
134 | } | ||
135 | extern inline int down_interruptible(struct semaphore *sem) | ||
136 | { | ||
137 | return __down_interruptible(sem); | ||
138 | } | ||
139 | extern inline int down_trylock(struct semaphore *sem) | ||
140 | { | ||
141 | return __down_trylock(sem); | ||
142 | } | ||
143 | extern inline void up(struct semaphore *sem) | ||
144 | { | ||
145 | __up(sem); | ||
146 | } | ||
147 | #endif | ||
148 | |||
149 | #endif | ||
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h index efd9a5eb1008..90d14ee564f5 100644 --- a/include/asm-arm/arch-iop13xx/adma.h +++ b/include/asm-arm/arch-iop13xx/adma.h | |||
@@ -454,11 +454,6 @@ static inline void iop_chan_append(struct iop_adma_chan *chan) | |||
454 | __raw_writel(adma_accr, ADMA_ACCR(chan)); | 454 | __raw_writel(adma_accr, ADMA_ACCR(chan)); |
455 | } | 455 | } |
456 | 456 | ||
457 | static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan) | ||
458 | { | ||
459 | do { } while (0); | ||
460 | } | ||
461 | |||
462 | static inline u32 iop_chan_get_status(struct iop_adma_chan *chan) | 457 | static inline u32 iop_chan_get_status(struct iop_adma_chan *chan) |
463 | { | 458 | { |
464 | return __raw_readl(ADMA_ACSR(chan)); | 459 | return __raw_readl(ADMA_ACSR(chan)); |
diff --git a/include/asm-arm/arch-s3c2410/spi.h b/include/asm-arm/arch-s3c2410/spi.h index 7ca0ed97a6d0..352d33860b63 100644 --- a/include/asm-arm/arch-s3c2410/spi.h +++ b/include/asm-arm/arch-s3c2410/spi.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | struct s3c2410_spi_info { | 16 | struct s3c2410_spi_info { |
17 | unsigned long pin_cs; /* simple gpio cs */ | 17 | unsigned long pin_cs; /* simple gpio cs */ |
18 | unsigned int num_cs; /* total chipselects */ | ||
18 | 19 | ||
19 | void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); | 20 | void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); |
20 | }; | 21 | }; |
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h index 5c529e6a5e3b..84d635b0a71a 100644 --- a/include/asm-arm/hardware/iop3xx-adma.h +++ b/include/asm-arm/hardware/iop3xx-adma.h | |||
@@ -767,20 +767,12 @@ static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) | |||
767 | static inline void iop_chan_append(struct iop_adma_chan *chan) | 767 | static inline void iop_chan_append(struct iop_adma_chan *chan) |
768 | { | 768 | { |
769 | u32 dma_chan_ctrl; | 769 | u32 dma_chan_ctrl; |
770 | /* workaround dropped interrupts on 3xx */ | ||
771 | mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3)); | ||
772 | 770 | ||
773 | dma_chan_ctrl = __raw_readl(DMA_CCR(chan)); | 771 | dma_chan_ctrl = __raw_readl(DMA_CCR(chan)); |
774 | dma_chan_ctrl |= 0x2; | 772 | dma_chan_ctrl |= 0x2; |
775 | __raw_writel(dma_chan_ctrl, DMA_CCR(chan)); | 773 | __raw_writel(dma_chan_ctrl, DMA_CCR(chan)); |
776 | } | 774 | } |
777 | 775 | ||
778 | static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan) | ||
779 | { | ||
780 | if (!busy) | ||
781 | del_timer(&chan->cleanup_watchdog); | ||
782 | } | ||
783 | |||
784 | static inline u32 iop_chan_get_status(struct iop_adma_chan *chan) | 776 | static inline u32 iop_chan_get_status(struct iop_adma_chan *chan) |
785 | { | 777 | { |
786 | return __raw_readl(DMA_CSR(chan)); | 778 | return __raw_readl(DMA_CSR(chan)); |
diff --git a/include/asm-arm/hardware/iop_adma.h b/include/asm-arm/hardware/iop_adma.h index ca8e71f44346..cb7e3611bcba 100644 --- a/include/asm-arm/hardware/iop_adma.h +++ b/include/asm-arm/hardware/iop_adma.h | |||
@@ -51,7 +51,6 @@ struct iop_adma_device { | |||
51 | * @common: common dmaengine channel object members | 51 | * @common: common dmaengine channel object members |
52 | * @last_used: place holder for allocation to continue from where it left off | 52 | * @last_used: place holder for allocation to continue from where it left off |
53 | * @all_slots: complete domain of slots usable by the channel | 53 | * @all_slots: complete domain of slots usable by the channel |
54 | * @cleanup_watchdog: workaround missed interrupts on iop3xx | ||
55 | * @slots_allocated: records the actual size of the descriptor slot pool | 54 | * @slots_allocated: records the actual size of the descriptor slot pool |
56 | * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs | 55 | * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs |
57 | */ | 56 | */ |
@@ -65,7 +64,6 @@ struct iop_adma_chan { | |||
65 | struct dma_chan common; | 64 | struct dma_chan common; |
66 | struct iop_adma_desc_slot *last_used; | 65 | struct iop_adma_desc_slot *last_used; |
67 | struct list_head all_slots; | 66 | struct list_head all_slots; |
68 | struct timer_list cleanup_watchdog; | ||
69 | int slots_allocated; | 67 | int slots_allocated; |
70 | struct tasklet_struct irq_tasklet; | 68 | struct tasklet_struct irq_tasklet; |
71 | }; | 69 | }; |
diff --git a/include/asm-arm/ide.h b/include/asm-arm/ide.h index f348fcf3150b..88f4d231ce4f 100644 --- a/include/asm-arm/ide.h +++ b/include/asm-arm/ide.h | |||
@@ -17,14 +17,6 @@ | |||
17 | #define MAX_HWIFS 4 | 17 | #define MAX_HWIFS 4 |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #if !defined(CONFIG_ARCH_L7200) | ||
21 | # ifdef CONFIG_ARCH_CLPS7500 | ||
22 | # define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
23 | # else | ||
24 | # define ide_default_io_ctl(base) (0) | ||
25 | # endif | ||
26 | #endif /* !ARCH_L7200 */ | ||
27 | |||
28 | #define __ide_mm_insw(port,addr,len) readsw(port,addr,len) | 20 | #define __ide_mm_insw(port,addr,len) readsw(port,addr,len) |
29 | #define __ide_mm_insl(port,addr,len) readsl(port,addr,len) | 21 | #define __ide_mm_insl(port,addr,len) readsl(port,addr,len) |
30 | #define __ide_mm_outsw(port,addr,len) writesw(port,addr,len) | 22 | #define __ide_mm_outsw(port,addr,len) writesw(port,addr,len) |
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h deleted file mode 100644 index 1d7f1987edb9..000000000000 --- a/include/asm-arm/semaphore-helper.h +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | #ifndef ASMARM_SEMAPHORE_HELPER_H | ||
2 | #define ASMARM_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * These two _must_ execute atomically wrt each other. | ||
6 | */ | ||
7 | static inline void wake_one_more(struct semaphore * sem) | ||
8 | { | ||
9 | unsigned long flags; | ||
10 | |||
11 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
12 | if (atomic_read(&sem->count) <= 0) | ||
13 | sem->waking++; | ||
14 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
15 | } | ||
16 | |||
17 | static inline int waking_non_zero(struct semaphore *sem) | ||
18 | { | ||
19 | unsigned long flags; | ||
20 | int ret = 0; | ||
21 | |||
22 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
23 | if (sem->waking > 0) { | ||
24 | sem->waking--; | ||
25 | ret = 1; | ||
26 | } | ||
27 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
28 | return ret; | ||
29 | } | ||
30 | |||
31 | /* | ||
32 | * waking non zero interruptible | ||
33 | * 1 got the lock | ||
34 | * 0 go to sleep | ||
35 | * -EINTR interrupted | ||
36 | * | ||
37 | * We must undo the sem->count down_interruptible() increment while we are | ||
38 | * protected by the spinlock in order to make this atomic_inc() with the | ||
39 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
40 | */ | ||
41 | static inline int waking_non_zero_interruptible(struct semaphore *sem, | ||
42 | struct task_struct *tsk) | ||
43 | { | ||
44 | unsigned long flags; | ||
45 | int ret = 0; | ||
46 | |||
47 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
48 | if (sem->waking > 0) { | ||
49 | sem->waking--; | ||
50 | ret = 1; | ||
51 | } else if (signal_pending(tsk)) { | ||
52 | atomic_inc(&sem->count); | ||
53 | ret = -EINTR; | ||
54 | } | ||
55 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * waking_non_zero_try_lock: | ||
61 | * 1 failed to lock | ||
62 | * 0 got the lock | ||
63 | * | ||
64 | * We must undo the sem->count down_interruptible() increment while we are | ||
65 | * protected by the spinlock in order to make this atomic_inc() with the | ||
66 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
67 | */ | ||
68 | static inline int waking_non_zero_trylock(struct semaphore *sem) | ||
69 | { | ||
70 | unsigned long flags; | ||
71 | int ret = 1; | ||
72 | |||
73 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
74 | if (sem->waking <= 0) | ||
75 | atomic_inc(&sem->count); | ||
76 | else { | ||
77 | sem->waking--; | ||
78 | ret = 0; | ||
79 | } | ||
80 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | #endif | ||
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h index 1c8b441f89e3..d9b2034ed1d2 100644 --- a/include/asm-arm/semaphore.h +++ b/include/asm-arm/semaphore.h | |||
@@ -1,98 +1 @@ | |||
1 | /* | #include <linux/semaphore.h> | |
2 | * linux/include/asm-arm/semaphore.h | ||
3 | */ | ||
4 | #ifndef __ASM_ARM_SEMAPHORE_H | ||
5 | #define __ASM_ARM_SEMAPHORE_H | ||
6 | |||
7 | #include <linux/linkage.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/wait.h> | ||
10 | #include <linux/rwsem.h> | ||
11 | |||
12 | #include <asm/atomic.h> | ||
13 | #include <asm/locks.h> | ||
14 | |||
15 | struct semaphore { | ||
16 | atomic_t count; | ||
17 | int sleepers; | ||
18 | wait_queue_head_t wait; | ||
19 | }; | ||
20 | |||
21 | #define __SEMAPHORE_INIT(name, cnt) \ | ||
22 | { \ | ||
23 | .count = ATOMIC_INIT(cnt), \ | ||
24 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ | ||
25 | } | ||
26 | |||
27 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
28 | struct semaphore name = __SEMAPHORE_INIT(name,count) | ||
29 | |||
30 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
31 | |||
32 | static inline void sema_init(struct semaphore *sem, int val) | ||
33 | { | ||
34 | atomic_set(&sem->count, val); | ||
35 | sem->sleepers = 0; | ||
36 | init_waitqueue_head(&sem->wait); | ||
37 | } | ||
38 | |||
39 | static inline void init_MUTEX(struct semaphore *sem) | ||
40 | { | ||
41 | sema_init(sem, 1); | ||
42 | } | ||
43 | |||
44 | static inline void init_MUTEX_LOCKED(struct semaphore *sem) | ||
45 | { | ||
46 | sema_init(sem, 0); | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * special register calling convention | ||
51 | */ | ||
52 | asmlinkage void __down_failed(void); | ||
53 | asmlinkage int __down_interruptible_failed(void); | ||
54 | asmlinkage int __down_trylock_failed(void); | ||
55 | asmlinkage void __up_wakeup(void); | ||
56 | |||
57 | extern void __down(struct semaphore * sem); | ||
58 | extern int __down_interruptible(struct semaphore * sem); | ||
59 | extern int __down_trylock(struct semaphore * sem); | ||
60 | extern void __up(struct semaphore * sem); | ||
61 | |||
62 | /* | ||
63 | * This is ugly, but we want the default case to fall through. | ||
64 | * "__down" is the actual routine that waits... | ||
65 | */ | ||
66 | static inline void down(struct semaphore * sem) | ||
67 | { | ||
68 | might_sleep(); | ||
69 | __down_op(sem, __down_failed); | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * This is ugly, but we want the default case to fall through. | ||
74 | * "__down_interruptible" is the actual routine that waits... | ||
75 | */ | ||
76 | static inline int down_interruptible (struct semaphore * sem) | ||
77 | { | ||
78 | might_sleep(); | ||
79 | return __down_op_ret(sem, __down_interruptible_failed); | ||
80 | } | ||
81 | |||
82 | static inline int down_trylock(struct semaphore *sem) | ||
83 | { | ||
84 | return __down_op_ret(sem, __down_trylock_failed); | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Note! This is subtle. We jump to wake people up only if | ||
89 | * the semaphore was negative (== somebody was waiting on it). | ||
90 | * The default case (no contention) will result in NO | ||
91 | * jumps for both down() and up(). | ||
92 | */ | ||
93 | static inline void up(struct semaphore * sem) | ||
94 | { | ||
95 | __up_op(sem, __up_wakeup); | ||
96 | } | ||
97 | |||
98 | #endif | ||
diff --git a/include/asm-avr32/semaphore.h b/include/asm-avr32/semaphore.h index feaf1d453386..d9b2034ed1d2 100644 --- a/include/asm-avr32/semaphore.h +++ b/include/asm-avr32/semaphore.h | |||
@@ -1,108 +1 @@ | |||
1 | /* | #include <linux/semaphore.h> | |
2 | * SMP- and interrupt-safe semaphores. | ||
3 | * | ||
4 | * Copyright (C) 2006 Atmel Corporation | ||
5 | * | ||
6 | * Based on include/asm-i386/semaphore.h | ||
7 | * Copyright (C) 1996 Linus Torvalds | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | #ifndef __ASM_AVR32_SEMAPHORE_H | ||
14 | #define __ASM_AVR32_SEMAPHORE_H | ||
15 | |||
16 | #include <linux/linkage.h> | ||
17 | |||
18 | #include <asm/system.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <linux/wait.h> | ||
21 | #include <linux/rwsem.h> | ||
22 | |||
23 | struct semaphore { | ||
24 | atomic_t count; | ||
25 | int sleepers; | ||
26 | wait_queue_head_t wait; | ||
27 | }; | ||
28 | |||
29 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
30 | { \ | ||
31 | .count = ATOMIC_INIT(n), \ | ||
32 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
33 | } | ||
34 | |||
35 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
36 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
37 | |||
38 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
39 | |||
40 | static inline void sema_init (struct semaphore *sem, int val) | ||
41 | { | ||
42 | atomic_set(&sem->count, val); | ||
43 | sem->sleepers = 0; | ||
44 | init_waitqueue_head(&sem->wait); | ||
45 | } | ||
46 | |||
47 | static inline void init_MUTEX (struct semaphore *sem) | ||
48 | { | ||
49 | sema_init(sem, 1); | ||
50 | } | ||
51 | |||
52 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
53 | { | ||
54 | sema_init(sem, 0); | ||
55 | } | ||
56 | |||
57 | void __down(struct semaphore * sem); | ||
58 | int __down_interruptible(struct semaphore * sem); | ||
59 | void __up(struct semaphore * sem); | ||
60 | |||
61 | /* | ||
62 | * This is ugly, but we want the default case to fall through. | ||
63 | * "__down_failed" is a special asm handler that calls the C | ||
64 | * routine that actually waits. See arch/i386/kernel/semaphore.c | ||
65 | */ | ||
66 | static inline void down(struct semaphore * sem) | ||
67 | { | ||
68 | might_sleep(); | ||
69 | if (unlikely(atomic_dec_return (&sem->count) < 0)) | ||
70 | __down (sem); | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Interruptible try to acquire a semaphore. If we obtained | ||
75 | * it, return zero. If we were interrupted, returns -EINTR | ||
76 | */ | ||
77 | static inline int down_interruptible(struct semaphore * sem) | ||
78 | { | ||
79 | int ret = 0; | ||
80 | |||
81 | might_sleep(); | ||
82 | if (unlikely(atomic_dec_return (&sem->count) < 0)) | ||
83 | ret = __down_interruptible (sem); | ||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Non-blockingly attempt to down() a semaphore. | ||
89 | * Returns zero if we acquired it | ||
90 | */ | ||
91 | static inline int down_trylock(struct semaphore * sem) | ||
92 | { | ||
93 | return atomic_dec_if_positive(&sem->count) < 0; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * Note! This is subtle. We jump to wake people up only if | ||
98 | * the semaphore was negative (== somebody was waiting on it). | ||
99 | * The default case (no contention) will result in NO | ||
100 | * jumps for both down() and up(). | ||
101 | */ | ||
102 | static inline void up(struct semaphore * sem) | ||
103 | { | ||
104 | if (unlikely(atomic_inc_return (&sem->count) <= 0)) | ||
105 | __up (sem); | ||
106 | } | ||
107 | |||
108 | #endif /*__ASM_AVR32_SEMAPHORE_H */ | ||
diff --git a/include/asm-blackfin/ide.h b/include/asm-blackfin/ide.h index 121e272581d6..5b88de115bf4 100644 --- a/include/asm-blackfin/ide.h +++ b/include/asm-blackfin/ide.h | |||
@@ -19,10 +19,6 @@ | |||
19 | 19 | ||
20 | #define MAX_HWIFS 1 | 20 | #define MAX_HWIFS 1 |
21 | 21 | ||
22 | /* Legacy ... BLK_DEV_IDECS */ | ||
23 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
24 | |||
25 | |||
26 | #include <asm-generic/ide_iops.h> | 22 | #include <asm-generic/ide_iops.h> |
27 | 23 | ||
28 | /****************************************************************************/ | 24 | /****************************************************************************/ |
diff --git a/include/asm-blackfin/semaphore-helper.h b/include/asm-blackfin/semaphore-helper.h deleted file mode 100644 index 9082b0dc3eb5..000000000000 --- a/include/asm-blackfin/semaphore-helper.h +++ /dev/null | |||
@@ -1,82 +0,0 @@ | |||
1 | /* Based on M68K version, Lineo Inc. May 2001 */ | ||
2 | |||
3 | #ifndef _BFIN_SEMAPHORE_HELPER_H | ||
4 | #define _BFIN_SEMAPHORE_HELPER_H | ||
5 | |||
6 | /* | ||
7 | * SMP- and interrupt-safe semaphores helper functions. | ||
8 | * | ||
9 | * (C) Copyright 1996 Linus Torvalds | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <asm/errno.h> | ||
14 | |||
15 | /* | ||
16 | * These two _must_ execute atomically wrt each other. | ||
17 | */ | ||
18 | static inline void wake_one_more(struct semaphore *sem) | ||
19 | { | ||
20 | atomic_inc(&sem->waking); | ||
21 | } | ||
22 | |||
23 | static inline int waking_non_zero(struct semaphore *sem) | ||
24 | { | ||
25 | int ret; | ||
26 | unsigned long flags = 0; | ||
27 | |||
28 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
29 | ret = 0; | ||
30 | if (atomic_read(&sem->waking) > 0) { | ||
31 | atomic_dec(&sem->waking); | ||
32 | ret = 1; | ||
33 | } | ||
34 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
35 | return ret; | ||
36 | } | ||
37 | |||
38 | /* | ||
39 | * waking_non_zero_interruptible: | ||
40 | * 1 got the lock | ||
41 | * 0 go to sleep | ||
42 | * -EINTR interrupted | ||
43 | */ | ||
44 | static inline int waking_non_zero_interruptible(struct semaphore *sem, | ||
45 | struct task_struct *tsk) | ||
46 | { | ||
47 | int ret = 0; | ||
48 | unsigned long flags = 0; | ||
49 | |||
50 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
51 | if (atomic_read(&sem->waking) > 0) { | ||
52 | atomic_dec(&sem->waking); | ||
53 | ret = 1; | ||
54 | } else if (signal_pending(tsk)) { | ||
55 | atomic_inc(&sem->count); | ||
56 | ret = -EINTR; | ||
57 | } | ||
58 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
59 | return ret; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * waking_non_zero_trylock: | ||
64 | * 1 failed to lock | ||
65 | * 0 got the lock | ||
66 | */ | ||
67 | static inline int waking_non_zero_trylock(struct semaphore *sem) | ||
68 | { | ||
69 | int ret = 1; | ||
70 | unsigned long flags = 0; | ||
71 | |||
72 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
73 | if (atomic_read(&sem->waking) > 0) { | ||
74 | atomic_dec(&sem->waking); | ||
75 | ret = 0; | ||
76 | } else | ||
77 | atomic_inc(&sem->count); | ||
78 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | #endif /* _BFIN_SEMAPHORE_HELPER_H */ | ||
diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h index 533f90fb2e4e..d9b2034ed1d2 100644 --- a/include/asm-blackfin/semaphore.h +++ b/include/asm-blackfin/semaphore.h | |||
@@ -1,105 +1 @@ | |||
1 | #ifndef _BFIN_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _BFIN_SEMAPHORE_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | #include <linux/linkage.h> | ||
7 | #include <linux/wait.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/rwsem.h> | ||
10 | #include <asm/atomic.h> | ||
11 | |||
12 | /* | ||
13 | * Interrupt-safe semaphores.. | ||
14 | * | ||
15 | * (C) Copyright 1996 Linus Torvalds | ||
16 | * | ||
17 | * BFIN version by akbar hussain Lineo Inc April 2001 | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | struct semaphore { | ||
22 | atomic_t count; | ||
23 | int sleepers; | ||
24 | wait_queue_head_t wait; | ||
25 | }; | ||
26 | |||
27 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
28 | { \ | ||
29 | .count = ATOMIC_INIT(n), \ | ||
30 | .sleepers = 0, \ | ||
31 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
32 | } | ||
33 | |||
34 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
35 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
36 | |||
37 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
38 | |||
39 | static inline void sema_init(struct semaphore *sem, int val) | ||
40 | { | ||
41 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val); | ||
42 | } | ||
43 | |||
44 | static inline void init_MUTEX(struct semaphore *sem) | ||
45 | { | ||
46 | sema_init(sem, 1); | ||
47 | } | ||
48 | |||
49 | static inline void init_MUTEX_LOCKED(struct semaphore *sem) | ||
50 | { | ||
51 | sema_init(sem, 0); | ||
52 | } | ||
53 | |||
54 | asmlinkage void __down(struct semaphore *sem); | ||
55 | asmlinkage int __down_interruptible(struct semaphore *sem); | ||
56 | asmlinkage int __down_trylock(struct semaphore *sem); | ||
57 | asmlinkage void __up(struct semaphore *sem); | ||
58 | |||
59 | extern spinlock_t semaphore_wake_lock; | ||
60 | |||
61 | /* | ||
62 | * This is ugly, but we want the default case to fall through. | ||
63 | * "down_failed" is a special asm handler that calls the C | ||
64 | * routine that actually waits. | ||
65 | */ | ||
66 | static inline void down(struct semaphore *sem) | ||
67 | { | ||
68 | might_sleep(); | ||
69 | if (atomic_dec_return(&sem->count) < 0) | ||
70 | __down(sem); | ||
71 | } | ||
72 | |||
73 | static inline int down_interruptible(struct semaphore *sem) | ||
74 | { | ||
75 | int ret = 0; | ||
76 | |||
77 | might_sleep(); | ||
78 | if (atomic_dec_return(&sem->count) < 0) | ||
79 | ret = __down_interruptible(sem); | ||
80 | return (ret); | ||
81 | } | ||
82 | |||
83 | static inline int down_trylock(struct semaphore *sem) | ||
84 | { | ||
85 | int ret = 0; | ||
86 | |||
87 | if (atomic_dec_return(&sem->count) < 0) | ||
88 | ret = __down_trylock(sem); | ||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Note! This is subtle. We jump to wake people up only if | ||
94 | * the semaphore was negative (== somebody was waiting on it). | ||
95 | * The default case (no contention) will result in NO | ||
96 | * jumps for both down() and up(). | ||
97 | */ | ||
98 | static inline void up(struct semaphore *sem) | ||
99 | { | ||
100 | if (atomic_inc_return(&sem->count) <= 0) | ||
101 | __up(sem); | ||
102 | } | ||
103 | |||
104 | #endif /* __ASSEMBLY__ */ | ||
105 | #endif /* _BFIN_SEMAPHORE_H */ | ||
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h deleted file mode 100644 index 27bfeca1b981..000000000000 --- a/include/asm-cris/semaphore-helper.h +++ /dev/null | |||
@@ -1,78 +0,0 @@ | |||
1 | /* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $ | ||
2 | * | ||
3 | * SMP- and interrupt-safe semaphores helper functions. Generic versions, no | ||
4 | * optimizations whatsoever... | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_SEMAPHORE_HELPER_H | ||
9 | #define _ASM_SEMAPHORE_HELPER_H | ||
10 | |||
11 | #include <asm/atomic.h> | ||
12 | #include <linux/errno.h> | ||
13 | |||
14 | #define read(a) ((a)->counter) | ||
15 | #define inc(a) (((a)->counter)++) | ||
16 | #define dec(a) (((a)->counter)--) | ||
17 | |||
18 | #define count_inc(a) ((*(a))++) | ||
19 | |||
20 | /* | ||
21 | * These two _must_ execute atomically wrt each other. | ||
22 | */ | ||
23 | static inline void wake_one_more(struct semaphore * sem) | ||
24 | { | ||
25 | atomic_inc(&sem->waking); | ||
26 | } | ||
27 | |||
28 | static inline int waking_non_zero(struct semaphore *sem) | ||
29 | { | ||
30 | unsigned long flags; | ||
31 | int ret = 0; | ||
32 | |||
33 | local_irq_save(flags); | ||
34 | if (read(&sem->waking) > 0) { | ||
35 | dec(&sem->waking); | ||
36 | ret = 1; | ||
37 | } | ||
38 | local_irq_restore(flags); | ||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | static inline int waking_non_zero_interruptible(struct semaphore *sem, | ||
43 | struct task_struct *tsk) | ||
44 | { | ||
45 | int ret = 0; | ||
46 | unsigned long flags; | ||
47 | |||
48 | local_irq_save(flags); | ||
49 | if (read(&sem->waking) > 0) { | ||
50 | dec(&sem->waking); | ||
51 | ret = 1; | ||
52 | } else if (signal_pending(tsk)) { | ||
53 | inc(&sem->count); | ||
54 | ret = -EINTR; | ||
55 | } | ||
56 | local_irq_restore(flags); | ||
57 | return ret; | ||
58 | } | ||
59 | |||
60 | static inline int waking_non_zero_trylock(struct semaphore *sem) | ||
61 | { | ||
62 | int ret = 1; | ||
63 | unsigned long flags; | ||
64 | |||
65 | local_irq_save(flags); | ||
66 | if (read(&sem->waking) <= 0) | ||
67 | inc(&sem->count); | ||
68 | else { | ||
69 | dec(&sem->waking); | ||
70 | ret = 0; | ||
71 | } | ||
72 | local_irq_restore(flags); | ||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | #endif /* _ASM_SEMAPHORE_HELPER_H */ | ||
77 | |||
78 | |||
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h index 31a4ac448195..d9b2034ed1d2 100644 --- a/include/asm-cris/semaphore.h +++ b/include/asm-cris/semaphore.h | |||
@@ -1,133 +1 @@ | |||
1 | /* $Id: semaphore.h,v 1.3 2001/05/08 13:54:09 bjornw Exp $ */ | #include <linux/semaphore.h> | |
2 | |||
3 | /* On the i386 these are coded in asm, perhaps we should as well. Later.. */ | ||
4 | |||
5 | #ifndef _CRIS_SEMAPHORE_H | ||
6 | #define _CRIS_SEMAPHORE_H | ||
7 | |||
8 | #define RW_LOCK_BIAS 0x01000000 | ||
9 | |||
10 | #include <linux/wait.h> | ||
11 | #include <linux/spinlock.h> | ||
12 | #include <linux/rwsem.h> | ||
13 | |||
14 | #include <asm/system.h> | ||
15 | #include <asm/atomic.h> | ||
16 | |||
17 | /* | ||
18 | * CRIS semaphores, implemented in C-only so far. | ||
19 | */ | ||
20 | |||
21 | struct semaphore { | ||
22 | atomic_t count; | ||
23 | atomic_t waking; | ||
24 | wait_queue_head_t wait; | ||
25 | }; | ||
26 | |||
27 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
28 | { \ | ||
29 | .count = ATOMIC_INIT(n), \ | ||
30 | .waking = ATOMIC_INIT(0), \ | ||
31 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
32 | } | ||
33 | |||
34 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
35 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
36 | |||
37 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
38 | |||
39 | static inline void sema_init(struct semaphore *sem, int val) | ||
40 | { | ||
41 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
42 | } | ||
43 | |||
44 | static inline void init_MUTEX (struct semaphore *sem) | ||
45 | { | ||
46 | sema_init(sem, 1); | ||
47 | } | ||
48 | |||
49 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
50 | { | ||
51 | sema_init(sem, 0); | ||
52 | } | ||
53 | |||
54 | extern void __down(struct semaphore * sem); | ||
55 | extern int __down_interruptible(struct semaphore * sem); | ||
56 | extern int __down_trylock(struct semaphore * sem); | ||
57 | extern void __up(struct semaphore * sem); | ||
58 | |||
59 | /* notice - we probably can do cli/sti here instead of saving */ | ||
60 | |||
61 | static inline void down(struct semaphore * sem) | ||
62 | { | ||
63 | unsigned long flags; | ||
64 | int failed; | ||
65 | |||
66 | might_sleep(); | ||
67 | |||
68 | /* atomically decrement the semaphores count, and if its negative, we wait */ | ||
69 | cris_atomic_save(sem, flags); | ||
70 | failed = --(sem->count.counter) < 0; | ||
71 | cris_atomic_restore(sem, flags); | ||
72 | if(failed) { | ||
73 | __down(sem); | ||
74 | } | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * This version waits in interruptible state so that the waiting | ||
79 | * process can be killed. The down_interruptible routine | ||
80 | * returns negative for signalled and zero for semaphore acquired. | ||
81 | */ | ||
82 | |||
83 | static inline int down_interruptible(struct semaphore * sem) | ||
84 | { | ||
85 | unsigned long flags; | ||
86 | int failed; | ||
87 | |||
88 | might_sleep(); | ||
89 | |||
90 | /* atomically decrement the semaphores count, and if its negative, we wait */ | ||
91 | cris_atomic_save(sem, flags); | ||
92 | failed = --(sem->count.counter) < 0; | ||
93 | cris_atomic_restore(sem, flags); | ||
94 | if(failed) | ||
95 | failed = __down_interruptible(sem); | ||
96 | return(failed); | ||
97 | } | ||
98 | |||
99 | static inline int down_trylock(struct semaphore * sem) | ||
100 | { | ||
101 | unsigned long flags; | ||
102 | int failed; | ||
103 | |||
104 | cris_atomic_save(sem, flags); | ||
105 | failed = --(sem->count.counter) < 0; | ||
106 | cris_atomic_restore(sem, flags); | ||
107 | if(failed) | ||
108 | failed = __down_trylock(sem); | ||
109 | return(failed); | ||
110 | |||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Note! This is subtle. We jump to wake people up only if | ||
115 | * the semaphore was negative (== somebody was waiting on it). | ||
116 | * The default case (no contention) will result in NO | ||
117 | * jumps for both down() and up(). | ||
118 | */ | ||
119 | static inline void up(struct semaphore * sem) | ||
120 | { | ||
121 | unsigned long flags; | ||
122 | int wakeup; | ||
123 | |||
124 | /* atomically increment the semaphores count, and if it was negative, we wake people */ | ||
125 | cris_atomic_save(sem, flags); | ||
126 | wakeup = ++(sem->count.counter) <= 0; | ||
127 | cris_atomic_restore(sem, flags); | ||
128 | if(wakeup) { | ||
129 | __up(sem); | ||
130 | } | ||
131 | } | ||
132 | |||
133 | #endif | ||
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h index d7aaa1911a1a..d9b2034ed1d2 100644 --- a/include/asm-frv/semaphore.h +++ b/include/asm-frv/semaphore.h | |||
@@ -1,155 +1 @@ | |||
1 | /* semaphore.h: semaphores for the FR-V | #include <linux/semaphore.h> | |
2 | * | ||
3 | * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #ifndef _ASM_SEMAPHORE_H | ||
12 | #define _ASM_SEMAPHORE_H | ||
13 | |||
14 | #define RW_LOCK_BIAS 0x01000000 | ||
15 | |||
16 | #ifndef __ASSEMBLY__ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | #include <linux/wait.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/rwsem.h> | ||
22 | |||
23 | /* | ||
24 | * the semaphore definition | ||
25 | * - if counter is >0 then there are tokens available on the semaphore for down to collect | ||
26 | * - if counter is <=0 then there are no spare tokens, and anyone that wants one must wait | ||
27 | * - if wait_list is not empty, then there are processes waiting for the semaphore | ||
28 | */ | ||
29 | struct semaphore { | ||
30 | unsigned counter; | ||
31 | spinlock_t wait_lock; | ||
32 | struct list_head wait_list; | ||
33 | #ifdef CONFIG_DEBUG_SEMAPHORE | ||
34 | unsigned __magic; | ||
35 | #endif | ||
36 | }; | ||
37 | |||
38 | #ifdef CONFIG_DEBUG_SEMAPHORE | ||
39 | # define __SEM_DEBUG_INIT(name) , (long)&(name).__magic | ||
40 | #else | ||
41 | # define __SEM_DEBUG_INIT(name) | ||
42 | #endif | ||
43 | |||
44 | |||
45 | #define __SEMAPHORE_INITIALIZER(name,count) \ | ||
46 | { count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) } | ||
47 | |||
48 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
49 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
50 | |||
51 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
52 | |||
53 | static inline void sema_init (struct semaphore *sem, int val) | ||
54 | { | ||
55 | *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); | ||
56 | } | ||
57 | |||
58 | static inline void init_MUTEX (struct semaphore *sem) | ||
59 | { | ||
60 | sema_init(sem, 1); | ||
61 | } | ||
62 | |||
63 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
64 | { | ||
65 | sema_init(sem, 0); | ||
66 | } | ||
67 | |||
68 | extern void __down(struct semaphore *sem, unsigned long flags); | ||
69 | extern int __down_interruptible(struct semaphore *sem, unsigned long flags); | ||
70 | extern void __up(struct semaphore *sem); | ||
71 | |||
72 | static inline void down(struct semaphore *sem) | ||
73 | { | ||
74 | unsigned long flags; | ||
75 | |||
76 | #ifdef CONFIG_DEBUG_SEMAPHORE | ||
77 | CHECK_MAGIC(sem->__magic); | ||
78 | #endif | ||
79 | |||
80 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
81 | if (likely(sem->counter > 0)) { | ||
82 | sem->counter--; | ||
83 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
84 | } | ||
85 | else { | ||
86 | __down(sem, flags); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | static inline int down_interruptible(struct semaphore *sem) | ||
91 | { | ||
92 | unsigned long flags; | ||
93 | int ret = 0; | ||
94 | |||
95 | #ifdef CONFIG_DEBUG_SEMAPHORE | ||
96 | CHECK_MAGIC(sem->__magic); | ||
97 | #endif | ||
98 | |||
99 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
100 | if (likely(sem->counter > 0)) { | ||
101 | sem->counter--; | ||
102 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
103 | } | ||
104 | else { | ||
105 | ret = __down_interruptible(sem, flags); | ||
106 | } | ||
107 | return ret; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * non-blockingly attempt to down() a semaphore. | ||
112 | * - returns zero if we acquired it | ||
113 | */ | ||
114 | static inline int down_trylock(struct semaphore *sem) | ||
115 | { | ||
116 | unsigned long flags; | ||
117 | int success = 0; | ||
118 | |||
119 | #ifdef CONFIG_DEBUG_SEMAPHORE | ||
120 | CHECK_MAGIC(sem->__magic); | ||
121 | #endif | ||
122 | |||
123 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
124 | if (sem->counter > 0) { | ||
125 | sem->counter--; | ||
126 | success = 1; | ||
127 | } | ||
128 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
129 | return !success; | ||
130 | } | ||
131 | |||
132 | static inline void up(struct semaphore *sem) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | |||
136 | #ifdef CONFIG_DEBUG_SEMAPHORE | ||
137 | CHECK_MAGIC(sem->__magic); | ||
138 | #endif | ||
139 | |||
140 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
141 | if (!list_empty(&sem->wait_list)) | ||
142 | __up(sem); | ||
143 | else | ||
144 | sem->counter++; | ||
145 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
146 | } | ||
147 | |||
148 | static inline int sem_getcount(struct semaphore *sem) | ||
149 | { | ||
150 | return sem->counter; | ||
151 | } | ||
152 | |||
153 | #endif /* __ASSEMBLY__ */ | ||
154 | |||
155 | #endif | ||
diff --git a/include/asm-h8300/semaphore-helper.h b/include/asm-h8300/semaphore-helper.h deleted file mode 100644 index 4fea36be5fd8..000000000000 --- a/include/asm-h8300/semaphore-helper.h +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | #ifndef _H8300_SEMAPHORE_HELPER_H | ||
2 | #define _H8300_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * | ||
9 | * based on | ||
10 | * m68k version by Andreas Schwab | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | |||
15 | /* | ||
16 | * These two _must_ execute atomically wrt each other. | ||
17 | */ | ||
18 | static inline void wake_one_more(struct semaphore * sem) | ||
19 | { | ||
20 | atomic_inc((atomic_t *)&sem->sleepers); | ||
21 | } | ||
22 | |||
23 | static inline int waking_non_zero(struct semaphore *sem) | ||
24 | { | ||
25 | int ret; | ||
26 | unsigned long flags; | ||
27 | |||
28 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
29 | ret = 0; | ||
30 | if (sem->sleepers > 0) { | ||
31 | sem->sleepers--; | ||
32 | ret = 1; | ||
33 | } | ||
34 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
35 | return ret; | ||
36 | } | ||
37 | |||
38 | /* | ||
39 | * waking_non_zero_interruptible: | ||
40 | * 1 got the lock | ||
41 | * 0 go to sleep | ||
42 | * -EINTR interrupted | ||
43 | */ | ||
44 | static inline int waking_non_zero_interruptible(struct semaphore *sem, | ||
45 | struct task_struct *tsk) | ||
46 | { | ||
47 | int ret; | ||
48 | unsigned long flags; | ||
49 | |||
50 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
51 | ret = 0; | ||
52 | if (sem->sleepers > 0) { | ||
53 | sem->sleepers--; | ||
54 | ret = 1; | ||
55 | } else if (signal_pending(tsk)) { | ||
56 | atomic_inc(&sem->count); | ||
57 | ret = -EINTR; | ||
58 | } | ||
59 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * waking_non_zero_trylock: | ||
65 | * 1 failed to lock | ||
66 | * 0 got the lock | ||
67 | */ | ||
68 | static inline int waking_non_zero_trylock(struct semaphore *sem) | ||
69 | { | ||
70 | int ret; | ||
71 | unsigned long flags; | ||
72 | |||
73 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
74 | ret = 1; | ||
75 | if (sem->sleepers <= 0) | ||
76 | atomic_inc(&sem->count); | ||
77 | else { | ||
78 | sem->sleepers--; | ||
79 | ret = 0; | ||
80 | } | ||
81 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | #endif | ||
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h index f3ffff83ff09..d9b2034ed1d2 100644 --- a/include/asm-h8300/semaphore.h +++ b/include/asm-h8300/semaphore.h | |||
@@ -1,190 +1 @@ | |||
1 | #ifndef _H8300_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _H8300_SEMAPHORE_H | ||
3 | |||
4 | #define RW_LOCK_BIAS 0x01000000 | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | |||
8 | #include <linux/linkage.h> | ||
9 | #include <linux/wait.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/rwsem.h> | ||
12 | |||
13 | #include <asm/system.h> | ||
14 | #include <asm/atomic.h> | ||
15 | |||
16 | /* | ||
17 | * Interrupt-safe semaphores.. | ||
18 | * | ||
19 | * (C) Copyright 1996 Linus Torvalds | ||
20 | * | ||
21 | * H8/300 version by Yoshinori Sato | ||
22 | */ | ||
23 | |||
24 | |||
25 | struct semaphore { | ||
26 | atomic_t count; | ||
27 | int sleepers; | ||
28 | wait_queue_head_t wait; | ||
29 | }; | ||
30 | |||
31 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
32 | { \ | ||
33 | .count = ATOMIC_INIT(n), \ | ||
34 | .sleepers = 0, \ | ||
35 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
36 | } | ||
37 | |||
38 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
39 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
40 | |||
41 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
42 | |||
43 | static inline void sema_init (struct semaphore *sem, int val) | ||
44 | { | ||
45 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val); | ||
46 | } | ||
47 | |||
48 | static inline void init_MUTEX (struct semaphore *sem) | ||
49 | { | ||
50 | sema_init(sem, 1); | ||
51 | } | ||
52 | |||
53 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
54 | { | ||
55 | sema_init(sem, 0); | ||
56 | } | ||
57 | |||
58 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
59 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
60 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
61 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
62 | |||
63 | asmlinkage void __down(struct semaphore * sem); | ||
64 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
65 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
66 | asmlinkage void __up(struct semaphore * sem); | ||
67 | |||
68 | extern spinlock_t semaphore_wake_lock; | ||
69 | |||
70 | /* | ||
71 | * This is ugly, but we want the default case to fall through. | ||
72 | * "down_failed" is a special asm handler that calls the C | ||
73 | * routine that actually waits. See arch/m68k/lib/semaphore.S | ||
74 | */ | ||
75 | static inline void down(struct semaphore * sem) | ||
76 | { | ||
77 | register atomic_t *count asm("er0"); | ||
78 | |||
79 | might_sleep(); | ||
80 | |||
81 | count = &(sem->count); | ||
82 | __asm__ __volatile__( | ||
83 | "stc ccr,r3l\n\t" | ||
84 | "orc #0x80,ccr\n\t" | ||
85 | "mov.l %2, er1\n\t" | ||
86 | "dec.l #1,er1\n\t" | ||
87 | "mov.l er1,%0\n\t" | ||
88 | "bpl 1f\n\t" | ||
89 | "ldc r3l,ccr\n\t" | ||
90 | "mov.l %1,er0\n\t" | ||
91 | "jsr @___down\n\t" | ||
92 | "bra 2f\n" | ||
93 | "1:\n\t" | ||
94 | "ldc r3l,ccr\n" | ||
95 | "2:" | ||
96 | : "=m"(*count) | ||
97 | : "g"(sem),"m"(*count) | ||
98 | : "cc", "er1", "er2", "er3"); | ||
99 | } | ||
100 | |||
101 | static inline int down_interruptible(struct semaphore * sem) | ||
102 | { | ||
103 | register atomic_t *count asm("er0"); | ||
104 | |||
105 | might_sleep(); | ||
106 | |||
107 | count = &(sem->count); | ||
108 | __asm__ __volatile__( | ||
109 | "stc ccr,r1l\n\t" | ||
110 | "orc #0x80,ccr\n\t" | ||
111 | "mov.l %3, er2\n\t" | ||
112 | "dec.l #1,er2\n\t" | ||
113 | "mov.l er2,%1\n\t" | ||
114 | "bpl 1f\n\t" | ||
115 | "ldc r1l,ccr\n\t" | ||
116 | "mov.l %2,er0\n\t" | ||
117 | "jsr @___down_interruptible\n\t" | ||
118 | "bra 2f\n" | ||
119 | "1:\n\t" | ||
120 | "ldc r1l,ccr\n\t" | ||
121 | "sub.l %0,%0\n\t" | ||
122 | "2:\n\t" | ||
123 | : "=r" (count),"=m" (*count) | ||
124 | : "g"(sem),"m"(*count) | ||
125 | : "cc", "er1", "er2", "er3"); | ||
126 | return (int)count; | ||
127 | } | ||
128 | |||
129 | static inline int down_trylock(struct semaphore * sem) | ||
130 | { | ||
131 | register atomic_t *count asm("er0"); | ||
132 | |||
133 | count = &(sem->count); | ||
134 | __asm__ __volatile__( | ||
135 | "stc ccr,r3l\n\t" | ||
136 | "orc #0x80,ccr\n\t" | ||
137 | "mov.l %3,er2\n\t" | ||
138 | "dec.l #1,er2\n\t" | ||
139 | "mov.l er2,%0\n\t" | ||
140 | "bpl 1f\n\t" | ||
141 | "ldc r3l,ccr\n\t" | ||
142 | "jmp @3f\n\t" | ||
143 | LOCK_SECTION_START(".align 2\n\t") | ||
144 | "3:\n\t" | ||
145 | "mov.l %2,er0\n\t" | ||
146 | "jsr @___down_trylock\n\t" | ||
147 | "jmp @2f\n\t" | ||
148 | LOCK_SECTION_END | ||
149 | "1:\n\t" | ||
150 | "ldc r3l,ccr\n\t" | ||
151 | "sub.l %1,%1\n" | ||
152 | "2:" | ||
153 | : "=m" (*count),"=r"(count) | ||
154 | : "g"(sem),"m"(*count) | ||
155 | : "cc", "er1","er2", "er3"); | ||
156 | return (int)count; | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * Note! This is subtle. We jump to wake people up only if | ||
161 | * the semaphore was negative (== somebody was waiting on it). | ||
162 | * The default case (no contention) will result in NO | ||
163 | * jumps for both down() and up(). | ||
164 | */ | ||
165 | static inline void up(struct semaphore * sem) | ||
166 | { | ||
167 | register atomic_t *count asm("er0"); | ||
168 | |||
169 | count = &(sem->count); | ||
170 | __asm__ __volatile__( | ||
171 | "stc ccr,r3l\n\t" | ||
172 | "orc #0x80,ccr\n\t" | ||
173 | "mov.l %2,er1\n\t" | ||
174 | "inc.l #1,er1\n\t" | ||
175 | "mov.l er1,%0\n\t" | ||
176 | "ldc r3l,ccr\n\t" | ||
177 | "sub.l er2,er2\n\t" | ||
178 | "cmp.l er2,er1\n\t" | ||
179 | "bgt 1f\n\t" | ||
180 | "mov.l %1,er0\n\t" | ||
181 | "jsr @___up\n" | ||
182 | "1:" | ||
183 | : "=m"(*count) | ||
184 | : "g"(sem),"m"(*count) | ||
185 | : "cc", "er1", "er2", "er3"); | ||
186 | } | ||
187 | |||
188 | #endif /* __ASSEMBLY__ */ | ||
189 | |||
190 | #endif | ||
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h index cd1cc39b5599..fcfad326f4c7 100644 --- a/include/asm-ia64/acpi.h +++ b/include/asm-ia64/acpi.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/numa.h> | 36 | #include <linux/numa.h> |
37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
38 | #include <asm/numa.h> | ||
38 | 39 | ||
39 | #define COMPILER_DEPENDENT_INT64 long | 40 | #define COMPILER_DEPENDENT_INT64 long |
40 | #define COMPILER_DEPENDENT_UINT64 unsigned long | 41 | #define COMPILER_DEPENDENT_UINT64 unsigned long |
@@ -115,7 +116,11 @@ extern unsigned int is_cpu_cpei_target(unsigned int cpu); | |||
115 | extern void set_cpei_target_cpu(unsigned int cpu); | 116 | extern void set_cpei_target_cpu(unsigned int cpu); |
116 | extern unsigned int get_cpei_target_cpu(void); | 117 | extern unsigned int get_cpei_target_cpu(void); |
117 | extern void prefill_possible_map(void); | 118 | extern void prefill_possible_map(void); |
119 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | ||
118 | extern int additional_cpus; | 120 | extern int additional_cpus; |
121 | #else | ||
122 | #define additional_cpus 0 | ||
123 | #endif | ||
119 | 124 | ||
120 | #ifdef CONFIG_ACPI_NUMA | 125 | #ifdef CONFIG_ACPI_NUMA |
121 | #if MAX_NUMNODES > 256 | 126 | #if MAX_NUMNODES > 256 |
@@ -129,6 +134,34 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; | |||
129 | 134 | ||
130 | #define acpi_unlazy_tlb(x) | 135 | #define acpi_unlazy_tlb(x) |
131 | 136 | ||
137 | #ifdef CONFIG_ACPI_NUMA | ||
138 | extern cpumask_t early_cpu_possible_map; | ||
139 | #define for_each_possible_early_cpu(cpu) \ | ||
140 | for_each_cpu_mask((cpu), early_cpu_possible_map) | ||
141 | |||
142 | static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus) | ||
143 | { | ||
144 | int low_cpu, high_cpu; | ||
145 | int cpu; | ||
146 | int next_nid = 0; | ||
147 | |||
148 | low_cpu = cpus_weight(early_cpu_possible_map); | ||
149 | |||
150 | high_cpu = max(low_cpu, min_cpus); | ||
151 | high_cpu = min(high_cpu + reserve_cpus, NR_CPUS); | ||
152 | |||
153 | for (cpu = low_cpu; cpu < high_cpu; cpu++) { | ||
154 | cpu_set(cpu, early_cpu_possible_map); | ||
155 | if (node_cpuid[cpu].nid == NUMA_NO_NODE) { | ||
156 | node_cpuid[cpu].nid = next_nid; | ||
157 | next_nid++; | ||
158 | if (next_nid >= num_online_nodes()) | ||
159 | next_nid = 0; | ||
160 | } | ||
161 | } | ||
162 | } | ||
163 | #endif /* CONFIG_ACPI_NUMA */ | ||
164 | |||
132 | #endif /*__KERNEL__*/ | 165 | #endif /*__KERNEL__*/ |
133 | 166 | ||
134 | #endif /*_ASM_ACPI_H*/ | 167 | #endif /*_ASM_ACPI_H*/ |
diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h index 72400a78002a..f9abdec6577a 100644 --- a/include/asm-ia64/cputime.h +++ b/include/asm-ia64/cputime.h | |||
@@ -1,6 +1,110 @@ | |||
1 | /* | ||
2 | * include/asm-ia64/cputime.h: | ||
3 | * Definitions for measuring cputime on ia64 machines. | ||
4 | * | ||
5 | * Based on <asm-powerpc/cputime.h>. | ||
6 | * | ||
7 | * Copyright (C) 2007 FUJITSU LIMITED | ||
8 | * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec. | ||
16 | * Otherwise we measure cpu time in jiffies using the generic definitions. | ||
17 | */ | ||
18 | |||
1 | #ifndef __IA64_CPUTIME_H | 19 | #ifndef __IA64_CPUTIME_H |
2 | #define __IA64_CPUTIME_H | 20 | #define __IA64_CPUTIME_H |
3 | 21 | ||
22 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
4 | #include <asm-generic/cputime.h> | 23 | #include <asm-generic/cputime.h> |
24 | #else | ||
25 | |||
26 | #include <linux/time.h> | ||
27 | #include <linux/jiffies.h> | ||
28 | #include <asm/processor.h> | ||
29 | |||
30 | typedef u64 cputime_t; | ||
31 | typedef u64 cputime64_t; | ||
32 | |||
33 | #define cputime_zero ((cputime_t)0) | ||
34 | #define cputime_max ((~((cputime_t)0) >> 1) - 1) | ||
35 | #define cputime_add(__a, __b) ((__a) + (__b)) | ||
36 | #define cputime_sub(__a, __b) ((__a) - (__b)) | ||
37 | #define cputime_div(__a, __n) ((__a) / (__n)) | ||
38 | #define cputime_halve(__a) ((__a) >> 1) | ||
39 | #define cputime_eq(__a, __b) ((__a) == (__b)) | ||
40 | #define cputime_gt(__a, __b) ((__a) > (__b)) | ||
41 | #define cputime_ge(__a, __b) ((__a) >= (__b)) | ||
42 | #define cputime_lt(__a, __b) ((__a) < (__b)) | ||
43 | #define cputime_le(__a, __b) ((__a) <= (__b)) | ||
44 | |||
45 | #define cputime64_zero ((cputime64_t)0) | ||
46 | #define cputime64_add(__a, __b) ((__a) + (__b)) | ||
47 | #define cputime64_sub(__a, __b) ((__a) - (__b)) | ||
48 | #define cputime_to_cputime64(__ct) (__ct) | ||
49 | |||
50 | /* | ||
51 | * Convert cputime <-> jiffies (HZ) | ||
52 | */ | ||
53 | #define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) | ||
54 | #define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) | ||
55 | #define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) | ||
56 | #define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) | ||
57 | |||
58 | /* | ||
59 | * Convert cputime <-> milliseconds | ||
60 | */ | ||
61 | #define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC) | ||
62 | #define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC) | ||
63 | |||
64 | /* | ||
65 | * Convert cputime <-> seconds | ||
66 | */ | ||
67 | #define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC) | ||
68 | #define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC) | ||
69 | |||
70 | /* | ||
71 | * Convert cputime <-> timespec (nsec) | ||
72 | */ | ||
73 | static inline cputime_t timespec_to_cputime(const struct timespec *val) | ||
74 | { | ||
75 | cputime_t ret = val->tv_sec * NSEC_PER_SEC; | ||
76 | return (ret + val->tv_nsec); | ||
77 | } | ||
78 | static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) | ||
79 | { | ||
80 | val->tv_sec = ct / NSEC_PER_SEC; | ||
81 | val->tv_nsec = ct % NSEC_PER_SEC; | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * Convert cputime <-> timeval (msec) | ||
86 | */ | ||
87 | static inline cputime_t timeval_to_cputime(struct timeval *val) | ||
88 | { | ||
89 | cputime_t ret = val->tv_sec * NSEC_PER_SEC; | ||
90 | return (ret + val->tv_usec * NSEC_PER_USEC); | ||
91 | } | ||
92 | static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) | ||
93 | { | ||
94 | val->tv_sec = ct / NSEC_PER_SEC; | ||
95 | val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * Convert cputime <-> clock (USER_HZ) | ||
100 | */ | ||
101 | #define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ)) | ||
102 | #define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ)) | ||
103 | |||
104 | /* | ||
105 | * Convert cputime64 to clock. | ||
106 | */ | ||
107 | #define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct) | ||
5 | 108 | ||
109 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
6 | #endif /* __IA64_CPUTIME_H */ | 110 | #endif /* __IA64_CPUTIME_H */ |
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h index f8e83eca67a2..5e0c1a6bce8d 100644 --- a/include/asm-ia64/elf.h +++ b/include/asm-ia64/elf.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #define ELF_ARCH EM_IA_64 | 26 | #define ELF_ARCH EM_IA_64 |
27 | 27 | ||
28 | #define USE_ELF_CORE_DUMP | 28 | #define USE_ELF_CORE_DUMP |
29 | #define CORE_DUMP_USE_REGSET | ||
29 | 30 | ||
30 | /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are | 31 | /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are |
31 | interpreted as follows by Linux: */ | 32 | interpreted as follows by Linux: */ |
@@ -154,6 +155,30 @@ extern void ia64_init_addr_space (void); | |||
154 | #define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */ | 155 | #define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */ |
155 | #define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */ | 156 | #define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */ |
156 | 157 | ||
158 | /* elf_gregset_t register offsets */ | ||
159 | #define ELF_GR_0_OFFSET 0 | ||
160 | #define ELF_NAT_OFFSET (32 * sizeof(elf_greg_t)) | ||
161 | #define ELF_PR_OFFSET (33 * sizeof(elf_greg_t)) | ||
162 | #define ELF_BR_0_OFFSET (34 * sizeof(elf_greg_t)) | ||
163 | #define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t)) | ||
164 | #define ELF_CFM_OFFSET (43 * sizeof(elf_greg_t)) | ||
165 | #define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t)) | ||
166 | #define ELF_GR_OFFSET(i) (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t)) | ||
167 | #define ELF_BR_OFFSET(i) (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t)) | ||
168 | #define ELF_AR_RSC_OFFSET (45 * sizeof(elf_greg_t)) | ||
169 | #define ELF_AR_BSP_OFFSET (46 * sizeof(elf_greg_t)) | ||
170 | #define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t)) | ||
171 | #define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t)) | ||
172 | #define ELF_AR_CCV_OFFSET (49 * sizeof(elf_greg_t)) | ||
173 | #define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t)) | ||
174 | #define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t)) | ||
175 | #define ELF_AR_PFS_OFFSET (52 * sizeof(elf_greg_t)) | ||
176 | #define ELF_AR_LC_OFFSET (53 * sizeof(elf_greg_t)) | ||
177 | #define ELF_AR_EC_OFFSET (54 * sizeof(elf_greg_t)) | ||
178 | #define ELF_AR_CSD_OFFSET (55 * sizeof(elf_greg_t)) | ||
179 | #define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t)) | ||
180 | #define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t)) | ||
181 | |||
157 | typedef unsigned long elf_fpxregset_t; | 182 | typedef unsigned long elf_fpxregset_t; |
158 | 183 | ||
159 | typedef unsigned long elf_greg_t; | 184 | typedef unsigned long elf_greg_t; |
@@ -183,12 +208,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst); | |||
183 | 208 | ||
184 | struct task_struct; | 209 | struct task_struct; |
185 | 210 | ||
186 | extern int dump_task_regs(struct task_struct *, elf_gregset_t *); | ||
187 | extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); | ||
188 | |||
189 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs) | ||
190 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) | ||
191 | |||
192 | #define GATE_EHDR ((const struct elfhdr *) GATE_ADDR) | 211 | #define GATE_EHDR ((const struct elfhdr *) GATE_ADDR) |
193 | 212 | ||
194 | /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ | 213 | /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ |
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h index 1ccf23809329..8fa3f8cd067a 100644 --- a/include/asm-ia64/ide.h +++ b/include/asm-ia64/ide.h | |||
@@ -16,8 +16,6 @@ | |||
16 | 16 | ||
17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
18 | 18 | ||
19 | #define IDE_ARCH_OBSOLETE_DEFAULTS | ||
20 | |||
21 | static inline int ide_default_irq(unsigned long base) | 19 | static inline int ide_default_irq(unsigned long base) |
22 | { | 20 | { |
23 | switch (base) { | 21 | switch (base) { |
@@ -46,14 +44,6 @@ static inline unsigned long ide_default_io_base(int index) | |||
46 | } | 44 | } |
47 | } | 45 | } |
48 | 46 | ||
49 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
50 | |||
51 | #ifdef CONFIG_PCI | ||
52 | #define ide_init_default_irq(base) (0) | ||
53 | #else | ||
54 | #define ide_init_default_irq(base) ide_default_irq(base) | ||
55 | #endif | ||
56 | |||
57 | #include <asm-generic/ide_iops.h> | 47 | #include <asm-generic/ide_iops.h> |
58 | 48 | ||
59 | #endif /* __KERNEL__ */ | 49 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h index d03bf9ff68e3..ef71b57fc2f4 100644 --- a/include/asm-ia64/kprobes.h +++ b/include/asm-ia64/kprobes.h | |||
@@ -30,8 +30,12 @@ | |||
30 | #include <asm/break.h> | 30 | #include <asm/break.h> |
31 | 31 | ||
32 | #define __ARCH_WANT_KPROBES_INSN_SLOT | 32 | #define __ARCH_WANT_KPROBES_INSN_SLOT |
33 | #define MAX_INSN_SIZE 1 | 33 | #define MAX_INSN_SIZE 2 /* last half is for kprobe-booster */ |
34 | #define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6) | 34 | #define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6) |
35 | #define NOP_M_INST (long)(1<<27) | ||
36 | #define BRL_INST(i1, i2) ((long)((0xcL << 37) | /* brl */ \ | ||
37 | (0x1L << 12) | /* many */ \ | ||
38 | (((i1) & 1) << 36) | ((i2) << 13))) /* imm */ | ||
35 | 39 | ||
36 | typedef union cmp_inst { | 40 | typedef union cmp_inst { |
37 | struct { | 41 | struct { |
@@ -112,6 +116,7 @@ struct arch_specific_insn { | |||
112 | #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1 | 116 | #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1 |
113 | #define INST_FLAG_FIX_BRANCH_REG 2 | 117 | #define INST_FLAG_FIX_BRANCH_REG 2 |
114 | #define INST_FLAG_BREAK_INST 4 | 118 | #define INST_FLAG_BREAK_INST 4 |
119 | #define INST_FLAG_BOOSTABLE 8 | ||
115 | unsigned long inst_flag; | 120 | unsigned long inst_flag; |
116 | unsigned short target_br_reg; | 121 | unsigned short target_br_reg; |
117 | unsigned short slot; | 122 | unsigned short slot; |
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h index 7e55a584975c..aefcdfee7f23 100644 --- a/include/asm-ia64/kregs.h +++ b/include/asm-ia64/kregs.h | |||
@@ -31,6 +31,9 @@ | |||
31 | #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ | 31 | #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ |
32 | #define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */ | 32 | #define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */ |
33 | 33 | ||
34 | #define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/ | ||
35 | #define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use*/ | ||
36 | |||
34 | /* Processor status register bits: */ | 37 | /* Processor status register bits: */ |
35 | #define IA64_PSR_BE_BIT 1 | 38 | #define IA64_PSR_BE_BIT 1 |
36 | #define IA64_PSR_UP_BIT 2 | 39 | #define IA64_PSR_UP_BIT 2 |
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h index f93308f54b61..7245a5781594 100644 --- a/include/asm-ia64/meminit.h +++ b/include/asm-ia64/meminit.h | |||
@@ -35,6 +35,7 @@ extern void find_memory (void); | |||
35 | extern void reserve_memory (void); | 35 | extern void reserve_memory (void); |
36 | extern void find_initrd (void); | 36 | extern void find_initrd (void); |
37 | extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); | 37 | extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); |
38 | extern int filter_memory (unsigned long start, unsigned long end, void *arg); | ||
38 | extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e); | 39 | extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e); |
39 | extern int find_max_min_low_pfn (unsigned long , unsigned long, void *); | 40 | extern int find_max_min_low_pfn (unsigned long , unsigned long, void *); |
40 | 41 | ||
@@ -56,7 +57,7 @@ extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end); | |||
56 | 57 | ||
57 | #define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ | 58 | #define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ |
58 | 59 | ||
59 | extern int register_active_ranges(u64 start, u64 end, void *arg); | 60 | extern int register_active_ranges(u64 start, u64 len, int nid); |
60 | 61 | ||
61 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 62 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
62 | # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ | 63 | # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ |
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h index 6a8a27cfae3e..3499ff57bf42 100644 --- a/include/asm-ia64/numa.h +++ b/include/asm-ia64/numa.h | |||
@@ -22,6 +22,8 @@ | |||
22 | 22 | ||
23 | #include <asm/mmzone.h> | 23 | #include <asm/mmzone.h> |
24 | 24 | ||
25 | #define NUMA_NO_NODE -1 | ||
26 | |||
25 | extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; | 27 | extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; |
26 | extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; | 28 | extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; |
27 | extern pg_data_t *pgdat_list[MAX_NUMNODES]; | 29 | extern pg_data_t *pgdat_list[MAX_NUMNODES]; |
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 8a695d3407d2..67b02901ead4 100644 --- a/include/asm-ia64/pal.h +++ b/include/asm-ia64/pal.h | |||
@@ -13,6 +13,7 @@ | |||
13 | * Copyright (C) 1999 VA Linux Systems | 13 | * Copyright (C) 1999 VA Linux Systems |
14 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> | 14 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> |
15 | * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com> | 15 | * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com> |
16 | * Copyright (C) 2008 Silicon Graphics, Inc. (SGI) | ||
16 | * | 17 | * |
17 | * 99/10/01 davidm Make sure we pass zero for reserved parameters. | 18 | * 99/10/01 davidm Make sure we pass zero for reserved parameters. |
18 | * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6. | 19 | * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6. |
@@ -73,6 +74,8 @@ | |||
73 | #define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */ | 74 | #define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */ |
74 | #define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */ | 75 | #define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */ |
75 | #define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */ | 76 | #define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */ |
77 | #define PAL_VP_INFO 50 /* Information about virtual processor features */ | ||
78 | #define PAL_MC_HW_TRACKING 51 /* Hardware tracking status */ | ||
76 | 79 | ||
77 | #define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ | 80 | #define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ |
78 | #define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ | 81 | #define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ |
@@ -504,7 +507,8 @@ typedef struct pal_cache_check_info_s { | |||
504 | wiv : 1, /* Way field valid */ | 507 | wiv : 1, /* Way field valid */ |
505 | reserved2 : 1, | 508 | reserved2 : 1, |
506 | dp : 1, /* Data poisoned on MBE */ | 509 | dp : 1, /* Data poisoned on MBE */ |
507 | reserved3 : 8, | 510 | reserved3 : 6, |
511 | hlth : 2, /* Health indicator */ | ||
508 | 512 | ||
509 | index : 20, /* Cache line index */ | 513 | index : 20, /* Cache line index */ |
510 | reserved4 : 2, | 514 | reserved4 : 2, |
@@ -542,7 +546,9 @@ typedef struct pal_tlb_check_info_s { | |||
542 | dtc : 1, /* Fail in data TC */ | 546 | dtc : 1, /* Fail in data TC */ |
543 | itc : 1, /* Fail in inst. TC */ | 547 | itc : 1, /* Fail in inst. TC */ |
544 | op : 4, /* Cache operation */ | 548 | op : 4, /* Cache operation */ |
545 | reserved3 : 30, | 549 | reserved3 : 6, |
550 | hlth : 2, /* Health indicator */ | ||
551 | reserved4 : 22, | ||
546 | 552 | ||
547 | is : 1, /* instruction set (1 == ia32) */ | 553 | is : 1, /* instruction set (1 == ia32) */ |
548 | iv : 1, /* instruction set field valid */ | 554 | iv : 1, /* instruction set field valid */ |
@@ -633,7 +639,8 @@ typedef struct pal_uarch_check_info_s { | |||
633 | way : 6, /* Way of structure */ | 639 | way : 6, /* Way of structure */ |
634 | wv : 1, /* way valid */ | 640 | wv : 1, /* way valid */ |
635 | xv : 1, /* index valid */ | 641 | xv : 1, /* index valid */ |
636 | reserved1 : 8, | 642 | reserved1 : 6, |
643 | hlth : 2, /* Health indicator */ | ||
637 | index : 8, /* Index or set of the uarch | 644 | index : 8, /* Index or set of the uarch |
638 | * structure that failed. | 645 | * structure that failed. |
639 | */ | 646 | */ |
@@ -1213,14 +1220,12 @@ ia64_pal_mc_drain (void) | |||
1213 | 1220 | ||
1214 | /* Return the machine check dynamic processor state */ | 1221 | /* Return the machine check dynamic processor state */ |
1215 | static inline s64 | 1222 | static inline s64 |
1216 | ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds) | 1223 | ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size) |
1217 | { | 1224 | { |
1218 | struct ia64_pal_retval iprv; | 1225 | struct ia64_pal_retval iprv; |
1219 | PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0); | 1226 | PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0); |
1220 | if (size) | 1227 | if (size) |
1221 | *size = iprv.v0; | 1228 | *size = iprv.v0; |
1222 | if (pds) | ||
1223 | *pds = iprv.v1; | ||
1224 | return iprv.status; | 1229 | return iprv.status; |
1225 | } | 1230 | } |
1226 | 1231 | ||
@@ -1281,15 +1286,41 @@ ia64_pal_mc_expected (u64 expected, u64 *previous) | |||
1281 | return iprv.status; | 1286 | return iprv.status; |
1282 | } | 1287 | } |
1283 | 1288 | ||
1289 | typedef union pal_hw_tracking_u { | ||
1290 | u64 pht_data; | ||
1291 | struct { | ||
1292 | u64 itc :4, /* Instruction cache tracking */ | ||
1293 | dct :4, /* Date cache tracking */ | ||
1294 | itt :4, /* Instruction TLB tracking */ | ||
1295 | ddt :4, /* Data TLB tracking */ | ||
1296 | reserved:48; | ||
1297 | } pal_hw_tracking_s; | ||
1298 | } pal_hw_tracking_u_t; | ||
1299 | |||
1300 | /* | ||
1301 | * Hardware tracking status. | ||
1302 | */ | ||
1303 | static inline s64 | ||
1304 | ia64_pal_mc_hw_tracking (u64 *status) | ||
1305 | { | ||
1306 | struct ia64_pal_retval iprv; | ||
1307 | PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0); | ||
1308 | if (status) | ||
1309 | *status = iprv.v0; | ||
1310 | return iprv.status; | ||
1311 | } | ||
1312 | |||
1284 | /* Register a platform dependent location with PAL to which it can save | 1313 | /* Register a platform dependent location with PAL to which it can save |
1285 | * minimal processor state in the event of a machine check or initialization | 1314 | * minimal processor state in the event of a machine check or initialization |
1286 | * event. | 1315 | * event. |
1287 | */ | 1316 | */ |
1288 | static inline s64 | 1317 | static inline s64 |
1289 | ia64_pal_mc_register_mem (u64 physical_addr) | 1318 | ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size) |
1290 | { | 1319 | { |
1291 | struct ia64_pal_retval iprv; | 1320 | struct ia64_pal_retval iprv; |
1292 | PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0); | 1321 | PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0); |
1322 | if (req_size) | ||
1323 | *req_size = iprv.v0; | ||
1293 | return iprv.status; | 1324 | return iprv.status; |
1294 | } | 1325 | } |
1295 | 1326 | ||
@@ -1631,6 +1662,29 @@ ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2) | |||
1631 | return iprv.status; | 1662 | return iprv.status; |
1632 | } | 1663 | } |
1633 | 1664 | ||
1665 | typedef union pal_vp_info_u { | ||
1666 | u64 pvi_val; | ||
1667 | struct { | ||
1668 | u64 index: 48, /* virtual feature set info */ | ||
1669 | vmm_id: 16; /* feature set id */ | ||
1670 | } pal_vp_info_s; | ||
1671 | } pal_vp_info_u_t; | ||
1672 | |||
1673 | /* | ||
1674 | * Returns infomation about virtual processor features | ||
1675 | */ | ||
1676 | static inline s64 | ||
1677 | ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id) | ||
1678 | { | ||
1679 | struct ia64_pal_retval iprv; | ||
1680 | PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0); | ||
1681 | if (vp_info) | ||
1682 | *vp_info = iprv.v0; | ||
1683 | if (vmm_id) | ||
1684 | *vmm_id = iprv.v1; | ||
1685 | return iprv.status; | ||
1686 | } | ||
1687 | |||
1634 | typedef union pal_itr_valid_u { | 1688 | typedef union pal_itr_valid_u { |
1635 | u64 piv_val; | 1689 | u64 piv_val; |
1636 | struct { | 1690 | struct { |
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index e6204f14f614..ed70862ea247 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h | |||
@@ -371,7 +371,7 @@ pgd_index (unsigned long address) | |||
371 | /* The offset in the 1-level directory is given by the 3 region bits | 371 | /* The offset in the 1-level directory is given by the 3 region bits |
372 | (61..63) and the level-1 bits. */ | 372 | (61..63) and the level-1 bits. */ |
373 | static inline pgd_t* | 373 | static inline pgd_t* |
374 | pgd_offset (struct mm_struct *mm, unsigned long address) | 374 | pgd_offset (const struct mm_struct *mm, unsigned long address) |
375 | { | 375 | { |
376 | return mm->pgd + pgd_index(address); | 376 | return mm->pgd + pgd_index(address); |
377 | } | 377 | } |
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h index f4904db3b057..89594b442f83 100644 --- a/include/asm-ia64/sal.h +++ b/include/asm-ia64/sal.h | |||
@@ -296,6 +296,9 @@ enum { | |||
296 | EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) | 296 | EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) |
297 | #define SAL_PLAT_BUS_ERR_SECT_GUID \ | 297 | #define SAL_PLAT_BUS_ERR_SECT_GUID \ |
298 | EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) | 298 | EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) |
299 | #define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \ | ||
300 | EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \ | ||
301 | 0xca, 0x4d) | ||
299 | 302 | ||
300 | #define MAX_CACHE_ERRORS 6 | 303 | #define MAX_CACHE_ERRORS 6 |
301 | #define MAX_TLB_ERRORS 6 | 304 | #define MAX_TLB_ERRORS 6 |
@@ -879,6 +882,24 @@ extern void ia64_jump_to_sal(struct sal_to_os_boot *); | |||
879 | 882 | ||
880 | extern void ia64_sal_handler_init(void *entry_point, void *gpval); | 883 | extern void ia64_sal_handler_init(void *entry_point, void *gpval); |
881 | 884 | ||
885 | #define PALO_MAX_TLB_PURGES 0xFFFF | ||
886 | #define PALO_SIG "PALO" | ||
887 | |||
888 | struct palo_table { | ||
889 | u8 signature[4]; /* Should be "PALO" */ | ||
890 | u32 length; | ||
891 | u8 minor_revision; | ||
892 | u8 major_revision; | ||
893 | u8 checksum; | ||
894 | u8 reserved1[5]; | ||
895 | u16 max_tlb_purges; | ||
896 | u8 reserved2[6]; | ||
897 | }; | ||
898 | |||
899 | #define NPTCG_FROM_PAL 0 | ||
900 | #define NPTCG_FROM_PALO 1 | ||
901 | #define NPTCG_FROM_KERNEL_PARAMETER 2 | ||
902 | |||
882 | #endif /* __ASSEMBLY__ */ | 903 | #endif /* __ASSEMBLY__ */ |
883 | 904 | ||
884 | #endif /* _ASM_IA64_SAL_H */ | 905 | #endif /* _ASM_IA64_SAL_H */ |
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h index d8393d11288d..d9b2034ed1d2 100644 --- a/include/asm-ia64/semaphore.h +++ b/include/asm-ia64/semaphore.h | |||
@@ -1,99 +1 @@ | |||
1 | #ifndef _ASM_IA64_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _ASM_IA64_SEMAPHORE_H | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 1998-2000 Hewlett-Packard Co | ||
6 | * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/wait.h> | ||
10 | #include <linux/rwsem.h> | ||
11 | |||
12 | #include <asm/atomic.h> | ||
13 | |||
14 | struct semaphore { | ||
15 | atomic_t count; | ||
16 | int sleepers; | ||
17 | wait_queue_head_t wait; | ||
18 | }; | ||
19 | |||
20 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
21 | { \ | ||
22 | .count = ATOMIC_INIT(n), \ | ||
23 | .sleepers = 0, \ | ||
24 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
25 | } | ||
26 | |||
27 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
28 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) | ||
29 | |||
30 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
31 | |||
32 | static inline void | ||
33 | sema_init (struct semaphore *sem, int val) | ||
34 | { | ||
35 | *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); | ||
36 | } | ||
37 | |||
38 | static inline void | ||
39 | init_MUTEX (struct semaphore *sem) | ||
40 | { | ||
41 | sema_init(sem, 1); | ||
42 | } | ||
43 | |||
44 | static inline void | ||
45 | init_MUTEX_LOCKED (struct semaphore *sem) | ||
46 | { | ||
47 | sema_init(sem, 0); | ||
48 | } | ||
49 | |||
50 | extern void __down (struct semaphore * sem); | ||
51 | extern int __down_interruptible (struct semaphore * sem); | ||
52 | extern int __down_trylock (struct semaphore * sem); | ||
53 | extern void __up (struct semaphore * sem); | ||
54 | |||
55 | /* | ||
56 | * Atomically decrement the semaphore's count. If it goes negative, | ||
57 | * block the calling thread in the TASK_UNINTERRUPTIBLE state. | ||
58 | */ | ||
59 | static inline void | ||
60 | down (struct semaphore *sem) | ||
61 | { | ||
62 | might_sleep(); | ||
63 | if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1) | ||
64 | __down(sem); | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * Atomically decrement the semaphore's count. If it goes negative, | ||
69 | * block the calling thread in the TASK_INTERRUPTIBLE state. | ||
70 | */ | ||
71 | static inline int | ||
72 | down_interruptible (struct semaphore * sem) | ||
73 | { | ||
74 | int ret = 0; | ||
75 | |||
76 | might_sleep(); | ||
77 | if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1) | ||
78 | ret = __down_interruptible(sem); | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | static inline int | ||
83 | down_trylock (struct semaphore *sem) | ||
84 | { | ||
85 | int ret = 0; | ||
86 | |||
87 | if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1) | ||
88 | ret = __down_trylock(sem); | ||
89 | return ret; | ||
90 | } | ||
91 | |||
92 | static inline void | ||
93 | up (struct semaphore * sem) | ||
94 | { | ||
95 | if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1) | ||
96 | __up(sem); | ||
97 | } | ||
98 | |||
99 | #endif /* _ASM_IA64_SEMAPHORE_H */ | ||
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h index 4fa733dd417a..ec5f355fb7e3 100644 --- a/include/asm-ia64/smp.h +++ b/include/asm-ia64/smp.h | |||
@@ -38,6 +38,9 @@ ia64_get_lid (void) | |||
38 | return lid.f.id << 8 | lid.f.eid; | 38 | return lid.f.id << 8 | lid.f.eid; |
39 | } | 39 | } |
40 | 40 | ||
41 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | ||
42 | void *info, int wait); | ||
43 | |||
41 | #define hard_smp_processor_id() ia64_get_lid() | 44 | #define hard_smp_processor_id() ia64_get_lid() |
42 | 45 | ||
43 | #ifdef CONFIG_SMP | 46 | #ifdef CONFIG_SMP |
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 595112bca3cc..dff8128fa58e 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h | |||
@@ -210,6 +210,13 @@ struct task_struct; | |||
210 | extern void ia64_save_extra (struct task_struct *task); | 210 | extern void ia64_save_extra (struct task_struct *task); |
211 | extern void ia64_load_extra (struct task_struct *task); | 211 | extern void ia64_load_extra (struct task_struct *task); |
212 | 212 | ||
213 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
214 | extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next); | ||
215 | # define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n) | ||
216 | #else | ||
217 | # define IA64_ACCOUNT_ON_SWITCH(p,n) | ||
218 | #endif | ||
219 | |||
213 | #ifdef CONFIG_PERFMON | 220 | #ifdef CONFIG_PERFMON |
214 | DECLARE_PER_CPU(unsigned long, pfm_syst_info); | 221 | DECLARE_PER_CPU(unsigned long, pfm_syst_info); |
215 | # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) | 222 | # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) |
@@ -222,6 +229,7 @@ extern void ia64_load_extra (struct task_struct *task); | |||
222 | || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE()) | 229 | || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE()) |
223 | 230 | ||
224 | #define __switch_to(prev,next,last) do { \ | 231 | #define __switch_to(prev,next,last) do { \ |
232 | IA64_ACCOUNT_ON_SWITCH(prev, next); \ | ||
225 | if (IA64_HAS_EXTRA_STATE(prev)) \ | 233 | if (IA64_HAS_EXTRA_STATE(prev)) \ |
226 | ia64_save_extra(prev); \ | 234 | ia64_save_extra(prev); \ |
227 | if (IA64_HAS_EXTRA_STATE(next)) \ | 235 | if (IA64_HAS_EXTRA_STATE(next)) \ |
@@ -266,6 +274,10 @@ void cpu_idle_wait(void); | |||
266 | 274 | ||
267 | void default_idle(void); | 275 | void default_idle(void); |
268 | 276 | ||
277 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
278 | extern void account_system_vtime(struct task_struct *); | ||
279 | #endif | ||
280 | |||
269 | #endif /* __KERNEL__ */ | 281 | #endif /* __KERNEL__ */ |
270 | 282 | ||
271 | #endif /* __ASSEMBLY__ */ | 283 | #endif /* __ASSEMBLY__ */ |
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 93d83cbe0c8c..6da8069a0f77 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h | |||
@@ -31,6 +31,12 @@ struct thread_info { | |||
31 | mm_segment_t addr_limit; /* user-level address space limit */ | 31 | mm_segment_t addr_limit; /* user-level address space limit */ |
32 | int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ | 32 | int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ |
33 | struct restart_block restart_block; | 33 | struct restart_block restart_block; |
34 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
35 | __u64 ac_stamp; | ||
36 | __u64 ac_leave; | ||
37 | __u64 ac_stime; | ||
38 | __u64 ac_utime; | ||
39 | #endif | ||
34 | }; | 40 | }; |
35 | 41 | ||
36 | #define THREAD_SIZE KERNEL_STACK_SIZE | 42 | #define THREAD_SIZE KERNEL_STACK_SIZE |
@@ -62,9 +68,17 @@ struct thread_info { | |||
62 | #define task_stack_page(tsk) ((void *)(tsk)) | 68 | #define task_stack_page(tsk) ((void *)(tsk)) |
63 | 69 | ||
64 | #define __HAVE_THREAD_FUNCTIONS | 70 | #define __HAVE_THREAD_FUNCTIONS |
71 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
72 | #define setup_thread_stack(p, org) \ | ||
73 | *task_thread_info(p) = *task_thread_info(org); \ | ||
74 | task_thread_info(p)->ac_stime = 0; \ | ||
75 | task_thread_info(p)->ac_utime = 0; \ | ||
76 | task_thread_info(p)->task = (p); | ||
77 | #else | ||
65 | #define setup_thread_stack(p, org) \ | 78 | #define setup_thread_stack(p, org) \ |
66 | *task_thread_info(p) = *task_thread_info(org); \ | 79 | *task_thread_info(p) = *task_thread_info(org); \ |
67 | task_thread_info(p)->task = (p); | 80 | task_thread_info(p)->task = (p); |
81 | #endif | ||
68 | #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) | 82 | #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) |
69 | 83 | ||
70 | #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR | 84 | #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR |
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h index 26edcb750f9f..20d8a39680c2 100644 --- a/include/asm-ia64/tlb.h +++ b/include/asm-ia64/tlb.h | |||
@@ -64,6 +64,32 @@ struct mmu_gather { | |||
64 | struct page *pages[FREE_PTE_NR]; | 64 | struct page *pages[FREE_PTE_NR]; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct ia64_tr_entry { | ||
68 | u64 ifa; | ||
69 | u64 itir; | ||
70 | u64 pte; | ||
71 | u64 rr; | ||
72 | }; /*Record for tr entry!*/ | ||
73 | |||
74 | extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size); | ||
75 | extern void ia64_ptr_entry(u64 target_mask, int slot); | ||
76 | |||
77 | extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX]; | ||
78 | |||
79 | /* | ||
80 | region register macros | ||
81 | */ | ||
82 | #define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001) | ||
83 | #define RR_VE(val) (((val) & 0x0000000000000001) << 0) | ||
84 | #define RR_VE_MASK 0x0000000000000001L | ||
85 | #define RR_VE_SHIFT 0 | ||
86 | #define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f) | ||
87 | #define RR_PS(val) (((val) & 0x000000000000003f) << 2) | ||
88 | #define RR_PS_MASK 0x00000000000000fcL | ||
89 | #define RR_PS_SHIFT 2 | ||
90 | #define RR_RID_MASK 0x00000000ffffff00L | ||
91 | #define RR_TO_RID(val) ((val >> 8) & 0xffffff) | ||
92 | |||
67 | /* Users of the generic TLB shootdown code must declare this storage space. */ | 93 | /* Users of the generic TLB shootdown code must declare this storage space. */ |
68 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | 94 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); |
69 | 95 | ||
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h index 7774a1cac0cc..3be25dfed164 100644 --- a/include/asm-ia64/tlbflush.h +++ b/include/asm-ia64/tlbflush.h | |||
@@ -17,6 +17,7 @@ | |||
17 | * Now for some TLB flushing routines. This is the kind of stuff that | 17 | * Now for some TLB flushing routines. This is the kind of stuff that |
18 | * can be very expensive, so try to avoid them whenever possible. | 18 | * can be very expensive, so try to avoid them whenever possible. |
19 | */ | 19 | */ |
20 | extern void setup_ptcg_sem(int max_purges, int from_palo); | ||
20 | 21 | ||
21 | /* | 22 | /* |
22 | * Flush everything (kernel mapping may also have changed due to | 23 | * Flush everything (kernel mapping may also have changed due to |
diff --git a/include/asm-m32r/ide.h b/include/asm-m32r/ide.h index 5d2044e529ab..1e7f6474d130 100644 --- a/include/asm-m32r/ide.h +++ b/include/asm-m32r/ide.h | |||
@@ -23,8 +23,6 @@ | |||
23 | # endif | 23 | # endif |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #define IDE_ARCH_OBSOLETE_DEFAULTS | ||
27 | |||
28 | static __inline__ int ide_default_irq(unsigned long base) | 26 | static __inline__ int ide_default_irq(unsigned long base) |
29 | { | 27 | { |
30 | switch (base) { | 28 | switch (base) { |
@@ -65,14 +63,6 @@ static __inline__ unsigned long ide_default_io_base(int index) | |||
65 | } | 63 | } |
66 | } | 64 | } |
67 | 65 | ||
68 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
69 | |||
70 | #ifdef CONFIG_BLK_DEV_IDEPCI | ||
71 | #define ide_init_default_irq(base) (0) | ||
72 | #else | ||
73 | #define ide_init_default_irq(base) ide_default_irq(base) | ||
74 | #endif | ||
75 | |||
76 | #include <asm-generic/ide_iops.h> | 66 | #include <asm-generic/ide_iops.h> |
77 | 67 | ||
78 | #endif /* __KERNEL__ */ | 68 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h index b5bf95a6f2b4..d9b2034ed1d2 100644 --- a/include/asm-m32r/semaphore.h +++ b/include/asm-m32r/semaphore.h | |||
@@ -1,144 +1 @@ | |||
1 | #ifndef _ASM_M32R_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _ASM_M32R_SEMAPHORE_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | |||
8 | /* | ||
9 | * SMP- and interrupt-safe semaphores.. | ||
10 | * | ||
11 | * Copyright (C) 1996 Linus Torvalds | ||
12 | * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> | ||
13 | */ | ||
14 | |||
15 | #include <linux/wait.h> | ||
16 | #include <linux/rwsem.h> | ||
17 | #include <asm/assembler.h> | ||
18 | #include <asm/system.h> | ||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | struct semaphore { | ||
22 | atomic_t count; | ||
23 | int sleepers; | ||
24 | wait_queue_head_t wait; | ||
25 | }; | ||
26 | |||
27 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
28 | { \ | ||
29 | .count = ATOMIC_INIT(n), \ | ||
30 | .sleepers = 0, \ | ||
31 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
32 | } | ||
33 | |||
34 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
35 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
36 | |||
37 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
38 | |||
39 | static inline void sema_init (struct semaphore *sem, int val) | ||
40 | { | ||
41 | /* | ||
42 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
43 | * | ||
44 | * i'd rather use the more flexible initialization above, but sadly | ||
45 | * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well. | ||
46 | */ | ||
47 | atomic_set(&sem->count, val); | ||
48 | sem->sleepers = 0; | ||
49 | init_waitqueue_head(&sem->wait); | ||
50 | } | ||
51 | |||
52 | static inline void init_MUTEX (struct semaphore *sem) | ||
53 | { | ||
54 | sema_init(sem, 1); | ||
55 | } | ||
56 | |||
57 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
58 | { | ||
59 | sema_init(sem, 0); | ||
60 | } | ||
61 | |||
62 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
63 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
64 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
65 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
66 | |||
67 | asmlinkage void __down(struct semaphore * sem); | ||
68 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
69 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
70 | asmlinkage void __up(struct semaphore * sem); | ||
71 | |||
72 | /* | ||
73 | * Atomically decrement the semaphore's count. If it goes negative, | ||
74 | * block the calling thread in the TASK_UNINTERRUPTIBLE state. | ||
75 | */ | ||
76 | static inline void down(struct semaphore * sem) | ||
77 | { | ||
78 | might_sleep(); | ||
79 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
80 | __down(sem); | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Interruptible try to acquire a semaphore. If we obtained | ||
85 | * it, return zero. If we were interrupted, returns -EINTR | ||
86 | */ | ||
87 | static inline int down_interruptible(struct semaphore * sem) | ||
88 | { | ||
89 | int result = 0; | ||
90 | |||
91 | might_sleep(); | ||
92 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
93 | result = __down_interruptible(sem); | ||
94 | |||
95 | return result; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * Non-blockingly attempt to down() a semaphore. | ||
100 | * Returns zero if we acquired it | ||
101 | */ | ||
102 | static inline int down_trylock(struct semaphore * sem) | ||
103 | { | ||
104 | unsigned long flags; | ||
105 | long count; | ||
106 | int result = 0; | ||
107 | |||
108 | local_irq_save(flags); | ||
109 | __asm__ __volatile__ ( | ||
110 | "# down_trylock \n\t" | ||
111 | DCACHE_CLEAR("%0", "r4", "%1") | ||
112 | M32R_LOCK" %0, @%1; \n\t" | ||
113 | "addi %0, #-1; \n\t" | ||
114 | M32R_UNLOCK" %0, @%1; \n\t" | ||
115 | : "=&r" (count) | ||
116 | : "r" (&sem->count) | ||
117 | : "memory" | ||
118 | #ifdef CONFIG_CHIP_M32700_TS1 | ||
119 | , "r4" | ||
120 | #endif /* CONFIG_CHIP_M32700_TS1 */ | ||
121 | ); | ||
122 | local_irq_restore(flags); | ||
123 | |||
124 | if (unlikely(count < 0)) | ||
125 | result = __down_trylock(sem); | ||
126 | |||
127 | return result; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * Note! This is subtle. We jump to wake people up only if | ||
132 | * the semaphore was negative (== somebody was waiting on it). | ||
133 | * The default case (no contention) will result in NO | ||
134 | * jumps for both down() and up(). | ||
135 | */ | ||
136 | static inline void up(struct semaphore * sem) | ||
137 | { | ||
138 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) | ||
139 | __up(sem); | ||
140 | } | ||
141 | |||
142 | #endif /* __KERNEL__ */ | ||
143 | |||
144 | #endif /* _ASM_M32R_SEMAPHORE_H */ | ||
diff --git a/include/asm-m68k/semaphore-helper.h b/include/asm-m68k/semaphore-helper.h deleted file mode 100644 index eef30ba0b499..000000000000 --- a/include/asm-m68k/semaphore-helper.h +++ /dev/null | |||
@@ -1,142 +0,0 @@ | |||
1 | #ifndef _M68K_SEMAPHORE_HELPER_H | ||
2 | #define _M68K_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * | ||
9 | * m68k version by Andreas Schwab | ||
10 | */ | ||
11 | |||
12 | #include <linux/errno.h> | ||
13 | |||
14 | /* | ||
15 | * These two _must_ execute atomically wrt each other. | ||
16 | */ | ||
17 | static inline void wake_one_more(struct semaphore * sem) | ||
18 | { | ||
19 | atomic_inc(&sem->waking); | ||
20 | } | ||
21 | |||
22 | #ifndef CONFIG_RMW_INSNS | ||
23 | extern spinlock_t semaphore_wake_lock; | ||
24 | #endif | ||
25 | |||
26 | static inline int waking_non_zero(struct semaphore *sem) | ||
27 | { | ||
28 | int ret; | ||
29 | #ifndef CONFIG_RMW_INSNS | ||
30 | unsigned long flags; | ||
31 | |||
32 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
33 | ret = 0; | ||
34 | if (atomic_read(&sem->waking) > 0) { | ||
35 | atomic_dec(&sem->waking); | ||
36 | ret = 1; | ||
37 | } | ||
38 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
39 | #else | ||
40 | int tmp1, tmp2; | ||
41 | |||
42 | __asm__ __volatile__ | ||
43 | ("1: movel %1,%2\n" | ||
44 | " jle 2f\n" | ||
45 | " subql #1,%2\n" | ||
46 | " casl %1,%2,%3\n" | ||
47 | " jne 1b\n" | ||
48 | " moveq #1,%0\n" | ||
49 | "2:" | ||
50 | : "=d" (ret), "=d" (tmp1), "=d" (tmp2) | ||
51 | : "m" (sem->waking), "0" (0), "1" (sem->waking)); | ||
52 | #endif | ||
53 | |||
54 | return ret; | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * waking_non_zero_interruptible: | ||
59 | * 1 got the lock | ||
60 | * 0 go to sleep | ||
61 | * -EINTR interrupted | ||
62 | */ | ||
63 | static inline int waking_non_zero_interruptible(struct semaphore *sem, | ||
64 | struct task_struct *tsk) | ||
65 | { | ||
66 | int ret; | ||
67 | #ifndef CONFIG_RMW_INSNS | ||
68 | unsigned long flags; | ||
69 | |||
70 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
71 | ret = 0; | ||
72 | if (atomic_read(&sem->waking) > 0) { | ||
73 | atomic_dec(&sem->waking); | ||
74 | ret = 1; | ||
75 | } else if (signal_pending(tsk)) { | ||
76 | atomic_inc(&sem->count); | ||
77 | ret = -EINTR; | ||
78 | } | ||
79 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
80 | #else | ||
81 | int tmp1, tmp2; | ||
82 | |||
83 | __asm__ __volatile__ | ||
84 | ("1: movel %1,%2\n" | ||
85 | " jle 2f\n" | ||
86 | " subql #1,%2\n" | ||
87 | " casl %1,%2,%3\n" | ||
88 | " jne 1b\n" | ||
89 | " moveq #1,%0\n" | ||
90 | " jra %a4\n" | ||
91 | "2:" | ||
92 | : "=d" (ret), "=d" (tmp1), "=d" (tmp2) | ||
93 | : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking)); | ||
94 | if (signal_pending(tsk)) { | ||
95 | atomic_inc(&sem->count); | ||
96 | ret = -EINTR; | ||
97 | } | ||
98 | next: | ||
99 | #endif | ||
100 | |||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * waking_non_zero_trylock: | ||
106 | * 1 failed to lock | ||
107 | * 0 got the lock | ||
108 | */ | ||
109 | static inline int waking_non_zero_trylock(struct semaphore *sem) | ||
110 | { | ||
111 | int ret; | ||
112 | #ifndef CONFIG_RMW_INSNS | ||
113 | unsigned long flags; | ||
114 | |||
115 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
116 | ret = 1; | ||
117 | if (atomic_read(&sem->waking) > 0) { | ||
118 | atomic_dec(&sem->waking); | ||
119 | ret = 0; | ||
120 | } else | ||
121 | atomic_inc(&sem->count); | ||
122 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
123 | #else | ||
124 | int tmp1, tmp2; | ||
125 | |||
126 | __asm__ __volatile__ | ||
127 | ("1: movel %1,%2\n" | ||
128 | " jle 2f\n" | ||
129 | " subql #1,%2\n" | ||
130 | " casl %1,%2,%3\n" | ||
131 | " jne 1b\n" | ||
132 | " moveq #0,%0\n" | ||
133 | "2:" | ||
134 | : "=d" (ret), "=d" (tmp1), "=d" (tmp2) | ||
135 | : "m" (sem->waking), "0" (1), "1" (sem->waking)); | ||
136 | if (ret) | ||
137 | atomic_inc(&sem->count); | ||
138 | #endif | ||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | #endif | ||
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h index 64d6b119bb0a..d9b2034ed1d2 100644 --- a/include/asm-m68k/semaphore.h +++ b/include/asm-m68k/semaphore.h | |||
@@ -1,163 +1 @@ | |||
1 | #ifndef _M68K_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _M68K_SEMAPHORE_H | ||
3 | |||
4 | #define RW_LOCK_BIAS 0x01000000 | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | |||
8 | #include <linux/linkage.h> | ||
9 | #include <linux/wait.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/rwsem.h> | ||
12 | #include <linux/stringify.h> | ||
13 | |||
14 | #include <asm/system.h> | ||
15 | #include <asm/atomic.h> | ||
16 | |||
17 | /* | ||
18 | * Interrupt-safe semaphores.. | ||
19 | * | ||
20 | * (C) Copyright 1996 Linus Torvalds | ||
21 | * | ||
22 | * m68k version by Andreas Schwab | ||
23 | */ | ||
24 | |||
25 | |||
26 | struct semaphore { | ||
27 | atomic_t count; | ||
28 | atomic_t waking; | ||
29 | wait_queue_head_t wait; | ||
30 | }; | ||
31 | |||
32 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
33 | { \ | ||
34 | .count = ATOMIC_INIT(n), \ | ||
35 | .waking = ATOMIC_INIT(0), \ | ||
36 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
37 | } | ||
38 | |||
39 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
40 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
41 | |||
42 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
43 | |||
44 | static inline void sema_init(struct semaphore *sem, int val) | ||
45 | { | ||
46 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val); | ||
47 | } | ||
48 | |||
49 | static inline void init_MUTEX (struct semaphore *sem) | ||
50 | { | ||
51 | sema_init(sem, 1); | ||
52 | } | ||
53 | |||
54 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
55 | { | ||
56 | sema_init(sem, 0); | ||
57 | } | ||
58 | |||
59 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
60 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
61 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
62 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
63 | |||
64 | asmlinkage void __down(struct semaphore * sem); | ||
65 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
66 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
67 | asmlinkage void __up(struct semaphore * sem); | ||
68 | |||
69 | /* | ||
70 | * This is ugly, but we want the default case to fall through. | ||
71 | * "down_failed" is a special asm handler that calls the C | ||
72 | * routine that actually waits. See arch/m68k/lib/semaphore.S | ||
73 | */ | ||
74 | static inline void down(struct semaphore *sem) | ||
75 | { | ||
76 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
77 | |||
78 | might_sleep(); | ||
79 | __asm__ __volatile__( | ||
80 | "| atomic down operation\n\t" | ||
81 | "subql #1,%0@\n\t" | ||
82 | "jmi 2f\n\t" | ||
83 | "1:\n" | ||
84 | LOCK_SECTION_START(".even\n\t") | ||
85 | "2:\tpea 1b\n\t" | ||
86 | "jbra __down_failed\n" | ||
87 | LOCK_SECTION_END | ||
88 | : /* no outputs */ | ||
89 | : "a" (sem1) | ||
90 | : "memory"); | ||
91 | } | ||
92 | |||
93 | static inline int down_interruptible(struct semaphore *sem) | ||
94 | { | ||
95 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
96 | register int result __asm__ ("%d0"); | ||
97 | |||
98 | might_sleep(); | ||
99 | __asm__ __volatile__( | ||
100 | "| atomic interruptible down operation\n\t" | ||
101 | "subql #1,%1@\n\t" | ||
102 | "jmi 2f\n\t" | ||
103 | "clrl %0\n" | ||
104 | "1:\n" | ||
105 | LOCK_SECTION_START(".even\n\t") | ||
106 | "2:\tpea 1b\n\t" | ||
107 | "jbra __down_failed_interruptible\n" | ||
108 | LOCK_SECTION_END | ||
109 | : "=d" (result) | ||
110 | : "a" (sem1) | ||
111 | : "memory"); | ||
112 | return result; | ||
113 | } | ||
114 | |||
115 | static inline int down_trylock(struct semaphore *sem) | ||
116 | { | ||
117 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
118 | register int result __asm__ ("%d0"); | ||
119 | |||
120 | __asm__ __volatile__( | ||
121 | "| atomic down trylock operation\n\t" | ||
122 | "subql #1,%1@\n\t" | ||
123 | "jmi 2f\n\t" | ||
124 | "clrl %0\n" | ||
125 | "1:\n" | ||
126 | LOCK_SECTION_START(".even\n\t") | ||
127 | "2:\tpea 1b\n\t" | ||
128 | "jbra __down_failed_trylock\n" | ||
129 | LOCK_SECTION_END | ||
130 | : "=d" (result) | ||
131 | : "a" (sem1) | ||
132 | : "memory"); | ||
133 | return result; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Note! This is subtle. We jump to wake people up only if | ||
138 | * the semaphore was negative (== somebody was waiting on it). | ||
139 | * The default case (no contention) will result in NO | ||
140 | * jumps for both down() and up(). | ||
141 | */ | ||
142 | static inline void up(struct semaphore *sem) | ||
143 | { | ||
144 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
145 | |||
146 | __asm__ __volatile__( | ||
147 | "| atomic up operation\n\t" | ||
148 | "addql #1,%0@\n\t" | ||
149 | "jle 2f\n" | ||
150 | "1:\n" | ||
151 | LOCK_SECTION_START(".even\n\t") | ||
152 | "2:\t" | ||
153 | "pea 1b\n\t" | ||
154 | "jbra __up_wakeup\n" | ||
155 | LOCK_SECTION_END | ||
156 | : /* no outputs */ | ||
157 | : "a" (sem1) | ||
158 | : "memory"); | ||
159 | } | ||
160 | |||
161 | #endif /* __ASSEMBLY__ */ | ||
162 | |||
163 | #endif | ||
diff --git a/include/asm-m68knommu/semaphore-helper.h b/include/asm-m68knommu/semaphore-helper.h deleted file mode 100644 index 43da7bc483c7..000000000000 --- a/include/asm-m68knommu/semaphore-helper.h +++ /dev/null | |||
@@ -1,82 +0,0 @@ | |||
1 | #ifndef _M68K_SEMAPHORE_HELPER_H | ||
2 | #define _M68K_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * | ||
9 | * m68k version by Andreas Schwab | ||
10 | */ | ||
11 | |||
12 | |||
13 | /* | ||
14 | * These two _must_ execute atomically wrt each other. | ||
15 | */ | ||
16 | static inline void wake_one_more(struct semaphore * sem) | ||
17 | { | ||
18 | atomic_inc(&sem->waking); | ||
19 | } | ||
20 | |||
21 | static inline int waking_non_zero(struct semaphore *sem) | ||
22 | { | ||
23 | int ret; | ||
24 | unsigned long flags; | ||
25 | |||
26 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
27 | ret = 0; | ||
28 | if (atomic_read(&sem->waking) > 0) { | ||
29 | atomic_dec(&sem->waking); | ||
30 | ret = 1; | ||
31 | } | ||
32 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * waking_non_zero_interruptible: | ||
38 | * 1 got the lock | ||
39 | * 0 go to sleep | ||
40 | * -EINTR interrupted | ||
41 | */ | ||
42 | static inline int waking_non_zero_interruptible(struct semaphore *sem, | ||
43 | struct task_struct *tsk) | ||
44 | { | ||
45 | int ret; | ||
46 | unsigned long flags; | ||
47 | |||
48 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
49 | ret = 0; | ||
50 | if (atomic_read(&sem->waking) > 0) { | ||
51 | atomic_dec(&sem->waking); | ||
52 | ret = 1; | ||
53 | } else if (signal_pending(tsk)) { | ||
54 | atomic_inc(&sem->count); | ||
55 | ret = -EINTR; | ||
56 | } | ||
57 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
58 | return ret; | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * waking_non_zero_trylock: | ||
63 | * 1 failed to lock | ||
64 | * 0 got the lock | ||
65 | */ | ||
66 | static inline int waking_non_zero_trylock(struct semaphore *sem) | ||
67 | { | ||
68 | int ret; | ||
69 | unsigned long flags; | ||
70 | |||
71 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
72 | ret = 1; | ||
73 | if (atomic_read(&sem->waking) > 0) { | ||
74 | atomic_dec(&sem->waking); | ||
75 | ret = 0; | ||
76 | } else | ||
77 | atomic_inc(&sem->count); | ||
78 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
79 | return ret; | ||
80 | } | ||
81 | |||
82 | #endif | ||
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h index 5779eb6c0689..d9b2034ed1d2 100644 --- a/include/asm-m68knommu/semaphore.h +++ b/include/asm-m68knommu/semaphore.h | |||
@@ -1,153 +1 @@ | |||
1 | #ifndef _M68K_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _M68K_SEMAPHORE_H | ||
3 | |||
4 | #define RW_LOCK_BIAS 0x01000000 | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | |||
8 | #include <linux/linkage.h> | ||
9 | #include <linux/wait.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/rwsem.h> | ||
12 | |||
13 | #include <asm/system.h> | ||
14 | #include <asm/atomic.h> | ||
15 | |||
16 | /* | ||
17 | * Interrupt-safe semaphores.. | ||
18 | * | ||
19 | * (C) Copyright 1996 Linus Torvalds | ||
20 | * | ||
21 | * m68k version by Andreas Schwab | ||
22 | */ | ||
23 | |||
24 | |||
25 | struct semaphore { | ||
26 | atomic_t count; | ||
27 | atomic_t waking; | ||
28 | wait_queue_head_t wait; | ||
29 | }; | ||
30 | |||
31 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
32 | { \ | ||
33 | .count = ATOMIC_INIT(n), \ | ||
34 | .waking = ATOMIC_INIT(0), \ | ||
35 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
36 | } | ||
37 | |||
38 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
39 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
40 | |||
41 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
42 | |||
43 | static inline void sema_init (struct semaphore *sem, int val) | ||
44 | { | ||
45 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val); | ||
46 | } | ||
47 | |||
48 | static inline void init_MUTEX (struct semaphore *sem) | ||
49 | { | ||
50 | sema_init(sem, 1); | ||
51 | } | ||
52 | |||
53 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
54 | { | ||
55 | sema_init(sem, 0); | ||
56 | } | ||
57 | |||
58 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
59 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
60 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
61 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
62 | |||
63 | asmlinkage void __down(struct semaphore * sem); | ||
64 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
65 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
66 | asmlinkage void __up(struct semaphore * sem); | ||
67 | |||
68 | extern spinlock_t semaphore_wake_lock; | ||
69 | |||
70 | /* | ||
71 | * This is ugly, but we want the default case to fall through. | ||
72 | * "down_failed" is a special asm handler that calls the C | ||
73 | * routine that actually waits. See arch/m68k/lib/semaphore.S | ||
74 | */ | ||
75 | static inline void down(struct semaphore * sem) | ||
76 | { | ||
77 | might_sleep(); | ||
78 | __asm__ __volatile__( | ||
79 | "| atomic down operation\n\t" | ||
80 | "movel %0, %%a1\n\t" | ||
81 | "lea %%pc@(1f), %%a0\n\t" | ||
82 | "subql #1, %%a1@\n\t" | ||
83 | "jmi __down_failed\n" | ||
84 | "1:" | ||
85 | : /* no outputs */ | ||
86 | : "g" (sem) | ||
87 | : "cc", "%a0", "%a1", "memory"); | ||
88 | } | ||
89 | |||
90 | static inline int down_interruptible(struct semaphore * sem) | ||
91 | { | ||
92 | int ret; | ||
93 | |||
94 | might_sleep(); | ||
95 | __asm__ __volatile__( | ||
96 | "| atomic down operation\n\t" | ||
97 | "movel %1, %%a1\n\t" | ||
98 | "lea %%pc@(1f), %%a0\n\t" | ||
99 | "subql #1, %%a1@\n\t" | ||
100 | "jmi __down_failed_interruptible\n\t" | ||
101 | "clrl %%d0\n" | ||
102 | "1: movel %%d0, %0\n" | ||
103 | : "=d" (ret) | ||
104 | : "g" (sem) | ||
105 | : "cc", "%d0", "%a0", "%a1", "memory"); | ||
106 | return(ret); | ||
107 | } | ||
108 | |||
109 | static inline int down_trylock(struct semaphore * sem) | ||
110 | { | ||
111 | register struct semaphore *sem1 __asm__ ("%a1") = sem; | ||
112 | register int result __asm__ ("%d0"); | ||
113 | |||
114 | __asm__ __volatile__( | ||
115 | "| atomic down trylock operation\n\t" | ||
116 | "subql #1,%1@\n\t" | ||
117 | "jmi 2f\n\t" | ||
118 | "clrl %0\n" | ||
119 | "1:\n" | ||
120 | ".section .text.lock,\"ax\"\n" | ||
121 | ".even\n" | ||
122 | "2:\tpea 1b\n\t" | ||
123 | "jbra __down_failed_trylock\n" | ||
124 | ".previous" | ||
125 | : "=d" (result) | ||
126 | : "a" (sem1) | ||
127 | : "memory"); | ||
128 | return result; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Note! This is subtle. We jump to wake people up only if | ||
133 | * the semaphore was negative (== somebody was waiting on it). | ||
134 | * The default case (no contention) will result in NO | ||
135 | * jumps for both down() and up(). | ||
136 | */ | ||
137 | static inline void up(struct semaphore * sem) | ||
138 | { | ||
139 | __asm__ __volatile__( | ||
140 | "| atomic up operation\n\t" | ||
141 | "movel %0, %%a1\n\t" | ||
142 | "lea %%pc@(1f), %%a0\n\t" | ||
143 | "addql #1, %%a1@\n\t" | ||
144 | "jle __up_wakeup\n" | ||
145 | "1:" | ||
146 | : /* no outputs */ | ||
147 | : "g" (sem) | ||
148 | : "cc", "%a0", "%a1", "memory"); | ||
149 | } | ||
150 | |||
151 | #endif /* __ASSEMBLY__ */ | ||
152 | |||
153 | #endif | ||
diff --git a/include/asm-mips/mach-au1x00/au1xxx_ide.h b/include/asm-mips/mach-au1x00/au1xxx_ide.h index e4fe26c160ba..89655c0cdcd6 100644 --- a/include/asm-mips/mach-au1x00/au1xxx_ide.h +++ b/include/asm-mips/mach-au1x00/au1xxx_ide.h | |||
@@ -122,24 +122,6 @@ static const struct drive_list_entry dma_black_list [] = { | |||
122 | }; | 122 | }; |
123 | #endif | 123 | #endif |
124 | 124 | ||
125 | /* function prototyping */ | ||
126 | u8 auide_inb(unsigned long port); | ||
127 | u16 auide_inw(unsigned long port); | ||
128 | u32 auide_inl(unsigned long port); | ||
129 | void auide_insw(unsigned long port, void *addr, u32 count); | ||
130 | void auide_insl(unsigned long port, void *addr, u32 count); | ||
131 | void auide_outb(u8 addr, unsigned long port); | ||
132 | void auide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port); | ||
133 | void auide_outw(u16 addr, unsigned long port); | ||
134 | void auide_outl(u32 addr, unsigned long port); | ||
135 | void auide_outsw(unsigned long port, void *addr, u32 count); | ||
136 | void auide_outsl(unsigned long port, void *addr, u32 count); | ||
137 | static void auide_tune_drive(ide_drive_t *drive, byte pio); | ||
138 | static int auide_tune_chipset(ide_drive_t *drive, u8 speed); | ||
139 | static int auide_ddma_init( _auide_hwif *auide ); | ||
140 | static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif); | ||
141 | int __init auide_probe(void); | ||
142 | |||
143 | /******************************************************************************* | 125 | /******************************************************************************* |
144 | * PIO Mode timing calculation : * | 126 | * PIO Mode timing calculation : * |
145 | * * | 127 | * * |
diff --git a/include/asm-mips/mach-db1x00/db1200.h b/include/asm-mips/mach-db1x00/db1200.h index a6bdac61ab49..d2e28e64932e 100644 --- a/include/asm-mips/mach-db1x00/db1200.h +++ b/include/asm-mips/mach-db1x00/db1200.h | |||
@@ -173,8 +173,8 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR; | |||
173 | #define AU1XXX_SMC91111_IRQ DB1200_ETH_INT | 173 | #define AU1XXX_SMC91111_IRQ DB1200_ETH_INT |
174 | 174 | ||
175 | #define AU1XXX_ATA_PHYS_ADDR (0x18800000) | 175 | #define AU1XXX_ATA_PHYS_ADDR (0x18800000) |
176 | #define AU1XXX_ATA_PHYS_LEN (0x100) | 176 | #define AU1XXX_ATA_REG_OFFSET (5) |
177 | #define AU1XXX_ATA_REG_OFFSET (5) | 177 | #define AU1XXX_ATA_PHYS_LEN (16 << AU1XXX_ATA_REG_OFFSET) |
178 | #define AU1XXX_ATA_INT DB1200_IDE_INT | 178 | #define AU1XXX_ATA_INT DB1200_IDE_INT |
179 | #define AU1XXX_ATA_DDMA_REQ DSCR_CMD0_DMA_REQ1; | 179 | #define AU1XXX_ATA_DDMA_REQ DSCR_CMD0_DMA_REQ1; |
180 | #define AU1XXX_ATA_RQSIZE 128 | 180 | #define AU1XXX_ATA_RQSIZE 128 |
diff --git a/include/asm-mips/mach-generic/ide.h b/include/asm-mips/mach-generic/ide.h index 4ec2b930dfbb..0f6c251f5fec 100644 --- a/include/asm-mips/mach-generic/ide.h +++ b/include/asm-mips/mach-generic/ide.h | |||
@@ -27,8 +27,6 @@ | |||
27 | # endif | 27 | # endif |
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | #define IDE_ARCH_OBSOLETE_DEFAULTS | ||
31 | |||
32 | static __inline__ int ide_probe_legacy(void) | 30 | static __inline__ int ide_probe_legacy(void) |
33 | { | 31 | { |
34 | #ifdef CONFIG_PCI | 32 | #ifdef CONFIG_PCI |
@@ -98,14 +96,6 @@ static __inline__ unsigned long ide_default_io_base(int index) | |||
98 | } | 96 | } |
99 | } | 97 | } |
100 | 98 | ||
101 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
102 | |||
103 | #ifdef CONFIG_BLK_DEV_IDEPCI | ||
104 | #define ide_init_default_irq(base) (0) | ||
105 | #else | ||
106 | #define ide_init_default_irq(base) ide_default_irq(base) | ||
107 | #endif | ||
108 | |||
109 | /* MIPS port and memory-mapped I/O string operations. */ | 99 | /* MIPS port and memory-mapped I/O string operations. */ |
110 | static inline void __ide_flush_prologue(void) | 100 | static inline void __ide_flush_prologue(void) |
111 | { | 101 | { |
diff --git a/include/asm-mips/mach-pb1x00/pb1200.h b/include/asm-mips/mach-pb1x00/pb1200.h index 72213e3d02c7..edaa489b58f1 100644 --- a/include/asm-mips/mach-pb1x00/pb1200.h +++ b/include/asm-mips/mach-pb1x00/pb1200.h | |||
@@ -186,8 +186,8 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR; | |||
186 | #define AU1XXX_SMC91111_IRQ PB1200_ETH_INT | 186 | #define AU1XXX_SMC91111_IRQ PB1200_ETH_INT |
187 | 187 | ||
188 | #define AU1XXX_ATA_PHYS_ADDR (0x0C800000) | 188 | #define AU1XXX_ATA_PHYS_ADDR (0x0C800000) |
189 | #define AU1XXX_ATA_PHYS_LEN (0x100) | 189 | #define AU1XXX_ATA_REG_OFFSET (5) |
190 | #define AU1XXX_ATA_REG_OFFSET (5) | 190 | #define AU1XXX_ATA_PHYS_LEN (16 << AU1XXX_ATA_REG_OFFSET) |
191 | #define AU1XXX_ATA_INT PB1200_IDE_INT | 191 | #define AU1XXX_ATA_INT PB1200_IDE_INT |
192 | #define AU1XXX_ATA_DDMA_REQ DSCR_CMD0_DMA_REQ1; | 192 | #define AU1XXX_ATA_DDMA_REQ DSCR_CMD0_DMA_REQ1; |
193 | #define AU1XXX_ATA_RQSIZE 128 | 193 | #define AU1XXX_ATA_RQSIZE 128 |
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h index fdf8042b784b..d9b2034ed1d2 100644 --- a/include/asm-mips/semaphore.h +++ b/include/asm-mips/semaphore.h | |||
@@ -1,108 +1 @@ | |||
1 | /* | #include <linux/semaphore.h> | |
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1996 Linus Torvalds | ||
7 | * Copyright (C) 1998, 99, 2000, 01, 04 Ralf Baechle | ||
8 | * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc. | ||
9 | * Copyright (C) 2000, 01 MIPS Technologies, Inc. | ||
10 | * | ||
11 | * In all honesty, little of the old MIPS code left - the PPC64 variant was | ||
12 | * just looking nice and portable so I ripped it. Credits to whoever wrote | ||
13 | * it. | ||
14 | */ | ||
15 | #ifndef __ASM_SEMAPHORE_H | ||
16 | #define __ASM_SEMAPHORE_H | ||
17 | |||
18 | /* | ||
19 | * Remove spinlock-based RW semaphores; RW semaphore definitions are | ||
20 | * now in rwsem.h and we use the generic lib/rwsem.c implementation. | ||
21 | * Rework semaphores to use atomic_dec_if_positive. | ||
22 | * -- Paul Mackerras (paulus@samba.org) | ||
23 | */ | ||
24 | |||
25 | #ifdef __KERNEL__ | ||
26 | |||
27 | #include <asm/atomic.h> | ||
28 | #include <asm/system.h> | ||
29 | #include <linux/wait.h> | ||
30 | #include <linux/rwsem.h> | ||
31 | |||
32 | struct semaphore { | ||
33 | /* | ||
34 | * Note that any negative value of count is equivalent to 0, | ||
35 | * but additionally indicates that some process(es) might be | ||
36 | * sleeping on `wait'. | ||
37 | */ | ||
38 | atomic_t count; | ||
39 | wait_queue_head_t wait; | ||
40 | }; | ||
41 | |||
42 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
43 | { \ | ||
44 | .count = ATOMIC_INIT(n), \ | ||
45 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
46 | } | ||
47 | |||
48 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
49 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) | ||
50 | |||
51 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
52 | |||
53 | static inline void sema_init(struct semaphore *sem, int val) | ||
54 | { | ||
55 | atomic_set(&sem->count, val); | ||
56 | init_waitqueue_head(&sem->wait); | ||
57 | } | ||
58 | |||
59 | static inline void init_MUTEX(struct semaphore *sem) | ||
60 | { | ||
61 | sema_init(sem, 1); | ||
62 | } | ||
63 | |||
64 | static inline void init_MUTEX_LOCKED(struct semaphore *sem) | ||
65 | { | ||
66 | sema_init(sem, 0); | ||
67 | } | ||
68 | |||
69 | extern void __down(struct semaphore * sem); | ||
70 | extern int __down_interruptible(struct semaphore * sem); | ||
71 | extern void __up(struct semaphore * sem); | ||
72 | |||
73 | static inline void down(struct semaphore * sem) | ||
74 | { | ||
75 | might_sleep(); | ||
76 | |||
77 | /* | ||
78 | * Try to get the semaphore, take the slow path if we fail. | ||
79 | */ | ||
80 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
81 | __down(sem); | ||
82 | } | ||
83 | |||
84 | static inline int down_interruptible(struct semaphore * sem) | ||
85 | { | ||
86 | int ret = 0; | ||
87 | |||
88 | might_sleep(); | ||
89 | |||
90 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
91 | ret = __down_interruptible(sem); | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | static inline int down_trylock(struct semaphore * sem) | ||
96 | { | ||
97 | return atomic_dec_if_positive(&sem->count) < 0; | ||
98 | } | ||
99 | |||
100 | static inline void up(struct semaphore * sem) | ||
101 | { | ||
102 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) | ||
103 | __up(sem); | ||
104 | } | ||
105 | |||
106 | #endif /* __KERNEL__ */ | ||
107 | |||
108 | #endif /* __ASM_SEMAPHORE_H */ | ||
diff --git a/include/asm-mn10300/semaphore.h b/include/asm-mn10300/semaphore.h index 5a9e1ad0b253..d9b2034ed1d2 100644 --- a/include/asm-mn10300/semaphore.h +++ b/include/asm-mn10300/semaphore.h | |||
@@ -1,169 +1 @@ | |||
1 | /* MN10300 Semaphores | #include <linux/semaphore.h> | |
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #ifndef _ASM_SEMAPHORE_H | ||
12 | #define _ASM_SEMAPHORE_H | ||
13 | |||
14 | #ifndef __ASSEMBLY__ | ||
15 | |||
16 | #include <linux/linkage.h> | ||
17 | #include <linux/wait.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/rwsem.h> | ||
20 | |||
21 | #define SEMAPHORE_DEBUG 0 | ||
22 | |||
23 | /* | ||
24 | * the semaphore definition | ||
25 | * - if count is >0 then there are tokens available on the semaphore for down | ||
26 | * to collect | ||
27 | * - if count is <=0 then there are no spare tokens, and anyone that wants one | ||
28 | * must wait | ||
29 | * - if wait_list is not empty, then there are processes waiting for the | ||
30 | * semaphore | ||
31 | */ | ||
32 | struct semaphore { | ||
33 | atomic_t count; /* it's not really atomic, it's | ||
34 | * just that certain modules | ||
35 | * expect to be able to access | ||
36 | * it directly */ | ||
37 | spinlock_t wait_lock; | ||
38 | struct list_head wait_list; | ||
39 | #if SEMAPHORE_DEBUG | ||
40 | unsigned __magic; | ||
41 | #endif | ||
42 | }; | ||
43 | |||
44 | #if SEMAPHORE_DEBUG | ||
45 | # define __SEM_DEBUG_INIT(name) , (long)&(name).__magic | ||
46 | #else | ||
47 | # define __SEM_DEBUG_INIT(name) | ||
48 | #endif | ||
49 | |||
50 | |||
51 | #define __SEMAPHORE_INITIALIZER(name, init_count) \ | ||
52 | { \ | ||
53 | .count = ATOMIC_INIT(init_count), \ | ||
54 | .wait_lock = __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
55 | .wait_list = LIST_HEAD_INIT((name).wait_list) \ | ||
56 | __SEM_DEBUG_INIT(name) \ | ||
57 | } | ||
58 | |||
59 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
60 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) | ||
61 | |||
62 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
63 | #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) | ||
64 | |||
65 | static inline void sema_init(struct semaphore *sem, int val) | ||
66 | { | ||
67 | *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); | ||
68 | } | ||
69 | |||
70 | static inline void init_MUTEX(struct semaphore *sem) | ||
71 | { | ||
72 | sema_init(sem, 1); | ||
73 | } | ||
74 | |||
75 | static inline void init_MUTEX_LOCKED(struct semaphore *sem) | ||
76 | { | ||
77 | sema_init(sem, 0); | ||
78 | } | ||
79 | |||
80 | extern void __down(struct semaphore *sem, unsigned long flags); | ||
81 | extern int __down_interruptible(struct semaphore *sem, unsigned long flags); | ||
82 | extern void __up(struct semaphore *sem); | ||
83 | |||
84 | static inline void down(struct semaphore *sem) | ||
85 | { | ||
86 | unsigned long flags; | ||
87 | int count; | ||
88 | |||
89 | #if SEMAPHORE_DEBUG | ||
90 | CHECK_MAGIC(sem->__magic); | ||
91 | #endif | ||
92 | |||
93 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
94 | count = atomic_read(&sem->count); | ||
95 | if (likely(count > 0)) { | ||
96 | atomic_set(&sem->count, count - 1); | ||
97 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
98 | } else { | ||
99 | __down(sem, flags); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | static inline int down_interruptible(struct semaphore *sem) | ||
104 | { | ||
105 | unsigned long flags; | ||
106 | int count, ret = 0; | ||
107 | |||
108 | #if SEMAPHORE_DEBUG | ||
109 | CHECK_MAGIC(sem->__magic); | ||
110 | #endif | ||
111 | |||
112 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
113 | count = atomic_read(&sem->count); | ||
114 | if (likely(count > 0)) { | ||
115 | atomic_set(&sem->count, count - 1); | ||
116 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
117 | } else { | ||
118 | ret = __down_interruptible(sem, flags); | ||
119 | } | ||
120 | return ret; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * non-blockingly attempt to down() a semaphore. | ||
125 | * - returns zero if we acquired it | ||
126 | */ | ||
127 | static inline int down_trylock(struct semaphore *sem) | ||
128 | { | ||
129 | unsigned long flags; | ||
130 | int count, success = 0; | ||
131 | |||
132 | #if SEMAPHORE_DEBUG | ||
133 | CHECK_MAGIC(sem->__magic); | ||
134 | #endif | ||
135 | |||
136 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
137 | count = atomic_read(&sem->count); | ||
138 | if (likely(count > 0)) { | ||
139 | atomic_set(&sem->count, count - 1); | ||
140 | success = 1; | ||
141 | } | ||
142 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
143 | return !success; | ||
144 | } | ||
145 | |||
146 | static inline void up(struct semaphore *sem) | ||
147 | { | ||
148 | unsigned long flags; | ||
149 | |||
150 | #if SEMAPHORE_DEBUG | ||
151 | CHECK_MAGIC(sem->__magic); | ||
152 | #endif | ||
153 | |||
154 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
155 | if (!list_empty(&sem->wait_list)) | ||
156 | __up(sem); | ||
157 | else | ||
158 | atomic_set(&sem->count, atomic_read(&sem->count) + 1); | ||
159 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
160 | } | ||
161 | |||
162 | static inline int sem_getcount(struct semaphore *sem) | ||
163 | { | ||
164 | return atomic_read(&sem->count); | ||
165 | } | ||
166 | |||
167 | #endif /* __ASSEMBLY__ */ | ||
168 | |||
169 | #endif | ||
diff --git a/include/asm-parisc/ide.h b/include/asm-parisc/ide.h index be8760fbc8ee..db0c94410095 100644 --- a/include/asm-parisc/ide.h +++ b/include/asm-parisc/ide.h | |||
@@ -17,8 +17,6 @@ | |||
17 | #define MAX_HWIFS 2 | 17 | #define MAX_HWIFS 2 |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
21 | |||
22 | #define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id)) | 20 | #define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id)) |
23 | #define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id)) | 21 | #define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id)) |
24 | #define ide_request_region(from,extent,name) request_region((from), (extent), (name)) | 22 | #define ide_request_region(from,extent,name) request_region((from), (extent), (name)) |
diff --git a/include/asm-parisc/semaphore-helper.h b/include/asm-parisc/semaphore-helper.h deleted file mode 100644 index 387f7c1277a2..000000000000 --- a/include/asm-parisc/semaphore-helper.h +++ /dev/null | |||
@@ -1,89 +0,0 @@ | |||
1 | #ifndef _ASM_PARISC_SEMAPHORE_HELPER_H | ||
2 | #define _ASM_PARISC_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * (C) Copyright 1999 Andrea Arcangeli | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * These two _must_ execute atomically wrt each other. | ||
13 | * | ||
14 | * This is trivially done with load_locked/store_cond, | ||
15 | * which we have. Let the rest of the losers suck eggs. | ||
16 | */ | ||
17 | static __inline__ void wake_one_more(struct semaphore * sem) | ||
18 | { | ||
19 | atomic_inc((atomic_t *)&sem->waking); | ||
20 | } | ||
21 | |||
22 | static __inline__ int waking_non_zero(struct semaphore *sem) | ||
23 | { | ||
24 | unsigned long flags; | ||
25 | int ret = 0; | ||
26 | |||
27 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
28 | if (sem->waking > 0) { | ||
29 | sem->waking--; | ||
30 | ret = 1; | ||
31 | } | ||
32 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * waking_non_zero_interruptible: | ||
38 | * 1 got the lock | ||
39 | * 0 go to sleep | ||
40 | * -EINTR interrupted | ||
41 | * | ||
42 | * We must undo the sem->count down_interruptible() increment while we are | ||
43 | * protected by the spinlock in order to make atomic this atomic_inc() with the | ||
44 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
45 | */ | ||
46 | static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, | ||
47 | struct task_struct *tsk) | ||
48 | { | ||
49 | unsigned long flags; | ||
50 | int ret = 0; | ||
51 | |||
52 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
53 | if (sem->waking > 0) { | ||
54 | sem->waking--; | ||
55 | ret = 1; | ||
56 | } else if (signal_pending(tsk)) { | ||
57 | atomic_inc(&sem->count); | ||
58 | ret = -EINTR; | ||
59 | } | ||
60 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * waking_non_zero_trylock: | ||
66 | * 1 failed to lock | ||
67 | * 0 got the lock | ||
68 | * | ||
69 | * We must undo the sem->count down_trylock() increment while we are | ||
70 | * protected by the spinlock in order to make atomic this atomic_inc() with the | ||
71 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
72 | */ | ||
73 | static __inline__ int waking_non_zero_trylock(struct semaphore *sem) | ||
74 | { | ||
75 | unsigned long flags; | ||
76 | int ret = 1; | ||
77 | |||
78 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
79 | if (sem->waking <= 0) | ||
80 | atomic_inc(&sem->count); | ||
81 | else { | ||
82 | sem->waking--; | ||
83 | ret = 0; | ||
84 | } | ||
85 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | #endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */ | ||
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h index a16271cdc748..d9b2034ed1d2 100644 --- a/include/asm-parisc/semaphore.h +++ b/include/asm-parisc/semaphore.h | |||
@@ -1,145 +1 @@ | |||
1 | /* SMP- and interrupt-safe semaphores. | #include <linux/semaphore.h> | |
2 | * PA-RISC version by Matthew Wilcox | ||
3 | * | ||
4 | * Linux/PA-RISC Project (http://www.parisc-linux.org/) | ||
5 | * Copyright (C) 1996 Linus Torvalds | ||
6 | * Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org > | ||
7 | * Copyright (C) 2000 Grant Grundler < grundler a debian org > | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | #ifndef _ASM_PARISC_SEMAPHORE_H | ||
25 | #define _ASM_PARISC_SEMAPHORE_H | ||
26 | |||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/wait.h> | ||
29 | #include <linux/rwsem.h> | ||
30 | |||
31 | #include <asm/system.h> | ||
32 | |||
33 | /* | ||
34 | * The `count' is initialised to the number of people who are allowed to | ||
35 | * take the lock. (Normally we want a mutex, so this is `1'). if | ||
36 | * `count' is positive, the lock can be taken. if it's 0, no-one is | ||
37 | * waiting on it. if it's -1, at least one task is waiting. | ||
38 | */ | ||
39 | struct semaphore { | ||
40 | spinlock_t sentry; | ||
41 | int count; | ||
42 | wait_queue_head_t wait; | ||
43 | }; | ||
44 | |||
45 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
46 | { \ | ||
47 | .sentry = SPIN_LOCK_UNLOCKED, \ | ||
48 | .count = n, \ | ||
49 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
50 | } | ||
51 | |||
52 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
53 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
54 | |||
55 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
56 | |||
57 | static inline void sema_init (struct semaphore *sem, int val) | ||
58 | { | ||
59 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
60 | } | ||
61 | |||
62 | static inline void init_MUTEX (struct semaphore *sem) | ||
63 | { | ||
64 | sema_init(sem, 1); | ||
65 | } | ||
66 | |||
67 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
68 | { | ||
69 | sema_init(sem, 0); | ||
70 | } | ||
71 | |||
72 | static inline int sem_getcount(struct semaphore *sem) | ||
73 | { | ||
74 | return sem->count; | ||
75 | } | ||
76 | |||
77 | asmlinkage void __down(struct semaphore * sem); | ||
78 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
79 | asmlinkage void __up(struct semaphore * sem); | ||
80 | |||
81 | /* Semaphores can be `tried' from irq context. So we have to disable | ||
82 | * interrupts while we're messing with the semaphore. Sorry. | ||
83 | */ | ||
84 | |||
85 | static inline void down(struct semaphore * sem) | ||
86 | { | ||
87 | might_sleep(); | ||
88 | spin_lock_irq(&sem->sentry); | ||
89 | if (sem->count > 0) { | ||
90 | sem->count--; | ||
91 | } else { | ||
92 | __down(sem); | ||
93 | } | ||
94 | spin_unlock_irq(&sem->sentry); | ||
95 | } | ||
96 | |||
97 | static inline int down_interruptible(struct semaphore * sem) | ||
98 | { | ||
99 | int ret = 0; | ||
100 | might_sleep(); | ||
101 | spin_lock_irq(&sem->sentry); | ||
102 | if (sem->count > 0) { | ||
103 | sem->count--; | ||
104 | } else { | ||
105 | ret = __down_interruptible(sem); | ||
106 | } | ||
107 | spin_unlock_irq(&sem->sentry); | ||
108 | return ret; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * down_trylock returns 0 on success, 1 if we failed to get the lock. | ||
113 | * May not sleep, but must preserve irq state | ||
114 | */ | ||
115 | static inline int down_trylock(struct semaphore * sem) | ||
116 | { | ||
117 | unsigned long flags; | ||
118 | int count; | ||
119 | |||
120 | spin_lock_irqsave(&sem->sentry, flags); | ||
121 | count = sem->count - 1; | ||
122 | if (count >= 0) | ||
123 | sem->count = count; | ||
124 | spin_unlock_irqrestore(&sem->sentry, flags); | ||
125 | return (count < 0); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Note! This is subtle. We jump to wake people up only if | ||
130 | * the semaphore was negative (== somebody was waiting on it). | ||
131 | */ | ||
132 | static inline void up(struct semaphore * sem) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | |||
136 | spin_lock_irqsave(&sem->sentry, flags); | ||
137 | if (sem->count < 0) { | ||
138 | __up(sem); | ||
139 | } else { | ||
140 | sem->count++; | ||
141 | } | ||
142 | spin_unlock_irqrestore(&sem->sentry, flags); | ||
143 | } | ||
144 | |||
145 | #endif /* _ASM_PARISC_SEMAPHORE_H */ | ||
diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h index 6d50310ecaea..3d90bf7d3d73 100644 --- a/include/asm-powerpc/ide.h +++ b/include/asm-powerpc/ide.h | |||
@@ -31,39 +31,48 @@ | |||
31 | #include <linux/hdreg.h> | 31 | #include <linux/hdreg.h> |
32 | #include <linux/ioport.h> | 32 | #include <linux/ioport.h> |
33 | 33 | ||
34 | struct ide_machdep_calls { | 34 | /* FIXME: use ide_platform host driver */ |
35 | int (*default_irq)(unsigned long base); | ||
36 | unsigned long (*default_io_base)(int index); | ||
37 | void (*ide_init_hwif)(hw_regs_t *hw, | ||
38 | unsigned long data_port, | ||
39 | unsigned long ctrl_port, | ||
40 | int *irq); | ||
41 | }; | ||
42 | |||
43 | extern struct ide_machdep_calls ppc_ide_md; | ||
44 | |||
45 | #define IDE_ARCH_OBSOLETE_DEFAULTS | ||
46 | |||
47 | static __inline__ int ide_default_irq(unsigned long base) | 35 | static __inline__ int ide_default_irq(unsigned long base) |
48 | { | 36 | { |
49 | if (ppc_ide_md.default_irq) | 37 | #ifdef CONFIG_PPLUS |
50 | return ppc_ide_md.default_irq(base); | 38 | switch (base) { |
39 | case 0x1f0: return 14; | ||
40 | case 0x170: return 15; | ||
41 | } | ||
42 | #endif | ||
43 | #ifdef CONFIG_PPC_PREP | ||
44 | switch (base) { | ||
45 | case 0x1f0: return 13; | ||
46 | case 0x170: return 13; | ||
47 | case 0x1e8: return 11; | ||
48 | case 0x168: return 10; | ||
49 | case 0xfff0: return 14; /* MCP(N)750 ide0 */ | ||
50 | case 0xffe0: return 15; /* MCP(N)750 ide1 */ | ||
51 | } | ||
52 | #endif | ||
51 | return 0; | 53 | return 0; |
52 | } | 54 | } |
53 | 55 | ||
56 | /* FIXME: use ide_platform host driver */ | ||
54 | static __inline__ unsigned long ide_default_io_base(int index) | 57 | static __inline__ unsigned long ide_default_io_base(int index) |
55 | { | 58 | { |
56 | if (ppc_ide_md.default_io_base) | 59 | #ifdef CONFIG_PPLUS |
57 | return ppc_ide_md.default_io_base(index); | 60 | switch (index) { |
61 | case 0: return 0x1f0; | ||
62 | case 1: return 0x170; | ||
63 | } | ||
64 | #endif | ||
65 | #ifdef CONFIG_PPC_PREP | ||
66 | switch (index) { | ||
67 | case 0: return 0x1f0; | ||
68 | case 1: return 0x170; | ||
69 | case 2: return 0x1e8; | ||
70 | case 3: return 0x168; | ||
71 | } | ||
72 | #endif | ||
58 | return 0; | 73 | return 0; |
59 | } | 74 | } |
60 | 75 | ||
61 | #ifdef CONFIG_PCI | ||
62 | #define ide_init_default_irq(base) (0) | ||
63 | #else | ||
64 | #define ide_init_default_irq(base) ide_default_irq(base) | ||
65 | #endif | ||
66 | |||
67 | #ifdef CONFIG_BLK_DEV_MPC8xx_IDE | 76 | #ifdef CONFIG_BLK_DEV_MPC8xx_IDE |
68 | #define IDE_ARCH_ACK_INTR 1 | 77 | #define IDE_ARCH_ACK_INTR 1 |
69 | #define ide_ack_intr(hwif) ((hwif)->ack_intr ? (hwif)->ack_intr(hwif) : 1) | 78 | #define ide_ack_intr(hwif) ((hwif)->ack_intr ? (hwif)->ack_intr(hwif) : 1) |
@@ -71,8 +80,6 @@ static __inline__ unsigned long ide_default_io_base(int index) | |||
71 | 80 | ||
72 | #endif /* __powerpc64__ */ | 81 | #endif /* __powerpc64__ */ |
73 | 82 | ||
74 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
75 | |||
76 | #endif /* __KERNEL__ */ | 83 | #endif /* __KERNEL__ */ |
77 | 84 | ||
78 | #endif /* _ASM_POWERPC_IDE_H */ | 85 | #endif /* _ASM_POWERPC_IDE_H */ |
diff --git a/include/asm-powerpc/mediabay.h b/include/asm-powerpc/mediabay.h index de83fe196309..df111c362a7f 100644 --- a/include/asm-powerpc/mediabay.h +++ b/include/asm-powerpc/mediabay.h | |||
@@ -22,10 +22,14 @@ int check_media_bay(struct device_node *which_bay, int what); | |||
22 | /* Number of bays in the machine or 0 */ | 22 | /* Number of bays in the machine or 0 */ |
23 | extern int media_bay_count; | 23 | extern int media_bay_count; |
24 | 24 | ||
25 | #ifdef CONFIG_BLK_DEV_IDE_PMAC | ||
26 | #include <linux/ide.h> | ||
27 | |||
25 | int check_media_bay_by_base(unsigned long base, int what); | 28 | int check_media_bay_by_base(unsigned long base, int what); |
26 | /* called by IDE PMAC host driver to register IDE controller for media bay */ | 29 | /* called by IDE PMAC host driver to register IDE controller for media bay */ |
27 | int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base, | 30 | int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base, |
28 | int irq, int index); | 31 | int irq, ide_hwif_t *hwif); |
32 | #endif | ||
29 | 33 | ||
30 | #endif /* __KERNEL__ */ | 34 | #endif /* __KERNEL__ */ |
31 | #endif /* _PPC_MEDIABAY_H */ | 35 | #endif /* _PPC_MEDIABAY_H */ |
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h index 48dd32e07749..d9b2034ed1d2 100644 --- a/include/asm-powerpc/semaphore.h +++ b/include/asm-powerpc/semaphore.h | |||
@@ -1,94 +1 @@ | |||
1 | #ifndef _ASM_POWERPC_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _ASM_POWERPC_SEMAPHORE_H | ||
3 | |||
4 | /* | ||
5 | * Remove spinlock-based RW semaphores; RW semaphore definitions are | ||
6 | * now in rwsem.h and we use the generic lib/rwsem.c implementation. | ||
7 | * Rework semaphores to use atomic_dec_if_positive. | ||
8 | * -- Paul Mackerras (paulus@samba.org) | ||
9 | */ | ||
10 | |||
11 | #ifdef __KERNEL__ | ||
12 | |||
13 | #include <asm/atomic.h> | ||
14 | #include <asm/system.h> | ||
15 | #include <linux/wait.h> | ||
16 | #include <linux/rwsem.h> | ||
17 | |||
18 | struct semaphore { | ||
19 | /* | ||
20 | * Note that any negative value of count is equivalent to 0, | ||
21 | * but additionally indicates that some process(es) might be | ||
22 | * sleeping on `wait'. | ||
23 | */ | ||
24 | atomic_t count; | ||
25 | wait_queue_head_t wait; | ||
26 | }; | ||
27 | |||
28 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
29 | { \ | ||
30 | .count = ATOMIC_INIT(n), \ | ||
31 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
32 | } | ||
33 | |||
34 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
35 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
36 | |||
37 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
38 | |||
39 | static inline void sema_init (struct semaphore *sem, int val) | ||
40 | { | ||
41 | atomic_set(&sem->count, val); | ||
42 | init_waitqueue_head(&sem->wait); | ||
43 | } | ||
44 | |||
45 | static inline void init_MUTEX (struct semaphore *sem) | ||
46 | { | ||
47 | sema_init(sem, 1); | ||
48 | } | ||
49 | |||
50 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
51 | { | ||
52 | sema_init(sem, 0); | ||
53 | } | ||
54 | |||
55 | extern void __down(struct semaphore * sem); | ||
56 | extern int __down_interruptible(struct semaphore * sem); | ||
57 | extern void __up(struct semaphore * sem); | ||
58 | |||
59 | static inline void down(struct semaphore * sem) | ||
60 | { | ||
61 | might_sleep(); | ||
62 | |||
63 | /* | ||
64 | * Try to get the semaphore, take the slow path if we fail. | ||
65 | */ | ||
66 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
67 | __down(sem); | ||
68 | } | ||
69 | |||
70 | static inline int down_interruptible(struct semaphore * sem) | ||
71 | { | ||
72 | int ret = 0; | ||
73 | |||
74 | might_sleep(); | ||
75 | |||
76 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
77 | ret = __down_interruptible(sem); | ||
78 | return ret; | ||
79 | } | ||
80 | |||
81 | static inline int down_trylock(struct semaphore * sem) | ||
82 | { | ||
83 | return atomic_dec_if_positive(&sem->count) < 0; | ||
84 | } | ||
85 | |||
86 | static inline void up(struct semaphore * sem) | ||
87 | { | ||
88 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) | ||
89 | __up(sem); | ||
90 | } | ||
91 | |||
92 | #endif /* __KERNEL__ */ | ||
93 | |||
94 | #endif /* _ASM_POWERPC_SEMAPHORE_H */ | ||
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index 123b557c3ff4..0818ecd30ca6 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h | |||
@@ -397,6 +397,10 @@ struct cio_iplinfo { | |||
397 | 397 | ||
398 | extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); | 398 | extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); |
399 | 399 | ||
400 | /* Function from drivers/s390/cio/chsc.c */ | ||
401 | int chsc_sstpc(void *page, unsigned int op, u16 ctrl); | ||
402 | int chsc_sstpi(void *page, void *result, size_t size); | ||
403 | |||
400 | #endif | 404 | #endif |
401 | 405 | ||
402 | #endif | 406 | #endif |
diff --git a/include/asm-s390/cpu.h b/include/asm-s390/cpu.h index 352dde194f3c..e5a6a9ba3adf 100644 --- a/include/asm-s390/cpu.h +++ b/include/asm-s390/cpu.h | |||
@@ -22,4 +22,12 @@ struct s390_idle_data { | |||
22 | 22 | ||
23 | DECLARE_PER_CPU(struct s390_idle_data, s390_idle); | 23 | DECLARE_PER_CPU(struct s390_idle_data, s390_idle); |
24 | 24 | ||
25 | void s390_idle_leave(void); | ||
26 | |||
27 | static inline void s390_idle_check(void) | ||
28 | { | ||
29 | if ((&__get_cpu_var(s390_idle))->in_idle) | ||
30 | s390_idle_leave(); | ||
31 | } | ||
32 | |||
25 | #endif /* _ASM_S390_CPU_H_ */ | 33 | #endif /* _ASM_S390_CPU_H_ */ |
diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h index c00dd2b3dc50..335baf4fc64f 100644 --- a/include/asm-s390/debug.h +++ b/include/asm-s390/debug.h | |||
@@ -73,6 +73,7 @@ typedef struct debug_info { | |||
73 | struct dentry* debugfs_entries[DEBUG_MAX_VIEWS]; | 73 | struct dentry* debugfs_entries[DEBUG_MAX_VIEWS]; |
74 | struct debug_view* views[DEBUG_MAX_VIEWS]; | 74 | struct debug_view* views[DEBUG_MAX_VIEWS]; |
75 | char name[DEBUG_MAX_NAME_LEN]; | 75 | char name[DEBUG_MAX_NAME_LEN]; |
76 | mode_t mode; | ||
76 | } debug_info_t; | 77 | } debug_info_t; |
77 | 78 | ||
78 | typedef int (debug_header_proc_t) (debug_info_t* id, | 79 | typedef int (debug_header_proc_t) (debug_info_t* id, |
@@ -122,6 +123,10 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level, | |||
122 | debug_info_t* debug_register(char* name, int pages, int nr_areas, | 123 | debug_info_t* debug_register(char* name, int pages, int nr_areas, |
123 | int buf_size); | 124 | int buf_size); |
124 | 125 | ||
126 | debug_info_t *debug_register_mode(char *name, int pages, int nr_areas, | ||
127 | int buf_size, mode_t mode, uid_t uid, | ||
128 | gid_t gid); | ||
129 | |||
125 | void debug_unregister(debug_info_t* id); | 130 | void debug_unregister(debug_info_t* id); |
126 | 131 | ||
127 | void debug_set_level(debug_info_t* id, int new_level); | 132 | void debug_set_level(debug_info_t* id, int new_level); |
diff --git a/include/asm-s390/extmem.h b/include/asm-s390/extmem.h index c8802c934b74..33837d756184 100644 --- a/include/asm-s390/extmem.h +++ b/include/asm-s390/extmem.h | |||
@@ -22,11 +22,12 @@ | |||
22 | #define SEGMENT_SHARED 0 | 22 | #define SEGMENT_SHARED 0 |
23 | #define SEGMENT_EXCLUSIVE 1 | 23 | #define SEGMENT_EXCLUSIVE 1 |
24 | 24 | ||
25 | extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length); | 25 | int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length); |
26 | extern void segment_unload(char *name); | 26 | void segment_unload(char *name); |
27 | extern void segment_save(char *name); | 27 | void segment_save(char *name); |
28 | extern int segment_type (char* name); | 28 | int segment_type (char* name); |
29 | extern int segment_modify_shared (char *name, int do_nonshared); | 29 | int segment_modify_shared (char *name, int do_nonshared); |
30 | void segment_warning(int rc, char *seg_name); | ||
30 | 31 | ||
31 | #endif | 32 | #endif |
32 | #endif | 33 | #endif |
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h index 31beb18cb3d1..4b7cb964ff35 100644 --- a/include/asm-s390/hardirq.h +++ b/include/asm-s390/hardirq.h | |||
@@ -32,6 +32,6 @@ typedef struct { | |||
32 | 32 | ||
33 | #define HARDIRQ_BITS 8 | 33 | #define HARDIRQ_BITS 8 |
34 | 34 | ||
35 | extern void account_ticks(u64 time); | 35 | void clock_comparator_work(void); |
36 | 36 | ||
37 | #endif /* __ASM_HARDIRQ_H */ | 37 | #endif /* __ASM_HARDIRQ_H */ |
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 801a6fd35b5b..5de3efb31445 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h | |||
@@ -56,6 +56,8 @@ | |||
56 | #define __LC_IO_INT_WORD 0x0C0 | 56 | #define __LC_IO_INT_WORD 0x0C0 |
57 | #define __LC_MCCK_CODE 0x0E8 | 57 | #define __LC_MCCK_CODE 0x0E8 |
58 | 58 | ||
59 | #define __LC_LAST_BREAK 0x110 | ||
60 | |||
59 | #define __LC_RETURN_PSW 0x200 | 61 | #define __LC_RETURN_PSW 0x200 |
60 | 62 | ||
61 | #define __LC_SAVE_AREA 0xC00 | 63 | #define __LC_SAVE_AREA 0xC00 |
@@ -80,7 +82,6 @@ | |||
80 | #define __LC_CPUID 0xC60 | 82 | #define __LC_CPUID 0xC60 |
81 | #define __LC_CPUADDR 0xC68 | 83 | #define __LC_CPUADDR 0xC68 |
82 | #define __LC_IPLDEV 0xC7C | 84 | #define __LC_IPLDEV 0xC7C |
83 | #define __LC_JIFFY_TIMER 0xC80 | ||
84 | #define __LC_CURRENT 0xC90 | 85 | #define __LC_CURRENT 0xC90 |
85 | #define __LC_INT_CLOCK 0xC98 | 86 | #define __LC_INT_CLOCK 0xC98 |
86 | #else /* __s390x__ */ | 87 | #else /* __s390x__ */ |
@@ -103,7 +104,6 @@ | |||
103 | #define __LC_CPUID 0xD80 | 104 | #define __LC_CPUID 0xD80 |
104 | #define __LC_CPUADDR 0xD88 | 105 | #define __LC_CPUADDR 0xD88 |
105 | #define __LC_IPLDEV 0xDB8 | 106 | #define __LC_IPLDEV 0xDB8 |
106 | #define __LC_JIFFY_TIMER 0xDC0 | ||
107 | #define __LC_CURRENT 0xDD8 | 107 | #define __LC_CURRENT 0xDD8 |
108 | #define __LC_INT_CLOCK 0xDE8 | 108 | #define __LC_INT_CLOCK 0xDE8 |
109 | #endif /* __s390x__ */ | 109 | #endif /* __s390x__ */ |
@@ -276,7 +276,7 @@ struct _lowcore | |||
276 | /* entry.S sensitive area end */ | 276 | /* entry.S sensitive area end */ |
277 | 277 | ||
278 | /* SMP info area: defined by DJB */ | 278 | /* SMP info area: defined by DJB */ |
279 | __u64 jiffy_timer; /* 0xc80 */ | 279 | __u64 clock_comparator; /* 0xc80 */ |
280 | __u32 ext_call_fast; /* 0xc88 */ | 280 | __u32 ext_call_fast; /* 0xc88 */ |
281 | __u32 percpu_offset; /* 0xc8c */ | 281 | __u32 percpu_offset; /* 0xc8c */ |
282 | __u32 current_task; /* 0xc90 */ | 282 | __u32 current_task; /* 0xc90 */ |
@@ -368,11 +368,12 @@ struct _lowcore | |||
368 | /* entry.S sensitive area end */ | 368 | /* entry.S sensitive area end */ |
369 | 369 | ||
370 | /* SMP info area: defined by DJB */ | 370 | /* SMP info area: defined by DJB */ |
371 | __u64 jiffy_timer; /* 0xdc0 */ | 371 | __u64 clock_comparator; /* 0xdc0 */ |
372 | __u64 ext_call_fast; /* 0xdc8 */ | 372 | __u64 ext_call_fast; /* 0xdc8 */ |
373 | __u64 percpu_offset; /* 0xdd0 */ | 373 | __u64 percpu_offset; /* 0xdd0 */ |
374 | __u64 current_task; /* 0xdd8 */ | 374 | __u64 current_task; /* 0xdd8 */ |
375 | __u64 softirq_pending; /* 0xde0 */ | 375 | __u32 softirq_pending; /* 0xde0 */ |
376 | __u32 pad_0x0de4; /* 0xde4 */ | ||
376 | __u64 int_clock; /* 0xde8 */ | 377 | __u64 int_clock; /* 0xde8 */ |
377 | __u8 pad12[0xe00-0xdf0]; /* 0xdf0 */ | 378 | __u8 pad12[0xe00-0xdf0]; /* 0xdf0 */ |
378 | 379 | ||
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 51d88912aa20..8eaf343a12a8 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h | |||
@@ -175,6 +175,13 @@ extern void task_show_regs(struct seq_file *m, struct task_struct *task); | |||
175 | extern void show_registers(struct pt_regs *regs); | 175 | extern void show_registers(struct pt_regs *regs); |
176 | extern void show_code(struct pt_regs *regs); | 176 | extern void show_code(struct pt_regs *regs); |
177 | extern void show_trace(struct task_struct *task, unsigned long *sp); | 177 | extern void show_trace(struct task_struct *task, unsigned long *sp); |
178 | #ifdef CONFIG_64BIT | ||
179 | extern void show_last_breaking_event(struct pt_regs *regs); | ||
180 | #else | ||
181 | static inline void show_last_breaking_event(struct pt_regs *regs) | ||
182 | { | ||
183 | } | ||
184 | #endif | ||
178 | 185 | ||
179 | unsigned long get_wchan(struct task_struct *p); | 186 | unsigned long get_wchan(struct task_struct *p); |
180 | #define task_pt_regs(tsk) ((struct pt_regs *) \ | 187 | #define task_pt_regs(tsk) ((struct pt_regs *) \ |
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h index 0e7001ad8392..d9b2034ed1d2 100644 --- a/include/asm-s390/semaphore.h +++ b/include/asm-s390/semaphore.h | |||
@@ -1,107 +1 @@ | |||
1 | /* | #include <linux/semaphore.h> | |
2 | * include/asm-s390/semaphore.h | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * | ||
7 | * Derived from "include/asm-i386/semaphore.h" | ||
8 | * (C) Copyright 1996 Linus Torvalds | ||
9 | */ | ||
10 | |||
11 | #ifndef _S390_SEMAPHORE_H | ||
12 | #define _S390_SEMAPHORE_H | ||
13 | |||
14 | #include <asm/system.h> | ||
15 | #include <asm/atomic.h> | ||
16 | #include <linux/wait.h> | ||
17 | #include <linux/rwsem.h> | ||
18 | |||
19 | struct semaphore { | ||
20 | /* | ||
21 | * Note that any negative value of count is equivalent to 0, | ||
22 | * but additionally indicates that some process(es) might be | ||
23 | * sleeping on `wait'. | ||
24 | */ | ||
25 | atomic_t count; | ||
26 | wait_queue_head_t wait; | ||
27 | }; | ||
28 | |||
29 | #define __SEMAPHORE_INITIALIZER(name,count) \ | ||
30 | { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) } | ||
31 | |||
32 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
33 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
34 | |||
35 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
36 | |||
37 | static inline void sema_init (struct semaphore *sem, int val) | ||
38 | { | ||
39 | atomic_set(&sem->count, val); | ||
40 | init_waitqueue_head(&sem->wait); | ||
41 | } | ||
42 | |||
43 | static inline void init_MUTEX (struct semaphore *sem) | ||
44 | { | ||
45 | sema_init(sem, 1); | ||
46 | } | ||
47 | |||
48 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
49 | { | ||
50 | sema_init(sem, 0); | ||
51 | } | ||
52 | |||
53 | asmlinkage void __down(struct semaphore * sem); | ||
54 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
55 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
56 | asmlinkage void __up(struct semaphore * sem); | ||
57 | |||
58 | static inline void down(struct semaphore * sem) | ||
59 | { | ||
60 | might_sleep(); | ||
61 | if (atomic_dec_return(&sem->count) < 0) | ||
62 | __down(sem); | ||
63 | } | ||
64 | |||
65 | static inline int down_interruptible(struct semaphore * sem) | ||
66 | { | ||
67 | int ret = 0; | ||
68 | |||
69 | might_sleep(); | ||
70 | if (atomic_dec_return(&sem->count) < 0) | ||
71 | ret = __down_interruptible(sem); | ||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | static inline int down_trylock(struct semaphore * sem) | ||
76 | { | ||
77 | int old_val, new_val; | ||
78 | |||
79 | /* | ||
80 | * This inline assembly atomically implements the equivalent | ||
81 | * to the following C code: | ||
82 | * old_val = sem->count.counter; | ||
83 | * if ((new_val = old_val) > 0) | ||
84 | * sem->count.counter = --new_val; | ||
85 | * In the ppc code this is called atomic_dec_if_positive. | ||
86 | */ | ||
87 | asm volatile( | ||
88 | " l %0,0(%3)\n" | ||
89 | "0: ltr %1,%0\n" | ||
90 | " jle 1f\n" | ||
91 | " ahi %1,-1\n" | ||
92 | " cs %0,%1,0(%3)\n" | ||
93 | " jl 0b\n" | ||
94 | "1:" | ||
95 | : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter) | ||
96 | : "a" (&sem->count.counter), "m" (sem->count.counter) | ||
97 | : "cc", "memory"); | ||
98 | return old_val <= 0; | ||
99 | } | ||
100 | |||
101 | static inline void up(struct semaphore * sem) | ||
102 | { | ||
103 | if (atomic_inc_return(&sem->count) <= 0) | ||
104 | __up(sem); | ||
105 | } | ||
106 | |||
107 | #endif | ||
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index c7b74326a527..6f3821a6a902 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h | |||
@@ -90,6 +90,9 @@ extern void __cpu_die (unsigned int cpu); | |||
90 | extern void cpu_die (void) __attribute__ ((noreturn)); | 90 | extern void cpu_die (void) __attribute__ ((noreturn)); |
91 | extern int __cpu_up (unsigned int cpu); | 91 | extern int __cpu_up (unsigned int cpu); |
92 | 92 | ||
93 | extern struct mutex smp_cpu_state_mutex; | ||
94 | extern int smp_cpu_polarization[]; | ||
95 | |||
93 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | 96 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), |
94 | void *info, int wait); | 97 | void *info, int wait); |
95 | #endif | 98 | #endif |
diff --git a/include/asm-s390/sysinfo.h b/include/asm-s390/sysinfo.h new file mode 100644 index 000000000000..abe10ae15e46 --- /dev/null +++ b/include/asm-s390/sysinfo.h | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * definition for store system information stsi | ||
3 | * | ||
4 | * Copyright IBM Corp. 2001,2008 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License (version 2 only) | ||
8 | * as published by the Free Software Foundation. | ||
9 | * | ||
10 | * Author(s): Ulrich Weigand <weigand@de.ibm.com> | ||
11 | * Christian Borntraeger <borntraeger@de.ibm.com> | ||
12 | */ | ||
13 | |||
14 | struct sysinfo_1_1_1 { | ||
15 | char reserved_0[32]; | ||
16 | char manufacturer[16]; | ||
17 | char type[4]; | ||
18 | char reserved_1[12]; | ||
19 | char model_capacity[16]; | ||
20 | char sequence[16]; | ||
21 | char plant[4]; | ||
22 | char model[16]; | ||
23 | char model_perm_cap[16]; | ||
24 | char model_temp_cap[16]; | ||
25 | char model_cap_rating[4]; | ||
26 | char model_perm_cap_rating[4]; | ||
27 | char model_temp_cap_rating[4]; | ||
28 | }; | ||
29 | |||
30 | struct sysinfo_1_2_1 { | ||
31 | char reserved_0[80]; | ||
32 | char sequence[16]; | ||
33 | char plant[4]; | ||
34 | char reserved_1[2]; | ||
35 | unsigned short cpu_address; | ||
36 | }; | ||
37 | |||
38 | struct sysinfo_1_2_2 { | ||
39 | char format; | ||
40 | char reserved_0[1]; | ||
41 | unsigned short acc_offset; | ||
42 | char reserved_1[24]; | ||
43 | unsigned int secondary_capability; | ||
44 | unsigned int capability; | ||
45 | unsigned short cpus_total; | ||
46 | unsigned short cpus_configured; | ||
47 | unsigned short cpus_standby; | ||
48 | unsigned short cpus_reserved; | ||
49 | unsigned short adjustment[0]; | ||
50 | }; | ||
51 | |||
52 | struct sysinfo_1_2_2_extension { | ||
53 | unsigned int alt_capability; | ||
54 | unsigned short alt_adjustment[0]; | ||
55 | }; | ||
56 | |||
57 | struct sysinfo_2_2_1 { | ||
58 | char reserved_0[80]; | ||
59 | char sequence[16]; | ||
60 | char plant[4]; | ||
61 | unsigned short cpu_id; | ||
62 | unsigned short cpu_address; | ||
63 | }; | ||
64 | |||
65 | struct sysinfo_2_2_2 { | ||
66 | char reserved_0[32]; | ||
67 | unsigned short lpar_number; | ||
68 | char reserved_1; | ||
69 | unsigned char characteristics; | ||
70 | unsigned short cpus_total; | ||
71 | unsigned short cpus_configured; | ||
72 | unsigned short cpus_standby; | ||
73 | unsigned short cpus_reserved; | ||
74 | char name[8]; | ||
75 | unsigned int caf; | ||
76 | char reserved_2[16]; | ||
77 | unsigned short cpus_dedicated; | ||
78 | unsigned short cpus_shared; | ||
79 | }; | ||
80 | |||
81 | #define LPAR_CHAR_DEDICATED (1 << 7) | ||
82 | #define LPAR_CHAR_SHARED (1 << 6) | ||
83 | #define LPAR_CHAR_LIMITED (1 << 5) | ||
84 | |||
85 | struct sysinfo_3_2_2 { | ||
86 | char reserved_0[31]; | ||
87 | unsigned char count; | ||
88 | struct { | ||
89 | char reserved_0[4]; | ||
90 | unsigned short cpus_total; | ||
91 | unsigned short cpus_configured; | ||
92 | unsigned short cpus_standby; | ||
93 | unsigned short cpus_reserved; | ||
94 | char name[8]; | ||
95 | unsigned int caf; | ||
96 | char cpi[16]; | ||
97 | char reserved_1[24]; | ||
98 | |||
99 | } vm[8]; | ||
100 | }; | ||
101 | |||
102 | static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) | ||
103 | { | ||
104 | register int r0 asm("0") = (fc << 28) | sel1; | ||
105 | register int r1 asm("1") = sel2; | ||
106 | |||
107 | asm volatile( | ||
108 | " stsi 0(%2)\n" | ||
109 | "0: jz 2f\n" | ||
110 | "1: lhi %0,%3\n" | ||
111 | "2:\n" | ||
112 | EX_TABLE(0b, 1b) | ||
113 | : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS) | ||
114 | : "cc", "memory"); | ||
115 | return r0; | ||
116 | } | ||
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index 15aba30601a3..92098df4d6e3 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h | |||
@@ -406,6 +406,8 @@ __set_psw_mask(unsigned long mask) | |||
406 | #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) | 406 | #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) |
407 | #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) | 407 | #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) |
408 | 408 | ||
409 | int stfle(unsigned long long *list, int doublewords); | ||
410 | |||
409 | #ifdef CONFIG_SMP | 411 | #ifdef CONFIG_SMP |
410 | 412 | ||
411 | extern void smp_ctl_set_bit(int cr, int bit); | 413 | extern void smp_ctl_set_bit(int cr, int bit); |
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h index 98229db24314..d744c3d62de5 100644 --- a/include/asm-s390/timex.h +++ b/include/asm-s390/timex.h | |||
@@ -62,16 +62,18 @@ static inline unsigned long long get_clock (void) | |||
62 | return clk; | 62 | return clk; |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline void get_clock_extended(void *dest) | 65 | static inline unsigned long long get_clock_xt(void) |
66 | { | 66 | { |
67 | typedef struct { unsigned long long clk[2]; } __clock_t; | 67 | unsigned char clk[16]; |
68 | 68 | ||
69 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | 69 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) |
70 | asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc"); | 70 | asm volatile("stcke %0" : "=Q" (clk) : : "cc"); |
71 | #else /* __GNUC__ */ | 71 | #else /* __GNUC__ */ |
72 | asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest)) | 72 | asm volatile("stcke 0(%1)" : "=m" (clk) |
73 | : "a" ((__clock_t *)dest) : "cc"); | 73 | : "a" (clk) : "cc"); |
74 | #endif /* __GNUC__ */ | 74 | #endif /* __GNUC__ */ |
75 | |||
76 | return *((unsigned long long *)&clk[1]); | ||
75 | } | 77 | } |
76 | 78 | ||
77 | static inline cycles_t get_cycles(void) | 79 | static inline cycles_t get_cycles(void) |
@@ -81,5 +83,6 @@ static inline cycles_t get_cycles(void) | |||
81 | 83 | ||
82 | int get_sync_clock(unsigned long long *clock); | 84 | int get_sync_clock(unsigned long long *clock); |
83 | void init_cpu_timer(void); | 85 | void init_cpu_timer(void); |
86 | unsigned long long monotonic_clock(void); | ||
84 | 87 | ||
85 | #endif | 88 | #endif |
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h index 35fb4f9127b2..9e57a93d7de1 100644 --- a/include/asm-s390/tlbflush.h +++ b/include/asm-s390/tlbflush.h | |||
@@ -13,12 +13,14 @@ static inline void __tlb_flush_local(void) | |||
13 | asm volatile("ptlb" : : : "memory"); | 13 | asm volatile("ptlb" : : : "memory"); |
14 | } | 14 | } |
15 | 15 | ||
16 | #ifdef CONFIG_SMP | ||
16 | /* | 17 | /* |
17 | * Flush all tlb entries on all cpus. | 18 | * Flush all tlb entries on all cpus. |
18 | */ | 19 | */ |
20 | void smp_ptlb_all(void); | ||
21 | |||
19 | static inline void __tlb_flush_global(void) | 22 | static inline void __tlb_flush_global(void) |
20 | { | 23 | { |
21 | extern void smp_ptlb_all(void); | ||
22 | register unsigned long reg2 asm("2"); | 24 | register unsigned long reg2 asm("2"); |
23 | register unsigned long reg3 asm("3"); | 25 | register unsigned long reg3 asm("3"); |
24 | register unsigned long reg4 asm("4"); | 26 | register unsigned long reg4 asm("4"); |
@@ -39,6 +41,25 @@ static inline void __tlb_flush_global(void) | |||
39 | : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); | 41 | : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); |
40 | } | 42 | } |
41 | 43 | ||
44 | static inline void __tlb_flush_full(struct mm_struct *mm) | ||
45 | { | ||
46 | cpumask_t local_cpumask; | ||
47 | |||
48 | preempt_disable(); | ||
49 | /* | ||
50 | * If the process only ran on the local cpu, do a local flush. | ||
51 | */ | ||
52 | local_cpumask = cpumask_of_cpu(smp_processor_id()); | ||
53 | if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) | ||
54 | __tlb_flush_local(); | ||
55 | else | ||
56 | __tlb_flush_global(); | ||
57 | preempt_enable(); | ||
58 | } | ||
59 | #else | ||
60 | #define __tlb_flush_full(mm) __tlb_flush_local() | ||
61 | #endif | ||
62 | |||
42 | /* | 63 | /* |
43 | * Flush all tlb entries of a page table on all cpus. | 64 | * Flush all tlb entries of a page table on all cpus. |
44 | */ | 65 | */ |
@@ -51,8 +72,6 @@ static inline void __tlb_flush_idte(unsigned long asce) | |||
51 | 72 | ||
52 | static inline void __tlb_flush_mm(struct mm_struct * mm) | 73 | static inline void __tlb_flush_mm(struct mm_struct * mm) |
53 | { | 74 | { |
54 | cpumask_t local_cpumask; | ||
55 | |||
56 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) | 75 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) |
57 | return; | 76 | return; |
58 | /* | 77 | /* |
@@ -69,16 +88,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) | |||
69 | mm->context.asce_bits); | 88 | mm->context.asce_bits); |
70 | return; | 89 | return; |
71 | } | 90 | } |
72 | preempt_disable(); | 91 | __tlb_flush_full(mm); |
73 | /* | ||
74 | * If the process only ran on the local cpu, do a local flush. | ||
75 | */ | ||
76 | local_cpumask = cpumask_of_cpu(smp_processor_id()); | ||
77 | if (cpus_equal(mm->cpu_vm_mask, local_cpumask)) | ||
78 | __tlb_flush_local(); | ||
79 | else | ||
80 | __tlb_flush_global(); | ||
81 | preempt_enable(); | ||
82 | } | 92 | } |
83 | 93 | ||
84 | static inline void __tlb_flush_mm_cond(struct mm_struct * mm) | 94 | static inline void __tlb_flush_mm_cond(struct mm_struct * mm) |
diff --git a/include/asm-s390/topology.h b/include/asm-s390/topology.h index 613aa64019da..8e97b06f298a 100644 --- a/include/asm-s390/topology.h +++ b/include/asm-s390/topology.h | |||
@@ -1,6 +1,29 @@ | |||
1 | #ifndef _ASM_S390_TOPOLOGY_H | 1 | #ifndef _ASM_S390_TOPOLOGY_H |
2 | #define _ASM_S390_TOPOLOGY_H | 2 | #define _ASM_S390_TOPOLOGY_H |
3 | 3 | ||
4 | #include <linux/cpumask.h> | ||
5 | |||
6 | #define mc_capable() (1) | ||
7 | |||
8 | cpumask_t cpu_coregroup_map(unsigned int cpu); | ||
9 | |||
10 | int topology_set_cpu_management(int fc); | ||
11 | void topology_schedule_update(void); | ||
12 | |||
13 | #define POLARIZATION_UNKNWN (-1) | ||
14 | #define POLARIZATION_HRZ (0) | ||
15 | #define POLARIZATION_VL (1) | ||
16 | #define POLARIZATION_VM (2) | ||
17 | #define POLARIZATION_VH (3) | ||
18 | |||
19 | #ifdef CONFIG_SMP | ||
20 | void s390_init_cpu_topology(void); | ||
21 | #else | ||
22 | static inline void s390_init_cpu_topology(void) | ||
23 | { | ||
24 | }; | ||
25 | #endif | ||
26 | |||
4 | #include <asm-generic/topology.h> | 27 | #include <asm-generic/topology.h> |
5 | 28 | ||
6 | #endif /* _ASM_S390_TOPOLOGY_H */ | 29 | #endif /* _ASM_S390_TOPOLOGY_H */ |
diff --git a/include/asm-sh/ide.h b/include/asm-sh/ide.h index 9f8e9142dc33..58e0bdd52be4 100644 --- a/include/asm-sh/ide.h +++ b/include/asm-sh/ide.h | |||
@@ -14,9 +14,6 @@ | |||
14 | 14 | ||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | 16 | ||
17 | |||
18 | #define ide_default_io_ctl(base) (0) | ||
19 | |||
20 | #include <asm-generic/ide_iops.h> | 17 | #include <asm-generic/ide_iops.h> |
21 | 18 | ||
22 | #endif /* __KERNEL__ */ | 19 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h deleted file mode 100644 index bd8230c369ca..000000000000 --- a/include/asm-sh/semaphore-helper.h +++ /dev/null | |||
@@ -1,89 +0,0 @@ | |||
1 | #ifndef __ASM_SH_SEMAPHORE_HELPER_H | ||
2 | #define __ASM_SH_SEMAPHORE_HELPER_H | ||
3 | |||
4 | /* | ||
5 | * SMP- and interrupt-safe semaphores helper functions. | ||
6 | * | ||
7 | * (C) Copyright 1996 Linus Torvalds | ||
8 | * (C) Copyright 1999 Andrea Arcangeli | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * These two _must_ execute atomically wrt each other. | ||
13 | * | ||
14 | * This is trivially done with load_locked/store_cond, | ||
15 | * which we have. Let the rest of the losers suck eggs. | ||
16 | */ | ||
17 | static __inline__ void wake_one_more(struct semaphore * sem) | ||
18 | { | ||
19 | atomic_inc((atomic_t *)&sem->sleepers); | ||
20 | } | ||
21 | |||
22 | static __inline__ int waking_non_zero(struct semaphore *sem) | ||
23 | { | ||
24 | unsigned long flags; | ||
25 | int ret = 0; | ||
26 | |||
27 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
28 | if (sem->sleepers > 0) { | ||
29 | sem->sleepers--; | ||
30 | ret = 1; | ||
31 | } | ||
32 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * waking_non_zero_interruptible: | ||
38 | * 1 got the lock | ||
39 | * 0 go to sleep | ||
40 | * -EINTR interrupted | ||
41 | * | ||
42 | * We must undo the sem->count down_interruptible() increment while we are | ||
43 | * protected by the spinlock in order to make atomic this atomic_inc() with the | ||
44 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
45 | */ | ||
46 | static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, | ||
47 | struct task_struct *tsk) | ||
48 | { | ||
49 | unsigned long flags; | ||
50 | int ret = 0; | ||
51 | |||
52 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
53 | if (sem->sleepers > 0) { | ||
54 | sem->sleepers--; | ||
55 | ret = 1; | ||
56 | } else if (signal_pending(tsk)) { | ||
57 | atomic_inc(&sem->count); | ||
58 | ret = -EINTR; | ||
59 | } | ||
60 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * waking_non_zero_trylock: | ||
66 | * 1 failed to lock | ||
67 | * 0 got the lock | ||
68 | * | ||
69 | * We must undo the sem->count down_trylock() increment while we are | ||
70 | * protected by the spinlock in order to make atomic this atomic_inc() with the | ||
71 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
72 | */ | ||
73 | static __inline__ int waking_non_zero_trylock(struct semaphore *sem) | ||
74 | { | ||
75 | unsigned long flags; | ||
76 | int ret = 1; | ||
77 | |||
78 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
79 | if (sem->sleepers <= 0) | ||
80 | atomic_inc(&sem->count); | ||
81 | else { | ||
82 | sem->sleepers--; | ||
83 | ret = 0; | ||
84 | } | ||
85 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | #endif /* __ASM_SH_SEMAPHORE_HELPER_H */ | ||
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h index 9e5a37c4dce2..d9b2034ed1d2 100644 --- a/include/asm-sh/semaphore.h +++ b/include/asm-sh/semaphore.h | |||
@@ -1,115 +1 @@ | |||
1 | #ifndef __ASM_SH_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define __ASM_SH_SEMAPHORE_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | /* | ||
8 | * SMP- and interrupt-safe semaphores. | ||
9 | * | ||
10 | * (C) Copyright 1996 Linus Torvalds | ||
11 | * | ||
12 | * SuperH verison by Niibe Yutaka | ||
13 | * (Currently no asm implementation but generic C code...) | ||
14 | */ | ||
15 | |||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/rwsem.h> | ||
18 | #include <linux/wait.h> | ||
19 | |||
20 | #include <asm/system.h> | ||
21 | #include <asm/atomic.h> | ||
22 | |||
23 | struct semaphore { | ||
24 | atomic_t count; | ||
25 | int sleepers; | ||
26 | wait_queue_head_t wait; | ||
27 | }; | ||
28 | |||
29 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
30 | { \ | ||
31 | .count = ATOMIC_INIT(n), \ | ||
32 | .sleepers = 0, \ | ||
33 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
34 | } | ||
35 | |||
36 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
37 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
38 | |||
39 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
40 | |||
41 | static inline void sema_init (struct semaphore *sem, int val) | ||
42 | { | ||
43 | /* | ||
44 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
45 | * | ||
46 | * i'd rather use the more flexible initialization above, but sadly | ||
47 | * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. | ||
48 | */ | ||
49 | atomic_set(&sem->count, val); | ||
50 | sem->sleepers = 0; | ||
51 | init_waitqueue_head(&sem->wait); | ||
52 | } | ||
53 | |||
54 | static inline void init_MUTEX (struct semaphore *sem) | ||
55 | { | ||
56 | sema_init(sem, 1); | ||
57 | } | ||
58 | |||
59 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
60 | { | ||
61 | sema_init(sem, 0); | ||
62 | } | ||
63 | |||
64 | #if 0 | ||
65 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
66 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
67 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
68 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
69 | #endif | ||
70 | |||
71 | asmlinkage void __down(struct semaphore * sem); | ||
72 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
73 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
74 | asmlinkage void __up(struct semaphore * sem); | ||
75 | |||
76 | extern spinlock_t semaphore_wake_lock; | ||
77 | |||
78 | static inline void down(struct semaphore * sem) | ||
79 | { | ||
80 | might_sleep(); | ||
81 | if (atomic_dec_return(&sem->count) < 0) | ||
82 | __down(sem); | ||
83 | } | ||
84 | |||
85 | static inline int down_interruptible(struct semaphore * sem) | ||
86 | { | ||
87 | int ret = 0; | ||
88 | |||
89 | might_sleep(); | ||
90 | if (atomic_dec_return(&sem->count) < 0) | ||
91 | ret = __down_interruptible(sem); | ||
92 | return ret; | ||
93 | } | ||
94 | |||
95 | static inline int down_trylock(struct semaphore * sem) | ||
96 | { | ||
97 | int ret = 0; | ||
98 | |||
99 | if (atomic_dec_return(&sem->count) < 0) | ||
100 | ret = __down_trylock(sem); | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Note! This is subtle. We jump to wake people up only if | ||
106 | * the semaphore was negative (== somebody was waiting on it). | ||
107 | */ | ||
108 | static inline void up(struct semaphore * sem) | ||
109 | { | ||
110 | if (atomic_inc_return(&sem->count) <= 0) | ||
111 | __up(sem); | ||
112 | } | ||
113 | |||
114 | #endif | ||
115 | #endif /* __ASM_SH_SEMAPHORE_H */ | ||
diff --git a/include/asm-sparc/ide.h b/include/asm-sparc/ide.h index 4076cb5d1581..afd1736ed480 100644 --- a/include/asm-sparc/ide.h +++ b/include/asm-sparc/ide.h | |||
@@ -17,8 +17,6 @@ | |||
17 | #undef MAX_HWIFS | 17 | #undef MAX_HWIFS |
18 | #define MAX_HWIFS 2 | 18 | #define MAX_HWIFS 2 |
19 | 19 | ||
20 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
21 | |||
22 | #define __ide_insl(data_reg, buffer, wcount) \ | 20 | #define __ide_insl(data_reg, buffer, wcount) \ |
23 | __ide_insw(data_reg, buffer, (wcount)<<1) | 21 | __ide_insw(data_reg, buffer, (wcount)<<1) |
24 | #define __ide_outsl(data_reg, buffer, wcount) \ | 22 | #define __ide_outsl(data_reg, buffer, wcount) \ |
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h index 8018f9f4d497..d9b2034ed1d2 100644 --- a/include/asm-sparc/semaphore.h +++ b/include/asm-sparc/semaphore.h | |||
@@ -1,192 +1 @@ | |||
1 | #ifndef _SPARC_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _SPARC_SEMAPHORE_H | ||
3 | |||
4 | /* Dinky, good for nothing, just barely irq safe, Sparc semaphores. */ | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | |||
8 | #include <asm/atomic.h> | ||
9 | #include <linux/wait.h> | ||
10 | #include <linux/rwsem.h> | ||
11 | |||
12 | struct semaphore { | ||
13 | atomic24_t count; | ||
14 | int sleepers; | ||
15 | wait_queue_head_t wait; | ||
16 | }; | ||
17 | |||
18 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
19 | { \ | ||
20 | .count = ATOMIC24_INIT(n), \ | ||
21 | .sleepers = 0, \ | ||
22 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
23 | } | ||
24 | |||
25 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
26 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
27 | |||
28 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
29 | |||
30 | static inline void sema_init (struct semaphore *sem, int val) | ||
31 | { | ||
32 | atomic24_set(&sem->count, val); | ||
33 | sem->sleepers = 0; | ||
34 | init_waitqueue_head(&sem->wait); | ||
35 | } | ||
36 | |||
37 | static inline void init_MUTEX (struct semaphore *sem) | ||
38 | { | ||
39 | sema_init(sem, 1); | ||
40 | } | ||
41 | |||
42 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
43 | { | ||
44 | sema_init(sem, 0); | ||
45 | } | ||
46 | |||
47 | extern void __down(struct semaphore * sem); | ||
48 | extern int __down_interruptible(struct semaphore * sem); | ||
49 | extern int __down_trylock(struct semaphore * sem); | ||
50 | extern void __up(struct semaphore * sem); | ||
51 | |||
52 | static inline void down(struct semaphore * sem) | ||
53 | { | ||
54 | register volatile int *ptr asm("g1"); | ||
55 | register int increment asm("g2"); | ||
56 | |||
57 | might_sleep(); | ||
58 | |||
59 | ptr = &(sem->count.counter); | ||
60 | increment = 1; | ||
61 | |||
62 | __asm__ __volatile__( | ||
63 | "mov %%o7, %%g4\n\t" | ||
64 | "call ___atomic24_sub\n\t" | ||
65 | " add %%o7, 8, %%o7\n\t" | ||
66 | "tst %%g2\n\t" | ||
67 | "bl 2f\n\t" | ||
68 | " nop\n" | ||
69 | "1:\n\t" | ||
70 | ".subsection 2\n" | ||
71 | "2:\n\t" | ||
72 | "save %%sp, -64, %%sp\n\t" | ||
73 | "mov %%g1, %%l1\n\t" | ||
74 | "mov %%g5, %%l5\n\t" | ||
75 | "call %3\n\t" | ||
76 | " mov %%g1, %%o0\n\t" | ||
77 | "mov %%l1, %%g1\n\t" | ||
78 | "ba 1b\n\t" | ||
79 | " restore %%l5, %%g0, %%g5\n\t" | ||
80 | ".previous\n" | ||
81 | : "=&r" (increment) | ||
82 | : "0" (increment), "r" (ptr), "i" (__down) | ||
83 | : "g3", "g4", "g7", "memory", "cc"); | ||
84 | } | ||
85 | |||
86 | static inline int down_interruptible(struct semaphore * sem) | ||
87 | { | ||
88 | register volatile int *ptr asm("g1"); | ||
89 | register int increment asm("g2"); | ||
90 | |||
91 | might_sleep(); | ||
92 | |||
93 | ptr = &(sem->count.counter); | ||
94 | increment = 1; | ||
95 | |||
96 | __asm__ __volatile__( | ||
97 | "mov %%o7, %%g4\n\t" | ||
98 | "call ___atomic24_sub\n\t" | ||
99 | " add %%o7, 8, %%o7\n\t" | ||
100 | "tst %%g2\n\t" | ||
101 | "bl 2f\n\t" | ||
102 | " clr %%g2\n" | ||
103 | "1:\n\t" | ||
104 | ".subsection 2\n" | ||
105 | "2:\n\t" | ||
106 | "save %%sp, -64, %%sp\n\t" | ||
107 | "mov %%g1, %%l1\n\t" | ||
108 | "mov %%g5, %%l5\n\t" | ||
109 | "call %3\n\t" | ||
110 | " mov %%g1, %%o0\n\t" | ||
111 | "mov %%l1, %%g1\n\t" | ||
112 | "mov %%l5, %%g5\n\t" | ||
113 | "ba 1b\n\t" | ||
114 | " restore %%o0, %%g0, %%g2\n\t" | ||
115 | ".previous\n" | ||
116 | : "=&r" (increment) | ||
117 | : "0" (increment), "r" (ptr), "i" (__down_interruptible) | ||
118 | : "g3", "g4", "g7", "memory", "cc"); | ||
119 | |||
120 | return increment; | ||
121 | } | ||
122 | |||
123 | static inline int down_trylock(struct semaphore * sem) | ||
124 | { | ||
125 | register volatile int *ptr asm("g1"); | ||
126 | register int increment asm("g2"); | ||
127 | |||
128 | ptr = &(sem->count.counter); | ||
129 | increment = 1; | ||
130 | |||
131 | __asm__ __volatile__( | ||
132 | "mov %%o7, %%g4\n\t" | ||
133 | "call ___atomic24_sub\n\t" | ||
134 | " add %%o7, 8, %%o7\n\t" | ||
135 | "tst %%g2\n\t" | ||
136 | "bl 2f\n\t" | ||
137 | " clr %%g2\n" | ||
138 | "1:\n\t" | ||
139 | ".subsection 2\n" | ||
140 | "2:\n\t" | ||
141 | "save %%sp, -64, %%sp\n\t" | ||
142 | "mov %%g1, %%l1\n\t" | ||
143 | "mov %%g5, %%l5\n\t" | ||
144 | "call %3\n\t" | ||
145 | " mov %%g1, %%o0\n\t" | ||
146 | "mov %%l1, %%g1\n\t" | ||
147 | "mov %%l5, %%g5\n\t" | ||
148 | "ba 1b\n\t" | ||
149 | " restore %%o0, %%g0, %%g2\n\t" | ||
150 | ".previous\n" | ||
151 | : "=&r" (increment) | ||
152 | : "0" (increment), "r" (ptr), "i" (__down_trylock) | ||
153 | : "g3", "g4", "g7", "memory", "cc"); | ||
154 | |||
155 | return increment; | ||
156 | } | ||
157 | |||
158 | static inline void up(struct semaphore * sem) | ||
159 | { | ||
160 | register volatile int *ptr asm("g1"); | ||
161 | register int increment asm("g2"); | ||
162 | |||
163 | ptr = &(sem->count.counter); | ||
164 | increment = 1; | ||
165 | |||
166 | __asm__ __volatile__( | ||
167 | "mov %%o7, %%g4\n\t" | ||
168 | "call ___atomic24_add\n\t" | ||
169 | " add %%o7, 8, %%o7\n\t" | ||
170 | "tst %%g2\n\t" | ||
171 | "ble 2f\n\t" | ||
172 | " nop\n" | ||
173 | "1:\n\t" | ||
174 | ".subsection 2\n" | ||
175 | "2:\n\t" | ||
176 | "save %%sp, -64, %%sp\n\t" | ||
177 | "mov %%g1, %%l1\n\t" | ||
178 | "mov %%g5, %%l5\n\t" | ||
179 | "call %3\n\t" | ||
180 | " mov %%g1, %%o0\n\t" | ||
181 | "mov %%l1, %%g1\n\t" | ||
182 | "ba 1b\n\t" | ||
183 | " restore %%l5, %%g0, %%g5\n\t" | ||
184 | ".previous\n" | ||
185 | : "=&r" (increment) | ||
186 | : "0" (increment), "r" (ptr), "i" (__up) | ||
187 | : "g3", "g4", "g7", "memory", "cc"); | ||
188 | } | ||
189 | |||
190 | #endif /* __KERNEL__ */ | ||
191 | |||
192 | #endif /* !(_SPARC_SEMAPHORE_H) */ | ||
diff --git a/include/asm-sparc64/ide.h b/include/asm-sparc64/ide.h index ac7eb210b941..c5fdabe0b42d 100644 --- a/include/asm-sparc64/ide.h +++ b/include/asm-sparc64/ide.h | |||
@@ -24,8 +24,6 @@ | |||
24 | # endif | 24 | # endif |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
28 | |||
29 | #define __ide_insl(data_reg, buffer, wcount) \ | 27 | #define __ide_insl(data_reg, buffer, wcount) \ |
30 | __ide_insw(data_reg, buffer, (wcount)<<1) | 28 | __ide_insw(data_reg, buffer, (wcount)<<1) |
31 | #define __ide_outsl(data_reg, buffer, wcount) \ | 29 | #define __ide_outsl(data_reg, buffer, wcount) \ |
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h index 7f7c0c4e024f..d9b2034ed1d2 100644 --- a/include/asm-sparc64/semaphore.h +++ b/include/asm-sparc64/semaphore.h | |||
@@ -1,53 +1 @@ | |||
1 | #ifndef _SPARC64_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define _SPARC64_SEMAPHORE_H | ||
3 | |||
4 | /* These are actually reasonable on the V9. | ||
5 | * | ||
6 | * See asm-ppc/semaphore.h for implementation commentary, | ||
7 | * only sparc64 specific issues are commented here. | ||
8 | */ | ||
9 | #ifdef __KERNEL__ | ||
10 | |||
11 | #include <asm/atomic.h> | ||
12 | #include <asm/system.h> | ||
13 | #include <linux/wait.h> | ||
14 | #include <linux/rwsem.h> | ||
15 | |||
16 | struct semaphore { | ||
17 | atomic_t count; | ||
18 | wait_queue_head_t wait; | ||
19 | }; | ||
20 | |||
21 | #define __SEMAPHORE_INITIALIZER(name, count) \ | ||
22 | { ATOMIC_INIT(count), \ | ||
23 | __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) } | ||
24 | |||
25 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
26 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
27 | |||
28 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
29 | |||
30 | static inline void sema_init (struct semaphore *sem, int val) | ||
31 | { | ||
32 | atomic_set(&sem->count, val); | ||
33 | init_waitqueue_head(&sem->wait); | ||
34 | } | ||
35 | |||
36 | static inline void init_MUTEX (struct semaphore *sem) | ||
37 | { | ||
38 | sema_init(sem, 1); | ||
39 | } | ||
40 | |||
41 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
42 | { | ||
43 | sema_init(sem, 0); | ||
44 | } | ||
45 | |||
46 | extern void up(struct semaphore *sem); | ||
47 | extern void down(struct semaphore *sem); | ||
48 | extern int down_trylock(struct semaphore *sem); | ||
49 | extern int down_interruptible(struct semaphore *sem); | ||
50 | |||
51 | #endif /* __KERNEL__ */ | ||
52 | |||
53 | #endif /* !(_SPARC64_SEMAPHORE_H) */ | ||
diff --git a/include/asm-um/semaphore.h b/include/asm-um/semaphore.h index ff13c34de421..d9b2034ed1d2 100644 --- a/include/asm-um/semaphore.h +++ b/include/asm-um/semaphore.h | |||
@@ -1,6 +1 @@ | |||
1 | #ifndef __UM_SEMAPHORE_H | #include <linux/semaphore.h> | |
2 | #define __UM_SEMAPHORE_H | ||
3 | |||
4 | #include "asm/arch/semaphore.h" | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-um/tlb.h b/include/asm-um/tlb.h index 39fc475df6c9..5240fa1c5e08 100644 --- a/include/asm-um/tlb.h +++ b/include/asm-um/tlb.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __UM_TLB_H | 1 | #ifndef __UM_TLB_H |
2 | #define __UM_TLB_H | 2 | #define __UM_TLB_H |
3 | 3 | ||
4 | #include <linux/pagemap.h> | ||
4 | #include <linux/swap.h> | 5 | #include <linux/swap.h> |
5 | #include <asm/percpu.h> | 6 | #include <asm/percpu.h> |
6 | #include <asm/pgalloc.h> | 7 | #include <asm/pgalloc.h> |
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h index 10ed0ccf37df..d9b2034ed1d2 100644 --- a/include/asm-v850/semaphore.h +++ b/include/asm-v850/semaphore.h | |||
@@ -1,84 +1 @@ | |||
1 | #ifndef __V850_SEMAPHORE_H__ | #include <linux/semaphore.h> | |
2 | #define __V850_SEMAPHORE_H__ | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | #include <linux/spinlock.h> | ||
6 | #include <linux/wait.h> | ||
7 | #include <linux/rwsem.h> | ||
8 | |||
9 | #include <asm/atomic.h> | ||
10 | |||
11 | struct semaphore { | ||
12 | atomic_t count; | ||
13 | int sleepers; | ||
14 | wait_queue_head_t wait; | ||
15 | }; | ||
16 | |||
17 | #define __SEMAPHORE_INITIALIZER(name,count) \ | ||
18 | { ATOMIC_INIT (count), 0, \ | ||
19 | __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) } | ||
20 | |||
21 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
22 | struct semaphore name = __SEMAPHORE_INITIALIZER (name,count) | ||
23 | |||
24 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1) | ||
25 | |||
26 | static inline void sema_init (struct semaphore *sem, int val) | ||
27 | { | ||
28 | *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
29 | } | ||
30 | |||
31 | static inline void init_MUTEX (struct semaphore *sem) | ||
32 | { | ||
33 | sema_init (sem, 1); | ||
34 | } | ||
35 | |||
36 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
37 | { | ||
38 | sema_init (sem, 0); | ||
39 | } | ||
40 | |||
41 | /* | ||
42 | * special register calling convention | ||
43 | */ | ||
44 | asmlinkage void __down_failed (void); | ||
45 | asmlinkage int __down_interruptible_failed (void); | ||
46 | asmlinkage int __down_trylock_failed (void); | ||
47 | asmlinkage void __up_wakeup (void); | ||
48 | |||
49 | extern void __down (struct semaphore * sem); | ||
50 | extern int __down_interruptible (struct semaphore * sem); | ||
51 | extern int __down_trylock (struct semaphore * sem); | ||
52 | extern void __up (struct semaphore * sem); | ||
53 | |||
54 | static inline void down (struct semaphore * sem) | ||
55 | { | ||
56 | might_sleep(); | ||
57 | if (atomic_dec_return (&sem->count) < 0) | ||
58 | __down (sem); | ||
59 | } | ||
60 | |||
61 | static inline int down_interruptible (struct semaphore * sem) | ||
62 | { | ||
63 | int ret = 0; | ||
64 | might_sleep(); | ||
65 | if (atomic_dec_return (&sem->count) < 0) | ||
66 | ret = __down_interruptible (sem); | ||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | static inline int down_trylock (struct semaphore *sem) | ||
71 | { | ||
72 | int ret = 0; | ||
73 | if (atomic_dec_return (&sem->count) < 0) | ||
74 | ret = __down_trylock (sem); | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | static inline void up (struct semaphore * sem) | ||
79 | { | ||
80 | if (atomic_inc_return (&sem->count) <= 0) | ||
81 | __up (sem); | ||
82 | } | ||
83 | |||
84 | #endif /* __V850_SEMAPHORE_H__ */ | ||
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild index 3b8160a2b47e..1e3554596f72 100644 --- a/include/asm-x86/Kbuild +++ b/include/asm-x86/Kbuild | |||
@@ -10,6 +10,7 @@ header-y += prctl.h | |||
10 | header-y += ptrace-abi.h | 10 | header-y += ptrace-abi.h |
11 | header-y += sigcontext32.h | 11 | header-y += sigcontext32.h |
12 | header-y += ucontext.h | 12 | header-y += ucontext.h |
13 | header-y += processor-flags.h | ||
13 | 14 | ||
14 | unifdef-y += e820.h | 15 | unifdef-y += e820.h |
15 | unifdef-y += ist.h | 16 | unifdef-y += ist.h |
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h index d2b6e11d3e97..714207a1c387 100644 --- a/include/asm-x86/a.out-core.h +++ b/include/asm-x86/a.out-core.h | |||
@@ -29,8 +29,9 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
29 | dump->magic = CMAGIC; | 29 | dump->magic = CMAGIC; |
30 | dump->start_code = 0; | 30 | dump->start_code = 0; |
31 | dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); | 31 | dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); |
32 | dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; | 32 | dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; |
33 | dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; | 33 | dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1))) |
34 | >> PAGE_SHIFT; | ||
34 | dump->u_dsize -= dump->u_tsize; | 35 | dump->u_dsize -= dump->u_tsize; |
35 | dump->u_ssize = 0; | 36 | dump->u_ssize = 0; |
36 | dump->u_debugreg[0] = current->thread.debugreg0; | 37 | dump->u_debugreg[0] = current->thread.debugreg0; |
@@ -43,7 +44,8 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
43 | dump->u_debugreg[7] = current->thread.debugreg7; | 44 | dump->u_debugreg[7] = current->thread.debugreg7; |
44 | 45 | ||
45 | if (dump->start_stack < TASK_SIZE) | 46 | if (dump->start_stack < TASK_SIZE) |
46 | dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; | 47 | dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) |
48 | >> PAGE_SHIFT; | ||
47 | 49 | ||
48 | dump->regs.bx = regs->bx; | 50 | dump->regs.bx = regs->bx; |
49 | dump->regs.cx = regs->cx; | 51 | dump->regs.cx = regs->cx; |
@@ -55,7 +57,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
55 | dump->regs.ds = (u16)regs->ds; | 57 | dump->regs.ds = (u16)regs->ds; |
56 | dump->regs.es = (u16)regs->es; | 58 | dump->regs.es = (u16)regs->es; |
57 | dump->regs.fs = (u16)regs->fs; | 59 | dump->regs.fs = (u16)regs->fs; |
58 | savesegment(gs,gs); | 60 | savesegment(gs, gs); |
59 | dump->regs.orig_ax = regs->orig_ax; | 61 | dump->regs.orig_ax = regs->orig_ax; |
60 | dump->regs.ip = regs->ip; | 62 | dump->regs.ip = regs->ip; |
61 | dump->regs.cs = (u16)regs->cs; | 63 | dump->regs.cs = (u16)regs->cs; |
@@ -63,7 +65,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) | |||
63 | dump->regs.sp = regs->sp; | 65 | dump->regs.sp = regs->sp; |
64 | dump->regs.ss = (u16)regs->ss; | 66 | dump->regs.ss = (u16)regs->ss; |
65 | 67 | ||
66 | dump->u_fpvalid = dump_fpu (regs, &dump->i387); | 68 | dump->u_fpvalid = dump_fpu(regs, &dump->i387); |
67 | } | 69 | } |
68 | 70 | ||
69 | #endif /* CONFIG_X86_32 */ | 71 | #endif /* CONFIG_X86_32 */ |
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h index 7a72d6aa50be..14411c9de46f 100644 --- a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h | |||
@@ -67,16 +67,16 @@ int __acpi_release_global_lock(unsigned int *lock); | |||
67 | */ | 67 | */ |
68 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ | 68 | #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ |
69 | asm("divl %2;" \ | 69 | asm("divl %2;" \ |
70 | :"=a"(q32), "=d"(r32) \ | 70 | : "=a"(q32), "=d"(r32) \ |
71 | :"r"(d32), \ | 71 | : "r"(d32), \ |
72 | "0"(n_lo), "1"(n_hi)) | 72 | "0"(n_lo), "1"(n_hi)) |
73 | 73 | ||
74 | 74 | ||
75 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ | 75 | #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ |
76 | asm("shrl $1,%2 ;" \ | 76 | asm("shrl $1,%2 ;" \ |
77 | "rcrl $1,%3;" \ | 77 | "rcrl $1,%3;" \ |
78 | :"=r"(n_hi), "=r"(n_lo) \ | 78 | : "=r"(n_hi), "=r"(n_lo) \ |
79 | :"0"(n_hi), "1"(n_lo)) | 79 | : "0"(n_hi), "1"(n_lo)) |
80 | 80 | ||
81 | #ifdef CONFIG_ACPI | 81 | #ifdef CONFIG_ACPI |
82 | extern int acpi_lapic; | 82 | extern int acpi_lapic; |
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h index d8bacf3c4b08..1f6a9ca10126 100644 --- a/include/asm-x86/alternative.h +++ b/include/asm-x86/alternative.h | |||
@@ -66,8 +66,8 @@ extern void alternatives_smp_module_del(struct module *mod); | |||
66 | extern void alternatives_smp_switch(int smp); | 66 | extern void alternatives_smp_switch(int smp); |
67 | #else | 67 | #else |
68 | static inline void alternatives_smp_module_add(struct module *mod, char *name, | 68 | static inline void alternatives_smp_module_add(struct module *mod, char *name, |
69 | void *locks, void *locks_end, | 69 | void *locks, void *locks_end, |
70 | void *text, void *text_end) {} | 70 | void *text, void *text_end) {} |
71 | static inline void alternatives_smp_module_del(struct module *mod) {} | 71 | static inline void alternatives_smp_module_del(struct module *mod) {} |
72 | static inline void alternatives_smp_switch(int smp) {} | 72 | static inline void alternatives_smp_switch(int smp) {} |
73 | #endif /* CONFIG_SMP */ | 73 | #endif /* CONFIG_SMP */ |
@@ -148,14 +148,34 @@ struct paravirt_patch_site; | |||
148 | void apply_paravirt(struct paravirt_patch_site *start, | 148 | void apply_paravirt(struct paravirt_patch_site *start, |
149 | struct paravirt_patch_site *end); | 149 | struct paravirt_patch_site *end); |
150 | #else | 150 | #else |
151 | static inline void | 151 | static inline void apply_paravirt(struct paravirt_patch_site *start, |
152 | apply_paravirt(struct paravirt_patch_site *start, | 152 | struct paravirt_patch_site *end) |
153 | struct paravirt_patch_site *end) | ||
154 | {} | 153 | {} |
155 | #define __parainstructions NULL | 154 | #define __parainstructions NULL |
156 | #define __parainstructions_end NULL | 155 | #define __parainstructions_end NULL |
157 | #endif | 156 | #endif |
158 | 157 | ||
159 | extern void text_poke(void *addr, unsigned char *opcode, int len); | 158 | extern void add_nops(void *insns, unsigned int len); |
159 | |||
160 | /* | ||
161 | * Clear and restore the kernel write-protection flag on the local CPU. | ||
162 | * Allows the kernel to edit read-only pages. | ||
163 | * Side-effect: any interrupt handler running between save and restore will have | ||
164 | * the ability to write to read-only pages. | ||
165 | * | ||
166 | * Warning: | ||
167 | * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and | ||
168 | * no thread can be preempted in the instructions being modified (no iret to an | ||
169 | * invalid instruction possible) or if the instructions are changed from a | ||
170 | * consistent state to another consistent state atomically. | ||
171 | * More care must be taken when modifying code in the SMP case because of | ||
172 | * Intel's errata. | ||
173 | * On the local CPU you need to be protected again NMI or MCE handlers seeing an | ||
174 | * inconsistent instruction while you patch. | ||
175 | * The _early version expects the memory to already be RW. | ||
176 | */ | ||
177 | |||
178 | extern void *text_poke(void *addr, const void *opcode, size_t len); | ||
179 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); | ||
160 | 180 | ||
161 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 181 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h index bcfc07fd3661..be9639a9a186 100644 --- a/include/asm-x86/apic.h +++ b/include/asm-x86/apic.h | |||
@@ -44,7 +44,6 @@ extern int apic_runs_main_timer; | |||
44 | extern int ioapic_force; | 44 | extern int ioapic_force; |
45 | extern int disable_apic; | 45 | extern int disable_apic; |
46 | extern int disable_apic_timer; | 46 | extern int disable_apic_timer; |
47 | extern unsigned boot_cpu_id; | ||
48 | 47 | ||
49 | /* | 48 | /* |
50 | * Basic functions accessing APICs. | 49 | * Basic functions accessing APICs. |
@@ -59,6 +58,8 @@ extern unsigned boot_cpu_id; | |||
59 | #define setup_secondary_clock setup_secondary_APIC_clock | 58 | #define setup_secondary_clock setup_secondary_APIC_clock |
60 | #endif | 59 | #endif |
61 | 60 | ||
61 | extern int is_vsmp_box(void); | ||
62 | |||
62 | static inline void native_apic_write(unsigned long reg, u32 v) | 63 | static inline void native_apic_write(unsigned long reg, u32 v) |
63 | { | 64 | { |
64 | *((volatile u32 *)(APIC_BASE + reg)) = v; | 65 | *((volatile u32 *)(APIC_BASE + reg)) = v; |
@@ -66,7 +67,7 @@ static inline void native_apic_write(unsigned long reg, u32 v) | |||
66 | 67 | ||
67 | static inline void native_apic_write_atomic(unsigned long reg, u32 v) | 68 | static inline void native_apic_write_atomic(unsigned long reg, u32 v) |
68 | { | 69 | { |
69 | (void) xchg((u32*)(APIC_BASE + reg), v); | 70 | (void)xchg((u32 *)(APIC_BASE + reg), v); |
70 | } | 71 | } |
71 | 72 | ||
72 | static inline u32 native_apic_read(unsigned long reg) | 73 | static inline u32 native_apic_read(unsigned long reg) |
@@ -123,7 +124,7 @@ extern void enable_NMI_through_LVT0(void); | |||
123 | * On 32bit this is mach-xxx local | 124 | * On 32bit this is mach-xxx local |
124 | */ | 125 | */ |
125 | #ifdef CONFIG_X86_64 | 126 | #ifdef CONFIG_X86_64 |
126 | extern void setup_apic_routing(void); | 127 | extern void early_init_lapic_mapping(void); |
127 | #endif | 128 | #endif |
128 | 129 | ||
129 | extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); | 130 | extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); |
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h index 550af7a6f88e..6b9008c78731 100644 --- a/include/asm-x86/apicdef.h +++ b/include/asm-x86/apicdef.h | |||
@@ -12,17 +12,15 @@ | |||
12 | 12 | ||
13 | #define APIC_ID 0x20 | 13 | #define APIC_ID 0x20 |
14 | 14 | ||
15 | #ifdef CONFIG_X86_64 | ||
16 | # define APIC_ID_MASK (0xFFu<<24) | ||
17 | # define GET_APIC_ID(x) (((x)>>24)&0xFFu) | ||
18 | # define SET_APIC_ID(x) (((x)<<24)) | ||
19 | #endif | ||
20 | |||
21 | #define APIC_LVR 0x30 | 15 | #define APIC_LVR 0x30 |
22 | #define APIC_LVR_MASK 0xFF00FF | 16 | #define APIC_LVR_MASK 0xFF00FF |
23 | #define GET_APIC_VERSION(x) ((x)&0xFFu) | 17 | #define GET_APIC_VERSION(x) ((x) & 0xFFu) |
24 | #define GET_APIC_MAXLVT(x) (((x)>>16)&0xFFu) | 18 | #define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu) |
25 | #define APIC_INTEGRATED(x) ((x)&0xF0u) | 19 | #ifdef CONFIG_X86_32 |
20 | # define APIC_INTEGRATED(x) ((x) & 0xF0u) | ||
21 | #else | ||
22 | # define APIC_INTEGRATED(x) (1) | ||
23 | #endif | ||
26 | #define APIC_XAPIC(x) ((x) >= 0x14) | 24 | #define APIC_XAPIC(x) ((x) >= 0x14) |
27 | #define APIC_TASKPRI 0x80 | 25 | #define APIC_TASKPRI 0x80 |
28 | #define APIC_TPRI_MASK 0xFFu | 26 | #define APIC_TPRI_MASK 0xFFu |
@@ -33,16 +31,16 @@ | |||
33 | #define APIC_EIO_ACK 0x0 | 31 | #define APIC_EIO_ACK 0x0 |
34 | #define APIC_RRR 0xC0 | 32 | #define APIC_RRR 0xC0 |
35 | #define APIC_LDR 0xD0 | 33 | #define APIC_LDR 0xD0 |
36 | #define APIC_LDR_MASK (0xFFu<<24) | 34 | #define APIC_LDR_MASK (0xFFu << 24) |
37 | #define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFFu) | 35 | #define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu) |
38 | #define SET_APIC_LOGICAL_ID(x) (((x)<<24)) | 36 | #define SET_APIC_LOGICAL_ID(x) (((x) << 24)) |
39 | #define APIC_ALL_CPUS 0xFFu | 37 | #define APIC_ALL_CPUS 0xFFu |
40 | #define APIC_DFR 0xE0 | 38 | #define APIC_DFR 0xE0 |
41 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul | 39 | #define APIC_DFR_CLUSTER 0x0FFFFFFFul |
42 | #define APIC_DFR_FLAT 0xFFFFFFFFul | 40 | #define APIC_DFR_FLAT 0xFFFFFFFFul |
43 | #define APIC_SPIV 0xF0 | 41 | #define APIC_SPIV 0xF0 |
44 | #define APIC_SPIV_FOCUS_DISABLED (1<<9) | 42 | #define APIC_SPIV_FOCUS_DISABLED (1 << 9) |
45 | #define APIC_SPIV_APIC_ENABLED (1<<8) | 43 | #define APIC_SPIV_APIC_ENABLED (1 << 8) |
46 | #define APIC_ISR 0x100 | 44 | #define APIC_ISR 0x100 |
47 | #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ | 45 | #define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */ |
48 | #define APIC_TMR 0x180 | 46 | #define APIC_TMR 0x180 |
@@ -78,27 +76,27 @@ | |||
78 | #define APIC_DM_EXTINT 0x00700 | 76 | #define APIC_DM_EXTINT 0x00700 |
79 | #define APIC_VECTOR_MASK 0x000FF | 77 | #define APIC_VECTOR_MASK 0x000FF |
80 | #define APIC_ICR2 0x310 | 78 | #define APIC_ICR2 0x310 |
81 | #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) | 79 | #define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF) |
82 | #define SET_APIC_DEST_FIELD(x) ((x)<<24) | 80 | #define SET_APIC_DEST_FIELD(x) ((x) << 24) |
83 | #define APIC_LVTT 0x320 | 81 | #define APIC_LVTT 0x320 |
84 | #define APIC_LVTTHMR 0x330 | 82 | #define APIC_LVTTHMR 0x330 |
85 | #define APIC_LVTPC 0x340 | 83 | #define APIC_LVTPC 0x340 |
86 | #define APIC_LVT0 0x350 | 84 | #define APIC_LVT0 0x350 |
87 | #define APIC_LVT_TIMER_BASE_MASK (0x3<<18) | 85 | #define APIC_LVT_TIMER_BASE_MASK (0x3 << 18) |
88 | #define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) | 86 | #define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3) |
89 | #define SET_APIC_TIMER_BASE(x) (((x)<<18)) | 87 | #define SET_APIC_TIMER_BASE(x) (((x) << 18)) |
90 | #define APIC_TIMER_BASE_CLKIN 0x0 | 88 | #define APIC_TIMER_BASE_CLKIN 0x0 |
91 | #define APIC_TIMER_BASE_TMBASE 0x1 | 89 | #define APIC_TIMER_BASE_TMBASE 0x1 |
92 | #define APIC_TIMER_BASE_DIV 0x2 | 90 | #define APIC_TIMER_BASE_DIV 0x2 |
93 | #define APIC_LVT_TIMER_PERIODIC (1<<17) | 91 | #define APIC_LVT_TIMER_PERIODIC (1 << 17) |
94 | #define APIC_LVT_MASKED (1<<16) | 92 | #define APIC_LVT_MASKED (1 << 16) |
95 | #define APIC_LVT_LEVEL_TRIGGER (1<<15) | 93 | #define APIC_LVT_LEVEL_TRIGGER (1 << 15) |
96 | #define APIC_LVT_REMOTE_IRR (1<<14) | 94 | #define APIC_LVT_REMOTE_IRR (1 << 14) |
97 | #define APIC_INPUT_POLARITY (1<<13) | 95 | #define APIC_INPUT_POLARITY (1 << 13) |
98 | #define APIC_SEND_PENDING (1<<12) | 96 | #define APIC_SEND_PENDING (1 << 12) |
99 | #define APIC_MODE_MASK 0x700 | 97 | #define APIC_MODE_MASK 0x700 |
100 | #define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) | 98 | #define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7) |
101 | #define SET_APIC_DELIVERY_MODE(x, y) (((x)&~0x700)|((y)<<8)) | 99 | #define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8)) |
102 | #define APIC_MODE_FIXED 0x0 | 100 | #define APIC_MODE_FIXED 0x0 |
103 | #define APIC_MODE_NMI 0x4 | 101 | #define APIC_MODE_NMI 0x4 |
104 | #define APIC_MODE_EXTINT 0x7 | 102 | #define APIC_MODE_EXTINT 0x7 |
@@ -107,7 +105,7 @@ | |||
107 | #define APIC_TMICT 0x380 | 105 | #define APIC_TMICT 0x380 |
108 | #define APIC_TMCCT 0x390 | 106 | #define APIC_TMCCT 0x390 |
109 | #define APIC_TDCR 0x3E0 | 107 | #define APIC_TDCR 0x3E0 |
110 | #define APIC_TDR_DIV_TMBASE (1<<2) | 108 | #define APIC_TDR_DIV_TMBASE (1 << 2) |
111 | #define APIC_TDR_DIV_1 0xB | 109 | #define APIC_TDR_DIV_1 0xB |
112 | #define APIC_TDR_DIV_2 0x0 | 110 | #define APIC_TDR_DIV_2 0x0 |
113 | #define APIC_TDR_DIV_4 0x1 | 111 | #define APIC_TDR_DIV_4 0x1 |
@@ -117,14 +115,14 @@ | |||
117 | #define APIC_TDR_DIV_64 0x9 | 115 | #define APIC_TDR_DIV_64 0x9 |
118 | #define APIC_TDR_DIV_128 0xA | 116 | #define APIC_TDR_DIV_128 0xA |
119 | #define APIC_EILVT0 0x500 | 117 | #define APIC_EILVT0 0x500 |
120 | #define APIC_EILVT_NR_AMD_K8 1 /* Number of extended interrupts */ | 118 | #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ |
121 | #define APIC_EILVT_NR_AMD_10H 4 | 119 | #define APIC_EILVT_NR_AMD_10H 4 |
122 | #define APIC_EILVT_LVTOFF(x) (((x)>>4)&0xF) | 120 | #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) |
123 | #define APIC_EILVT_MSG_FIX 0x0 | 121 | #define APIC_EILVT_MSG_FIX 0x0 |
124 | #define APIC_EILVT_MSG_SMI 0x2 | 122 | #define APIC_EILVT_MSG_SMI 0x2 |
125 | #define APIC_EILVT_MSG_NMI 0x4 | 123 | #define APIC_EILVT_MSG_NMI 0x4 |
126 | #define APIC_EILVT_MSG_EXT 0x7 | 124 | #define APIC_EILVT_MSG_EXT 0x7 |
127 | #define APIC_EILVT_MASKED (1<<16) | 125 | #define APIC_EILVT_MASKED (1 << 16) |
128 | #define APIC_EILVT1 0x510 | 126 | #define APIC_EILVT1 0x510 |
129 | #define APIC_EILVT2 0x520 | 127 | #define APIC_EILVT2 0x520 |
130 | #define APIC_EILVT3 0x530 | 128 | #define APIC_EILVT3 0x530 |
@@ -135,7 +133,7 @@ | |||
135 | # define MAX_IO_APICS 64 | 133 | # define MAX_IO_APICS 64 |
136 | #else | 134 | #else |
137 | # define MAX_IO_APICS 128 | 135 | # define MAX_IO_APICS 128 |
138 | # define MAX_LOCAL_APIC 256 | 136 | # define MAX_LOCAL_APIC 32768 |
139 | #endif | 137 | #endif |
140 | 138 | ||
141 | /* | 139 | /* |
@@ -408,6 +406,9 @@ struct local_apic { | |||
408 | 406 | ||
409 | #undef u32 | 407 | #undef u32 |
410 | 408 | ||
411 | #define BAD_APICID 0xFFu | 409 | #ifdef CONFIG_X86_32 |
412 | 410 | #define BAD_APICID 0xFFu | |
411 | #else | ||
412 | #define BAD_APICID 0xFFFFu | ||
413 | #endif | ||
413 | #endif | 414 | #endif |
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h index 437aac801711..21a4825148c0 100644 --- a/include/asm-x86/atomic_32.h +++ b/include/asm-x86/atomic_32.h | |||
@@ -15,138 +15,133 @@ | |||
15 | * on us. We need to use _exactly_ the address the user gave us, | 15 | * on us. We need to use _exactly_ the address the user gave us, |
16 | * not some alias that contains the same information. | 16 | * not some alias that contains the same information. |
17 | */ | 17 | */ |
18 | typedef struct { int counter; } atomic_t; | 18 | typedef struct { |
19 | int counter; | ||
20 | } atomic_t; | ||
19 | 21 | ||
20 | #define ATOMIC_INIT(i) { (i) } | 22 | #define ATOMIC_INIT(i) { (i) } |
21 | 23 | ||
22 | /** | 24 | /** |
23 | * atomic_read - read atomic variable | 25 | * atomic_read - read atomic variable |
24 | * @v: pointer of type atomic_t | 26 | * @v: pointer of type atomic_t |
25 | * | 27 | * |
26 | * Atomically reads the value of @v. | 28 | * Atomically reads the value of @v. |
27 | */ | 29 | */ |
28 | #define atomic_read(v) ((v)->counter) | 30 | #define atomic_read(v) ((v)->counter) |
29 | 31 | ||
30 | /** | 32 | /** |
31 | * atomic_set - set atomic variable | 33 | * atomic_set - set atomic variable |
32 | * @v: pointer of type atomic_t | 34 | * @v: pointer of type atomic_t |
33 | * @i: required value | 35 | * @i: required value |
34 | * | 36 | * |
35 | * Atomically sets the value of @v to @i. | 37 | * Atomically sets the value of @v to @i. |
36 | */ | 38 | */ |
37 | #define atomic_set(v,i) (((v)->counter) = (i)) | 39 | #define atomic_set(v, i) (((v)->counter) = (i)) |
38 | 40 | ||
39 | /** | 41 | /** |
40 | * atomic_add - add integer to atomic variable | 42 | * atomic_add - add integer to atomic variable |
41 | * @i: integer value to add | 43 | * @i: integer value to add |
42 | * @v: pointer of type atomic_t | 44 | * @v: pointer of type atomic_t |
43 | * | 45 | * |
44 | * Atomically adds @i to @v. | 46 | * Atomically adds @i to @v. |
45 | */ | 47 | */ |
46 | static __inline__ void atomic_add(int i, atomic_t *v) | 48 | static inline void atomic_add(int i, atomic_t *v) |
47 | { | 49 | { |
48 | __asm__ __volatile__( | 50 | asm volatile(LOCK_PREFIX "addl %1,%0" |
49 | LOCK_PREFIX "addl %1,%0" | 51 | : "+m" (v->counter) |
50 | :"+m" (v->counter) | 52 | : "ir" (i)); |
51 | :"ir" (i)); | ||
52 | } | 53 | } |
53 | 54 | ||
54 | /** | 55 | /** |
55 | * atomic_sub - subtract integer from atomic variable | 56 | * atomic_sub - subtract integer from atomic variable |
56 | * @i: integer value to subtract | 57 | * @i: integer value to subtract |
57 | * @v: pointer of type atomic_t | 58 | * @v: pointer of type atomic_t |
58 | * | 59 | * |
59 | * Atomically subtracts @i from @v. | 60 | * Atomically subtracts @i from @v. |
60 | */ | 61 | */ |
61 | static __inline__ void atomic_sub(int i, atomic_t *v) | 62 | static inline void atomic_sub(int i, atomic_t *v) |
62 | { | 63 | { |
63 | __asm__ __volatile__( | 64 | asm volatile(LOCK_PREFIX "subl %1,%0" |
64 | LOCK_PREFIX "subl %1,%0" | 65 | : "+m" (v->counter) |
65 | :"+m" (v->counter) | 66 | : "ir" (i)); |
66 | :"ir" (i)); | ||
67 | } | 67 | } |
68 | 68 | ||
69 | /** | 69 | /** |
70 | * atomic_sub_and_test - subtract value from variable and test result | 70 | * atomic_sub_and_test - subtract value from variable and test result |
71 | * @i: integer value to subtract | 71 | * @i: integer value to subtract |
72 | * @v: pointer of type atomic_t | 72 | * @v: pointer of type atomic_t |
73 | * | 73 | * |
74 | * Atomically subtracts @i from @v and returns | 74 | * Atomically subtracts @i from @v and returns |
75 | * true if the result is zero, or false for all | 75 | * true if the result is zero, or false for all |
76 | * other cases. | 76 | * other cases. |
77 | */ | 77 | */ |
78 | static __inline__ int atomic_sub_and_test(int i, atomic_t *v) | 78 | static inline int atomic_sub_and_test(int i, atomic_t *v) |
79 | { | 79 | { |
80 | unsigned char c; | 80 | unsigned char c; |
81 | 81 | ||
82 | __asm__ __volatile__( | 82 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" |
83 | LOCK_PREFIX "subl %2,%0; sete %1" | 83 | : "+m" (v->counter), "=qm" (c) |
84 | :"+m" (v->counter), "=qm" (c) | 84 | : "ir" (i) : "memory"); |
85 | :"ir" (i) : "memory"); | ||
86 | return c; | 85 | return c; |
87 | } | 86 | } |
88 | 87 | ||
89 | /** | 88 | /** |
90 | * atomic_inc - increment atomic variable | 89 | * atomic_inc - increment atomic variable |
91 | * @v: pointer of type atomic_t | 90 | * @v: pointer of type atomic_t |
92 | * | 91 | * |
93 | * Atomically increments @v by 1. | 92 | * Atomically increments @v by 1. |
94 | */ | 93 | */ |
95 | static __inline__ void atomic_inc(atomic_t *v) | 94 | static inline void atomic_inc(atomic_t *v) |
96 | { | 95 | { |
97 | __asm__ __volatile__( | 96 | asm volatile(LOCK_PREFIX "incl %0" |
98 | LOCK_PREFIX "incl %0" | 97 | : "+m" (v->counter)); |
99 | :"+m" (v->counter)); | ||
100 | } | 98 | } |
101 | 99 | ||
102 | /** | 100 | /** |
103 | * atomic_dec - decrement atomic variable | 101 | * atomic_dec - decrement atomic variable |
104 | * @v: pointer of type atomic_t | 102 | * @v: pointer of type atomic_t |
105 | * | 103 | * |
106 | * Atomically decrements @v by 1. | 104 | * Atomically decrements @v by 1. |
107 | */ | 105 | */ |
108 | static __inline__ void atomic_dec(atomic_t *v) | 106 | static inline void atomic_dec(atomic_t *v) |
109 | { | 107 | { |
110 | __asm__ __volatile__( | 108 | asm volatile(LOCK_PREFIX "decl %0" |
111 | LOCK_PREFIX "decl %0" | 109 | : "+m" (v->counter)); |
112 | :"+m" (v->counter)); | ||
113 | } | 110 | } |
114 | 111 | ||
115 | /** | 112 | /** |
116 | * atomic_dec_and_test - decrement and test | 113 | * atomic_dec_and_test - decrement and test |
117 | * @v: pointer of type atomic_t | 114 | * @v: pointer of type atomic_t |
118 | * | 115 | * |
119 | * Atomically decrements @v by 1 and | 116 | * Atomically decrements @v by 1 and |
120 | * returns true if the result is 0, or false for all other | 117 | * returns true if the result is 0, or false for all other |
121 | * cases. | 118 | * cases. |
122 | */ | 119 | */ |
123 | static __inline__ int atomic_dec_and_test(atomic_t *v) | 120 | static inline int atomic_dec_and_test(atomic_t *v) |
124 | { | 121 | { |
125 | unsigned char c; | 122 | unsigned char c; |
126 | 123 | ||
127 | __asm__ __volatile__( | 124 | asm volatile(LOCK_PREFIX "decl %0; sete %1" |
128 | LOCK_PREFIX "decl %0; sete %1" | 125 | : "+m" (v->counter), "=qm" (c) |
129 | :"+m" (v->counter), "=qm" (c) | 126 | : : "memory"); |
130 | : : "memory"); | ||
131 | return c != 0; | 127 | return c != 0; |
132 | } | 128 | } |
133 | 129 | ||
134 | /** | 130 | /** |
135 | * atomic_inc_and_test - increment and test | 131 | * atomic_inc_and_test - increment and test |
136 | * @v: pointer of type atomic_t | 132 | * @v: pointer of type atomic_t |
137 | * | 133 | * |
138 | * Atomically increments @v by 1 | 134 | * Atomically increments @v by 1 |
139 | * and returns true if the result is zero, or false for all | 135 | * and returns true if the result is zero, or false for all |
140 | * other cases. | 136 | * other cases. |
141 | */ | 137 | */ |
142 | static __inline__ int atomic_inc_and_test(atomic_t *v) | 138 | static inline int atomic_inc_and_test(atomic_t *v) |
143 | { | 139 | { |
144 | unsigned char c; | 140 | unsigned char c; |
145 | 141 | ||
146 | __asm__ __volatile__( | 142 | asm volatile(LOCK_PREFIX "incl %0; sete %1" |
147 | LOCK_PREFIX "incl %0; sete %1" | 143 | : "+m" (v->counter), "=qm" (c) |
148 | :"+m" (v->counter), "=qm" (c) | 144 | : : "memory"); |
149 | : : "memory"); | ||
150 | return c != 0; | 145 | return c != 0; |
151 | } | 146 | } |
152 | 147 | ||
@@ -154,19 +149,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) | |||
154 | * atomic_add_negative - add and test if negative | 149 | * atomic_add_negative - add and test if negative |
155 | * @v: pointer of type atomic_t | 150 | * @v: pointer of type atomic_t |
156 | * @i: integer value to add | 151 | * @i: integer value to add |
157 | * | 152 | * |
158 | * Atomically adds @i to @v and returns true | 153 | * Atomically adds @i to @v and returns true |
159 | * if the result is negative, or false when | 154 | * if the result is negative, or false when |
160 | * result is greater than or equal to zero. | 155 | * result is greater than or equal to zero. |
161 | */ | 156 | */ |
162 | static __inline__ int atomic_add_negative(int i, atomic_t *v) | 157 | static inline int atomic_add_negative(int i, atomic_t *v) |
163 | { | 158 | { |
164 | unsigned char c; | 159 | unsigned char c; |
165 | 160 | ||
166 | __asm__ __volatile__( | 161 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" |
167 | LOCK_PREFIX "addl %2,%0; sets %1" | 162 | : "+m" (v->counter), "=qm" (c) |
168 | :"+m" (v->counter), "=qm" (c) | 163 | : "ir" (i) : "memory"); |
169 | :"ir" (i) : "memory"); | ||
170 | return c; | 164 | return c; |
171 | } | 165 | } |
172 | 166 | ||
@@ -177,20 +171,19 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) | |||
177 | * | 171 | * |
178 | * Atomically adds @i to @v and returns @i + @v | 172 | * Atomically adds @i to @v and returns @i + @v |
179 | */ | 173 | */ |
180 | static __inline__ int atomic_add_return(int i, atomic_t *v) | 174 | static inline int atomic_add_return(int i, atomic_t *v) |
181 | { | 175 | { |
182 | int __i; | 176 | int __i; |
183 | #ifdef CONFIG_M386 | 177 | #ifdef CONFIG_M386 |
184 | unsigned long flags; | 178 | unsigned long flags; |
185 | if(unlikely(boot_cpu_data.x86 <= 3)) | 179 | if (unlikely(boot_cpu_data.x86 <= 3)) |
186 | goto no_xadd; | 180 | goto no_xadd; |
187 | #endif | 181 | #endif |
188 | /* Modern 486+ processor */ | 182 | /* Modern 486+ processor */ |
189 | __i = i; | 183 | __i = i; |
190 | __asm__ __volatile__( | 184 | asm volatile(LOCK_PREFIX "xaddl %0, %1" |
191 | LOCK_PREFIX "xaddl %0, %1" | 185 | : "+r" (i), "+m" (v->counter) |
192 | :"+r" (i), "+m" (v->counter) | 186 | : : "memory"); |
193 | : : "memory"); | ||
194 | return i + __i; | 187 | return i + __i; |
195 | 188 | ||
196 | #ifdef CONFIG_M386 | 189 | #ifdef CONFIG_M386 |
@@ -210,9 +203,9 @@ no_xadd: /* Legacy 386 processor */ | |||
210 | * | 203 | * |
211 | * Atomically subtracts @i from @v and returns @v - @i | 204 | * Atomically subtracts @i from @v and returns @v - @i |
212 | */ | 205 | */ |
213 | static __inline__ int atomic_sub_return(int i, atomic_t *v) | 206 | static inline int atomic_sub_return(int i, atomic_t *v) |
214 | { | 207 | { |
215 | return atomic_add_return(-i,v); | 208 | return atomic_add_return(-i, v); |
216 | } | 209 | } |
217 | 210 | ||
218 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) | 211 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) |
@@ -227,7 +220,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v) | |||
227 | * Atomically adds @a to @v, so long as @v was not already @u. | 220 | * Atomically adds @a to @v, so long as @v was not already @u. |
228 | * Returns non-zero if @v was not @u, and zero otherwise. | 221 | * Returns non-zero if @v was not @u, and zero otherwise. |
229 | */ | 222 | */ |
230 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 223 | static inline int atomic_add_unless(atomic_t *v, int a, int u) |
231 | { | 224 | { |
232 | int c, old; | 225 | int c, old; |
233 | c = atomic_read(v); | 226 | c = atomic_read(v); |
@@ -244,17 +237,17 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
244 | 237 | ||
245 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | 238 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
246 | 239 | ||
247 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | 240 | #define atomic_inc_return(v) (atomic_add_return(1, v)) |
248 | #define atomic_dec_return(v) (atomic_sub_return(1,v)) | 241 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) |
249 | 242 | ||
250 | /* These are x86-specific, used by some header files */ | 243 | /* These are x86-specific, used by some header files */ |
251 | #define atomic_clear_mask(mask, addr) \ | 244 | #define atomic_clear_mask(mask, addr) \ |
252 | __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ | 245 | asm volatile(LOCK_PREFIX "andl %0,%1" \ |
253 | : : "r" (~(mask)),"m" (*addr) : "memory") | 246 | : : "r" (~(mask)), "m" (*(addr)) : "memory") |
254 | 247 | ||
255 | #define atomic_set_mask(mask, addr) \ | 248 | #define atomic_set_mask(mask, addr) \ |
256 | __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ | 249 | asm volatile(LOCK_PREFIX "orl %0,%1" \ |
257 | : : "r" (mask),"m" (*(addr)) : "memory") | 250 | : : "r" (mask), "m" (*(addr)) : "memory") |
258 | 251 | ||
259 | /* Atomic operations are already serializing on x86 */ | 252 | /* Atomic operations are already serializing on x86 */ |
260 | #define smp_mb__before_atomic_dec() barrier() | 253 | #define smp_mb__before_atomic_dec() barrier() |
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h index 2d20a7a19f62..3e0cd7d38335 100644 --- a/include/asm-x86/atomic_64.h +++ b/include/asm-x86/atomic_64.h | |||
@@ -22,140 +22,135 @@ | |||
22 | * on us. We need to use _exactly_ the address the user gave us, | 22 | * on us. We need to use _exactly_ the address the user gave us, |
23 | * not some alias that contains the same information. | 23 | * not some alias that contains the same information. |
24 | */ | 24 | */ |
25 | typedef struct { int counter; } atomic_t; | 25 | typedef struct { |
26 | int counter; | ||
27 | } atomic_t; | ||
26 | 28 | ||
27 | #define ATOMIC_INIT(i) { (i) } | 29 | #define ATOMIC_INIT(i) { (i) } |
28 | 30 | ||
29 | /** | 31 | /** |
30 | * atomic_read - read atomic variable | 32 | * atomic_read - read atomic variable |
31 | * @v: pointer of type atomic_t | 33 | * @v: pointer of type atomic_t |
32 | * | 34 | * |
33 | * Atomically reads the value of @v. | 35 | * Atomically reads the value of @v. |
34 | */ | 36 | */ |
35 | #define atomic_read(v) ((v)->counter) | 37 | #define atomic_read(v) ((v)->counter) |
36 | 38 | ||
37 | /** | 39 | /** |
38 | * atomic_set - set atomic variable | 40 | * atomic_set - set atomic variable |
39 | * @v: pointer of type atomic_t | 41 | * @v: pointer of type atomic_t |
40 | * @i: required value | 42 | * @i: required value |
41 | * | 43 | * |
42 | * Atomically sets the value of @v to @i. | 44 | * Atomically sets the value of @v to @i. |
43 | */ | 45 | */ |
44 | #define atomic_set(v,i) (((v)->counter) = (i)) | 46 | #define atomic_set(v, i) (((v)->counter) = (i)) |
45 | 47 | ||
46 | /** | 48 | /** |
47 | * atomic_add - add integer to atomic variable | 49 | * atomic_add - add integer to atomic variable |
48 | * @i: integer value to add | 50 | * @i: integer value to add |
49 | * @v: pointer of type atomic_t | 51 | * @v: pointer of type atomic_t |
50 | * | 52 | * |
51 | * Atomically adds @i to @v. | 53 | * Atomically adds @i to @v. |
52 | */ | 54 | */ |
53 | static __inline__ void atomic_add(int i, atomic_t *v) | 55 | static inline void atomic_add(int i, atomic_t *v) |
54 | { | 56 | { |
55 | __asm__ __volatile__( | 57 | asm volatile(LOCK_PREFIX "addl %1,%0" |
56 | LOCK_PREFIX "addl %1,%0" | 58 | : "=m" (v->counter) |
57 | :"=m" (v->counter) | 59 | : "ir" (i), "m" (v->counter)); |
58 | :"ir" (i), "m" (v->counter)); | ||
59 | } | 60 | } |
60 | 61 | ||
61 | /** | 62 | /** |
62 | * atomic_sub - subtract the atomic variable | 63 | * atomic_sub - subtract the atomic variable |
63 | * @i: integer value to subtract | 64 | * @i: integer value to subtract |
64 | * @v: pointer of type atomic_t | 65 | * @v: pointer of type atomic_t |
65 | * | 66 | * |
66 | * Atomically subtracts @i from @v. | 67 | * Atomically subtracts @i from @v. |
67 | */ | 68 | */ |
68 | static __inline__ void atomic_sub(int i, atomic_t *v) | 69 | static inline void atomic_sub(int i, atomic_t *v) |
69 | { | 70 | { |
70 | __asm__ __volatile__( | 71 | asm volatile(LOCK_PREFIX "subl %1,%0" |
71 | LOCK_PREFIX "subl %1,%0" | 72 | : "=m" (v->counter) |
72 | :"=m" (v->counter) | 73 | : "ir" (i), "m" (v->counter)); |
73 | :"ir" (i), "m" (v->counter)); | ||
74 | } | 74 | } |
75 | 75 | ||
76 | /** | 76 | /** |
77 | * atomic_sub_and_test - subtract value from variable and test result | 77 | * atomic_sub_and_test - subtract value from variable and test result |
78 | * @i: integer value to subtract | 78 | * @i: integer value to subtract |
79 | * @v: pointer of type atomic_t | 79 | * @v: pointer of type atomic_t |
80 | * | 80 | * |
81 | * Atomically subtracts @i from @v and returns | 81 | * Atomically subtracts @i from @v and returns |
82 | * true if the result is zero, or false for all | 82 | * true if the result is zero, or false for all |
83 | * other cases. | 83 | * other cases. |
84 | */ | 84 | */ |
85 | static __inline__ int atomic_sub_and_test(int i, atomic_t *v) | 85 | static inline int atomic_sub_and_test(int i, atomic_t *v) |
86 | { | 86 | { |
87 | unsigned char c; | 87 | unsigned char c; |
88 | 88 | ||
89 | __asm__ __volatile__( | 89 | asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" |
90 | LOCK_PREFIX "subl %2,%0; sete %1" | 90 | : "=m" (v->counter), "=qm" (c) |
91 | :"=m" (v->counter), "=qm" (c) | 91 | : "ir" (i), "m" (v->counter) : "memory"); |
92 | :"ir" (i), "m" (v->counter) : "memory"); | ||
93 | return c; | 92 | return c; |
94 | } | 93 | } |
95 | 94 | ||
96 | /** | 95 | /** |
97 | * atomic_inc - increment atomic variable | 96 | * atomic_inc - increment atomic variable |
98 | * @v: pointer of type atomic_t | 97 | * @v: pointer of type atomic_t |
99 | * | 98 | * |
100 | * Atomically increments @v by 1. | 99 | * Atomically increments @v by 1. |
101 | */ | 100 | */ |
102 | static __inline__ void atomic_inc(atomic_t *v) | 101 | static inline void atomic_inc(atomic_t *v) |
103 | { | 102 | { |
104 | __asm__ __volatile__( | 103 | asm volatile(LOCK_PREFIX "incl %0" |
105 | LOCK_PREFIX "incl %0" | 104 | : "=m" (v->counter) |
106 | :"=m" (v->counter) | 105 | : "m" (v->counter)); |
107 | :"m" (v->counter)); | ||
108 | } | 106 | } |
109 | 107 | ||
110 | /** | 108 | /** |
111 | * atomic_dec - decrement atomic variable | 109 | * atomic_dec - decrement atomic variable |
112 | * @v: pointer of type atomic_t | 110 | * @v: pointer of type atomic_t |
113 | * | 111 | * |
114 | * Atomically decrements @v by 1. | 112 | * Atomically decrements @v by 1. |
115 | */ | 113 | */ |
116 | static __inline__ void atomic_dec(atomic_t *v) | 114 | static inline void atomic_dec(atomic_t *v) |
117 | { | 115 | { |
118 | __asm__ __volatile__( | 116 | asm volatile(LOCK_PREFIX "decl %0" |
119 | LOCK_PREFIX "decl %0" | 117 | : "=m" (v->counter) |
120 | :"=m" (v->counter) | 118 | : "m" (v->counter)); |
121 | :"m" (v->counter)); | ||
122 | } | 119 | } |
123 | 120 | ||
124 | /** | 121 | /** |
125 | * atomic_dec_and_test - decrement and test | 122 | * atomic_dec_and_test - decrement and test |
126 | * @v: pointer of type atomic_t | 123 | * @v: pointer of type atomic_t |
127 | * | 124 | * |
128 | * Atomically decrements @v by 1 and | 125 | * Atomically decrements @v by 1 and |
129 | * returns true if the result is 0, or false for all other | 126 | * returns true if the result is 0, or false for all other |
130 | * cases. | 127 | * cases. |
131 | */ | 128 | */ |
132 | static __inline__ int atomic_dec_and_test(atomic_t *v) | 129 | static inline int atomic_dec_and_test(atomic_t *v) |
133 | { | 130 | { |
134 | unsigned char c; | 131 | unsigned char c; |
135 | 132 | ||
136 | __asm__ __volatile__( | 133 | asm volatile(LOCK_PREFIX "decl %0; sete %1" |
137 | LOCK_PREFIX "decl %0; sete %1" | 134 | : "=m" (v->counter), "=qm" (c) |
138 | :"=m" (v->counter), "=qm" (c) | 135 | : "m" (v->counter) : "memory"); |
139 | :"m" (v->counter) : "memory"); | ||
140 | return c != 0; | 136 | return c != 0; |
141 | } | 137 | } |
142 | 138 | ||
143 | /** | 139 | /** |
144 | * atomic_inc_and_test - increment and test | 140 | * atomic_inc_and_test - increment and test |
145 | * @v: pointer of type atomic_t | 141 | * @v: pointer of type atomic_t |
146 | * | 142 | * |
147 | * Atomically increments @v by 1 | 143 | * Atomically increments @v by 1 |
148 | * and returns true if the result is zero, or false for all | 144 | * and returns true if the result is zero, or false for all |
149 | * other cases. | 145 | * other cases. |
150 | */ | 146 | */ |
151 | static __inline__ int atomic_inc_and_test(atomic_t *v) | 147 | static inline int atomic_inc_and_test(atomic_t *v) |
152 | { | 148 | { |
153 | unsigned char c; | 149 | unsigned char c; |
154 | 150 | ||
155 | __asm__ __volatile__( | 151 | asm volatile(LOCK_PREFIX "incl %0; sete %1" |
156 | LOCK_PREFIX "incl %0; sete %1" | 152 | : "=m" (v->counter), "=qm" (c) |
157 | :"=m" (v->counter), "=qm" (c) | 153 | : "m" (v->counter) : "memory"); |
158 | :"m" (v->counter) : "memory"); | ||
159 | return c != 0; | 154 | return c != 0; |
160 | } | 155 | } |
161 | 156 | ||
@@ -163,19 +158,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v) | |||
163 | * atomic_add_negative - add and test if negative | 158 | * atomic_add_negative - add and test if negative |
164 | * @i: integer value to add | 159 | * @i: integer value to add |
165 | * @v: pointer of type atomic_t | 160 | * @v: pointer of type atomic_t |
166 | * | 161 | * |
167 | * Atomically adds @i to @v and returns true | 162 | * Atomically adds @i to @v and returns true |
168 | * if the result is negative, or false when | 163 | * if the result is negative, or false when |
169 | * result is greater than or equal to zero. | 164 | * result is greater than or equal to zero. |
170 | */ | 165 | */ |
171 | static __inline__ int atomic_add_negative(int i, atomic_t *v) | 166 | static inline int atomic_add_negative(int i, atomic_t *v) |
172 | { | 167 | { |
173 | unsigned char c; | 168 | unsigned char c; |
174 | 169 | ||
175 | __asm__ __volatile__( | 170 | asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" |
176 | LOCK_PREFIX "addl %2,%0; sets %1" | 171 | : "=m" (v->counter), "=qm" (c) |
177 | :"=m" (v->counter), "=qm" (c) | 172 | : "ir" (i), "m" (v->counter) : "memory"); |
178 | :"ir" (i), "m" (v->counter) : "memory"); | ||
179 | return c; | 173 | return c; |
180 | } | 174 | } |
181 | 175 | ||
@@ -186,27 +180,28 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v) | |||
186 | * | 180 | * |
187 | * Atomically adds @i to @v and returns @i + @v | 181 | * Atomically adds @i to @v and returns @i + @v |
188 | */ | 182 | */ |
189 | static __inline__ int atomic_add_return(int i, atomic_t *v) | 183 | static inline int atomic_add_return(int i, atomic_t *v) |
190 | { | 184 | { |
191 | int __i = i; | 185 | int __i = i; |
192 | __asm__ __volatile__( | 186 | asm volatile(LOCK_PREFIX "xaddl %0, %1" |
193 | LOCK_PREFIX "xaddl %0, %1" | 187 | : "+r" (i), "+m" (v->counter) |
194 | :"+r" (i), "+m" (v->counter) | 188 | : : "memory"); |
195 | : : "memory"); | ||
196 | return i + __i; | 189 | return i + __i; |
197 | } | 190 | } |
198 | 191 | ||
199 | static __inline__ int atomic_sub_return(int i, atomic_t *v) | 192 | static inline int atomic_sub_return(int i, atomic_t *v) |
200 | { | 193 | { |
201 | return atomic_add_return(-i,v); | 194 | return atomic_add_return(-i, v); |
202 | } | 195 | } |
203 | 196 | ||
204 | #define atomic_inc_return(v) (atomic_add_return(1,v)) | 197 | #define atomic_inc_return(v) (atomic_add_return(1, v)) |
205 | #define atomic_dec_return(v) (atomic_sub_return(1,v)) | 198 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) |
206 | 199 | ||
207 | /* An 64bit atomic type */ | 200 | /* An 64bit atomic type */ |
208 | 201 | ||
209 | typedef struct { long counter; } atomic64_t; | 202 | typedef struct { |
203 | long counter; | ||
204 | } atomic64_t; | ||
210 | 205 | ||
211 | #define ATOMIC64_INIT(i) { (i) } | 206 | #define ATOMIC64_INIT(i) { (i) } |
212 | 207 | ||
@@ -226,7 +221,7 @@ typedef struct { long counter; } atomic64_t; | |||
226 | * | 221 | * |
227 | * Atomically sets the value of @v to @i. | 222 | * Atomically sets the value of @v to @i. |
228 | */ | 223 | */ |
229 | #define atomic64_set(v,i) (((v)->counter) = (i)) | 224 | #define atomic64_set(v, i) (((v)->counter) = (i)) |
230 | 225 | ||
231 | /** | 226 | /** |
232 | * atomic64_add - add integer to atomic64 variable | 227 | * atomic64_add - add integer to atomic64 variable |
@@ -235,12 +230,11 @@ typedef struct { long counter; } atomic64_t; | |||
235 | * | 230 | * |
236 | * Atomically adds @i to @v. | 231 | * Atomically adds @i to @v. |
237 | */ | 232 | */ |
238 | static __inline__ void atomic64_add(long i, atomic64_t *v) | 233 | static inline void atomic64_add(long i, atomic64_t *v) |
239 | { | 234 | { |
240 | __asm__ __volatile__( | 235 | asm volatile(LOCK_PREFIX "addq %1,%0" |
241 | LOCK_PREFIX "addq %1,%0" | 236 | : "=m" (v->counter) |
242 | :"=m" (v->counter) | 237 | : "ir" (i), "m" (v->counter)); |
243 | :"ir" (i), "m" (v->counter)); | ||
244 | } | 238 | } |
245 | 239 | ||
246 | /** | 240 | /** |
@@ -250,12 +244,11 @@ static __inline__ void atomic64_add(long i, atomic64_t *v) | |||
250 | * | 244 | * |
251 | * Atomically subtracts @i from @v. | 245 | * Atomically subtracts @i from @v. |
252 | */ | 246 | */ |
253 | static __inline__ void atomic64_sub(long i, atomic64_t *v) | 247 | static inline void atomic64_sub(long i, atomic64_t *v) |
254 | { | 248 | { |
255 | __asm__ __volatile__( | 249 | asm volatile(LOCK_PREFIX "subq %1,%0" |
256 | LOCK_PREFIX "subq %1,%0" | 250 | : "=m" (v->counter) |
257 | :"=m" (v->counter) | 251 | : "ir" (i), "m" (v->counter)); |
258 | :"ir" (i), "m" (v->counter)); | ||
259 | } | 252 | } |
260 | 253 | ||
261 | /** | 254 | /** |
@@ -267,14 +260,13 @@ static __inline__ void atomic64_sub(long i, atomic64_t *v) | |||
267 | * true if the result is zero, or false for all | 260 | * true if the result is zero, or false for all |
268 | * other cases. | 261 | * other cases. |
269 | */ | 262 | */ |
270 | static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) | 263 | static inline int atomic64_sub_and_test(long i, atomic64_t *v) |
271 | { | 264 | { |
272 | unsigned char c; | 265 | unsigned char c; |
273 | 266 | ||
274 | __asm__ __volatile__( | 267 | asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" |
275 | LOCK_PREFIX "subq %2,%0; sete %1" | 268 | : "=m" (v->counter), "=qm" (c) |
276 | :"=m" (v->counter), "=qm" (c) | 269 | : "ir" (i), "m" (v->counter) : "memory"); |
277 | :"ir" (i), "m" (v->counter) : "memory"); | ||
278 | return c; | 270 | return c; |
279 | } | 271 | } |
280 | 272 | ||
@@ -284,12 +276,11 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v) | |||
284 | * | 276 | * |
285 | * Atomically increments @v by 1. | 277 | * Atomically increments @v by 1. |
286 | */ | 278 | */ |
287 | static __inline__ void atomic64_inc(atomic64_t *v) | 279 | static inline void atomic64_inc(atomic64_t *v) |
288 | { | 280 | { |
289 | __asm__ __volatile__( | 281 | asm volatile(LOCK_PREFIX "incq %0" |
290 | LOCK_PREFIX "incq %0" | 282 | : "=m" (v->counter) |
291 | :"=m" (v->counter) | 283 | : "m" (v->counter)); |
292 | :"m" (v->counter)); | ||
293 | } | 284 | } |
294 | 285 | ||
295 | /** | 286 | /** |
@@ -298,12 +289,11 @@ static __inline__ void atomic64_inc(atomic64_t *v) | |||
298 | * | 289 | * |
299 | * Atomically decrements @v by 1. | 290 | * Atomically decrements @v by 1. |
300 | */ | 291 | */ |
301 | static __inline__ void atomic64_dec(atomic64_t *v) | 292 | static inline void atomic64_dec(atomic64_t *v) |
302 | { | 293 | { |
303 | __asm__ __volatile__( | 294 | asm volatile(LOCK_PREFIX "decq %0" |
304 | LOCK_PREFIX "decq %0" | 295 | : "=m" (v->counter) |
305 | :"=m" (v->counter) | 296 | : "m" (v->counter)); |
306 | :"m" (v->counter)); | ||
307 | } | 297 | } |
308 | 298 | ||
309 | /** | 299 | /** |
@@ -314,14 +304,13 @@ static __inline__ void atomic64_dec(atomic64_t *v) | |||
314 | * returns true if the result is 0, or false for all other | 304 | * returns true if the result is 0, or false for all other |
315 | * cases. | 305 | * cases. |
316 | */ | 306 | */ |
317 | static __inline__ int atomic64_dec_and_test(atomic64_t *v) | 307 | static inline int atomic64_dec_and_test(atomic64_t *v) |
318 | { | 308 | { |
319 | unsigned char c; | 309 | unsigned char c; |
320 | 310 | ||
321 | __asm__ __volatile__( | 311 | asm volatile(LOCK_PREFIX "decq %0; sete %1" |
322 | LOCK_PREFIX "decq %0; sete %1" | 312 | : "=m" (v->counter), "=qm" (c) |
323 | :"=m" (v->counter), "=qm" (c) | 313 | : "m" (v->counter) : "memory"); |
324 | :"m" (v->counter) : "memory"); | ||
325 | return c != 0; | 314 | return c != 0; |
326 | } | 315 | } |
327 | 316 | ||
@@ -333,14 +322,13 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v) | |||
333 | * and returns true if the result is zero, or false for all | 322 | * and returns true if the result is zero, or false for all |
334 | * other cases. | 323 | * other cases. |
335 | */ | 324 | */ |
336 | static __inline__ int atomic64_inc_and_test(atomic64_t *v) | 325 | static inline int atomic64_inc_and_test(atomic64_t *v) |
337 | { | 326 | { |
338 | unsigned char c; | 327 | unsigned char c; |
339 | 328 | ||
340 | __asm__ __volatile__( | 329 | asm volatile(LOCK_PREFIX "incq %0; sete %1" |
341 | LOCK_PREFIX "incq %0; sete %1" | 330 | : "=m" (v->counter), "=qm" (c) |
342 | :"=m" (v->counter), "=qm" (c) | 331 | : "m" (v->counter) : "memory"); |
343 | :"m" (v->counter) : "memory"); | ||
344 | return c != 0; | 332 | return c != 0; |
345 | } | 333 | } |
346 | 334 | ||
@@ -353,14 +341,13 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v) | |||
353 | * if the result is negative, or false when | 341 | * if the result is negative, or false when |
354 | * result is greater than or equal to zero. | 342 | * result is greater than or equal to zero. |
355 | */ | 343 | */ |
356 | static __inline__ int atomic64_add_negative(long i, atomic64_t *v) | 344 | static inline int atomic64_add_negative(long i, atomic64_t *v) |
357 | { | 345 | { |
358 | unsigned char c; | 346 | unsigned char c; |
359 | 347 | ||
360 | __asm__ __volatile__( | 348 | asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" |
361 | LOCK_PREFIX "addq %2,%0; sets %1" | 349 | : "=m" (v->counter), "=qm" (c) |
362 | :"=m" (v->counter), "=qm" (c) | 350 | : "ir" (i), "m" (v->counter) : "memory"); |
363 | :"ir" (i), "m" (v->counter) : "memory"); | ||
364 | return c; | 351 | return c; |
365 | } | 352 | } |
366 | 353 | ||
@@ -371,29 +358,28 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v) | |||
371 | * | 358 | * |
372 | * Atomically adds @i to @v and returns @i + @v | 359 | * Atomically adds @i to @v and returns @i + @v |
373 | */ | 360 | */ |
374 | static __inline__ long atomic64_add_return(long i, atomic64_t *v) | 361 | static inline long atomic64_add_return(long i, atomic64_t *v) |
375 | { | 362 | { |
376 | long __i = i; | 363 | long __i = i; |
377 | __asm__ __volatile__( | 364 | asm volatile(LOCK_PREFIX "xaddq %0, %1;" |
378 | LOCK_PREFIX "xaddq %0, %1;" | 365 | : "+r" (i), "+m" (v->counter) |
379 | :"+r" (i), "+m" (v->counter) | 366 | : : "memory"); |
380 | : : "memory"); | ||
381 | return i + __i; | 367 | return i + __i; |
382 | } | 368 | } |
383 | 369 | ||
384 | static __inline__ long atomic64_sub_return(long i, atomic64_t *v) | 370 | static inline long atomic64_sub_return(long i, atomic64_t *v) |
385 | { | 371 | { |
386 | return atomic64_add_return(-i,v); | 372 | return atomic64_add_return(-i, v); |
387 | } | 373 | } |
388 | 374 | ||
389 | #define atomic64_inc_return(v) (atomic64_add_return(1,v)) | 375 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) |
390 | #define atomic64_dec_return(v) (atomic64_sub_return(1,v)) | 376 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) |
391 | 377 | ||
392 | #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | 378 | #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) |
393 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 379 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
394 | 380 | ||
395 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | 381 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) |
396 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 382 | #define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) |
397 | 383 | ||
398 | /** | 384 | /** |
399 | * atomic_add_unless - add unless the number is a given value | 385 | * atomic_add_unless - add unless the number is a given value |
@@ -404,7 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v) | |||
404 | * Atomically adds @a to @v, so long as it was not @u. | 390 | * Atomically adds @a to @v, so long as it was not @u. |
405 | * Returns non-zero if @v was not @u, and zero otherwise. | 391 | * Returns non-zero if @v was not @u, and zero otherwise. |
406 | */ | 392 | */ |
407 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 393 | static inline int atomic_add_unless(atomic_t *v, int a, int u) |
408 | { | 394 | { |
409 | int c, old; | 395 | int c, old; |
410 | c = atomic_read(v); | 396 | c = atomic_read(v); |
@@ -430,7 +416,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
430 | * Atomically adds @a to @v, so long as it was not @u. | 416 | * Atomically adds @a to @v, so long as it was not @u. |
431 | * Returns non-zero if @v was not @u, and zero otherwise. | 417 | * Returns non-zero if @v was not @u, and zero otherwise. |
432 | */ | 418 | */ |
433 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | 419 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) |
434 | { | 420 | { |
435 | long c, old; | 421 | long c, old; |
436 | c = atomic64_read(v); | 422 | c = atomic64_read(v); |
@@ -448,13 +434,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
448 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | 434 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) |
449 | 435 | ||
450 | /* These are x86-specific, used by some header files */ | 436 | /* These are x86-specific, used by some header files */ |
451 | #define atomic_clear_mask(mask, addr) \ | 437 | #define atomic_clear_mask(mask, addr) \ |
452 | __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ | 438 | asm volatile(LOCK_PREFIX "andl %0,%1" \ |
453 | : : "r" (~(mask)),"m" (*addr) : "memory") | 439 | : : "r" (~(mask)), "m" (*(addr)) : "memory") |
454 | 440 | ||
455 | #define atomic_set_mask(mask, addr) \ | 441 | #define atomic_set_mask(mask, addr) \ |
456 | __asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ | 442 | asm volatile(LOCK_PREFIX "orl %0,%1" \ |
457 | : : "r" ((unsigned)mask),"m" (*(addr)) : "memory") | 443 | : : "r" ((unsigned)(mask)), "m" (*(addr)) \ |
444 | : "memory") | ||
458 | 445 | ||
459 | /* Atomic operations are already serializing on x86 */ | 446 | /* Atomic operations are already serializing on x86 */ |
460 | #define smp_mb__before_atomic_dec() barrier() | 447 | #define smp_mb__before_atomic_dec() barrier() |
diff --git a/include/asm-x86/mach-default/bios_ebda.h b/include/asm-x86/bios_ebda.h index 9cbd9a668af8..9cbd9a668af8 100644 --- a/include/asm-x86/mach-default/bios_ebda.h +++ b/include/asm-x86/bios_ebda.h | |||
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 1a23ce1a5697..1ae7b270a1ef 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h | |||
@@ -23,10 +23,13 @@ | |||
23 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) | 23 | #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) |
24 | /* Technically wrong, but this avoids compilation errors on some gcc | 24 | /* Technically wrong, but this avoids compilation errors on some gcc |
25 | versions. */ | 25 | versions. */ |
26 | #define ADDR "=m" (*(volatile long *) addr) | 26 | #define ADDR "=m" (*(volatile long *)addr) |
27 | #define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5]) | ||
27 | #else | 28 | #else |
28 | #define ADDR "+m" (*(volatile long *) addr) | 29 | #define ADDR "+m" (*(volatile long *) addr) |
30 | #define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5]) | ||
29 | #endif | 31 | #endif |
32 | #define BASE_ADDR "m" (*(volatile int *)addr) | ||
30 | 33 | ||
31 | /** | 34 | /** |
32 | * set_bit - Atomically set a bit in memory | 35 | * set_bit - Atomically set a bit in memory |
@@ -45,9 +48,7 @@ | |||
45 | */ | 48 | */ |
46 | static inline void set_bit(int nr, volatile void *addr) | 49 | static inline void set_bit(int nr, volatile void *addr) |
47 | { | 50 | { |
48 | asm volatile(LOCK_PREFIX "bts %1,%0" | 51 | asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory"); |
49 | : ADDR | ||
50 | : "Ir" (nr) : "memory"); | ||
51 | } | 52 | } |
52 | 53 | ||
53 | /** | 54 | /** |
@@ -79,9 +80,7 @@ static inline void __set_bit(int nr, volatile void *addr) | |||
79 | */ | 80 | */ |
80 | static inline void clear_bit(int nr, volatile void *addr) | 81 | static inline void clear_bit(int nr, volatile void *addr) |
81 | { | 82 | { |
82 | asm volatile(LOCK_PREFIX "btr %1,%0" | 83 | asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); |
83 | : ADDR | ||
84 | : "Ir" (nr)); | ||
85 | } | 84 | } |
86 | 85 | ||
87 | /* | 86 | /* |
@@ -100,7 +99,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr) | |||
100 | 99 | ||
101 | static inline void __clear_bit(int nr, volatile void *addr) | 100 | static inline void __clear_bit(int nr, volatile void *addr) |
102 | { | 101 | { |
103 | asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); | 102 | asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); |
104 | } | 103 | } |
105 | 104 | ||
106 | /* | 105 | /* |
@@ -135,7 +134,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr) | |||
135 | */ | 134 | */ |
136 | static inline void __change_bit(int nr, volatile void *addr) | 135 | static inline void __change_bit(int nr, volatile void *addr) |
137 | { | 136 | { |
138 | asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); | 137 | asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); |
139 | } | 138 | } |
140 | 139 | ||
141 | /** | 140 | /** |
@@ -149,8 +148,7 @@ static inline void __change_bit(int nr, volatile void *addr) | |||
149 | */ | 148 | */ |
150 | static inline void change_bit(int nr, volatile void *addr) | 149 | static inline void change_bit(int nr, volatile void *addr) |
151 | { | 150 | { |
152 | asm volatile(LOCK_PREFIX "btc %1,%0" | 151 | asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR); |
153 | : ADDR : "Ir" (nr)); | ||
154 | } | 152 | } |
155 | 153 | ||
156 | /** | 154 | /** |
@@ -166,9 +164,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) | |||
166 | int oldbit; | 164 | int oldbit; |
167 | 165 | ||
168 | asm volatile(LOCK_PREFIX "bts %2,%1\n\t" | 166 | asm volatile(LOCK_PREFIX "bts %2,%1\n\t" |
169 | "sbb %0,%0" | 167 | "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
170 | : "=r" (oldbit), ADDR | ||
171 | : "Ir" (nr) : "memory"); | ||
172 | 168 | ||
173 | return oldbit; | 169 | return oldbit; |
174 | } | 170 | } |
@@ -198,10 +194,9 @@ static inline int __test_and_set_bit(int nr, volatile void *addr) | |||
198 | { | 194 | { |
199 | int oldbit; | 195 | int oldbit; |
200 | 196 | ||
201 | asm("bts %2,%1\n\t" | 197 | asm volatile("bts %2,%3\n\t" |
202 | "sbb %0,%0" | 198 | "sbb %0,%0" |
203 | : "=r" (oldbit), ADDR | 199 | : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); |
204 | : "Ir" (nr)); | ||
205 | return oldbit; | 200 | return oldbit; |
206 | } | 201 | } |
207 | 202 | ||
@@ -219,8 +214,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) | |||
219 | 214 | ||
220 | asm volatile(LOCK_PREFIX "btr %2,%1\n\t" | 215 | asm volatile(LOCK_PREFIX "btr %2,%1\n\t" |
221 | "sbb %0,%0" | 216 | "sbb %0,%0" |
222 | : "=r" (oldbit), ADDR | 217 | : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
223 | : "Ir" (nr) : "memory"); | ||
224 | 218 | ||
225 | return oldbit; | 219 | return oldbit; |
226 | } | 220 | } |
@@ -238,10 +232,9 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr) | |||
238 | { | 232 | { |
239 | int oldbit; | 233 | int oldbit; |
240 | 234 | ||
241 | asm volatile("btr %2,%1\n\t" | 235 | asm volatile("btr %2,%3\n\t" |
242 | "sbb %0,%0" | 236 | "sbb %0,%0" |
243 | : "=r" (oldbit), ADDR | 237 | : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); |
244 | : "Ir" (nr)); | ||
245 | return oldbit; | 238 | return oldbit; |
246 | } | 239 | } |
247 | 240 | ||
@@ -250,10 +243,9 @@ static inline int __test_and_change_bit(int nr, volatile void *addr) | |||
250 | { | 243 | { |
251 | int oldbit; | 244 | int oldbit; |
252 | 245 | ||
253 | asm volatile("btc %2,%1\n\t" | 246 | asm volatile("btc %2,%3\n\t" |
254 | "sbb %0,%0" | 247 | "sbb %0,%0" |
255 | : "=r" (oldbit), ADDR | 248 | : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR); |
256 | : "Ir" (nr) : "memory"); | ||
257 | 249 | ||
258 | return oldbit; | 250 | return oldbit; |
259 | } | 251 | } |
@@ -272,8 +264,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) | |||
272 | 264 | ||
273 | asm volatile(LOCK_PREFIX "btc %2,%1\n\t" | 265 | asm volatile(LOCK_PREFIX "btc %2,%1\n\t" |
274 | "sbb %0,%0" | 266 | "sbb %0,%0" |
275 | : "=r" (oldbit), ADDR | 267 | : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
276 | : "Ir" (nr) : "memory"); | ||
277 | 268 | ||
278 | return oldbit; | 269 | return oldbit; |
279 | } | 270 | } |
@@ -288,10 +279,11 @@ static inline int variable_test_bit(int nr, volatile const void *addr) | |||
288 | { | 279 | { |
289 | int oldbit; | 280 | int oldbit; |
290 | 281 | ||
291 | asm volatile("bt %2,%1\n\t" | 282 | asm volatile("bt %2,%3\n\t" |
292 | "sbb %0,%0" | 283 | "sbb %0,%0" |
293 | : "=r" (oldbit) | 284 | : "=r" (oldbit) |
294 | : "m" (*(unsigned long *)addr), "Ir" (nr)); | 285 | : "m" (((volatile const int *)addr)[nr >> 5]), |
286 | "Ir" (nr), BASE_ADDR); | ||
295 | 287 | ||
296 | return oldbit; | 288 | return oldbit; |
297 | } | 289 | } |
@@ -310,6 +302,8 @@ static int test_bit(int nr, const volatile unsigned long *addr); | |||
310 | constant_test_bit((nr),(addr)) : \ | 302 | constant_test_bit((nr),(addr)) : \ |
311 | variable_test_bit((nr),(addr))) | 303 | variable_test_bit((nr),(addr))) |
312 | 304 | ||
305 | #undef BASE_ADDR | ||
306 | #undef BIT_ADDR | ||
313 | #undef ADDR | 307 | #undef ADDR |
314 | 308 | ||
315 | #ifdef CONFIG_X86_32 | 309 | #ifdef CONFIG_X86_32 |
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index e4d75fcf9c03..2513a81f82aa 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h | |||
@@ -20,20 +20,22 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) | |||
20 | 20 | ||
21 | if (!size) | 21 | if (!size) |
22 | return 0; | 22 | return 0; |
23 | /* This looks at memory. Mark it volatile to tell gcc not to move it around */ | 23 | /* This looks at memory. |
24 | __asm__ __volatile__( | 24 | * Mark it volatile to tell gcc not to move it around |
25 | "movl $-1,%%eax\n\t" | 25 | */ |
26 | "xorl %%edx,%%edx\n\t" | 26 | asm volatile("movl $-1,%%eax\n\t" |
27 | "repe; scasl\n\t" | 27 | "xorl %%edx,%%edx\n\t" |
28 | "je 1f\n\t" | 28 | "repe; scasl\n\t" |
29 | "xorl -4(%%edi),%%eax\n\t" | 29 | "je 1f\n\t" |
30 | "subl $4,%%edi\n\t" | 30 | "xorl -4(%%edi),%%eax\n\t" |
31 | "bsfl %%eax,%%edx\n" | 31 | "subl $4,%%edi\n\t" |
32 | "1:\tsubl %%ebx,%%edi\n\t" | 32 | "bsfl %%eax,%%edx\n" |
33 | "shll $3,%%edi\n\t" | 33 | "1:\tsubl %%ebx,%%edi\n\t" |
34 | "addl %%edi,%%edx" | 34 | "shll $3,%%edi\n\t" |
35 | :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) | 35 | "addl %%edi,%%edx" |
36 | :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); | 36 | : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) |
37 | : "1" ((size + 31) >> 5), "2" (addr), | ||
38 | "b" (addr) : "memory"); | ||
37 | return res; | 39 | return res; |
38 | } | 40 | } |
39 | 41 | ||
@@ -75,7 +77,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) | |||
75 | unsigned long val = *addr++; | 77 | unsigned long val = *addr++; |
76 | if (val) | 78 | if (val) |
77 | return __ffs(val) + x; | 79 | return __ffs(val) + x; |
78 | x += (sizeof(*addr)<<3); | 80 | x += sizeof(*addr) << 3; |
79 | } | 81 | } |
80 | return x; | 82 | return x; |
81 | } | 83 | } |
@@ -152,10 +154,10 @@ static inline int fls(int x) | |||
152 | 154 | ||
153 | #include <asm-generic/bitops/ext2-non-atomic.h> | 155 | #include <asm-generic/bitops/ext2-non-atomic.h> |
154 | 156 | ||
155 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 157 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
156 | test_and_set_bit((nr), (unsigned long *)addr) | 158 | test_and_set_bit((nr), (unsigned long *)(addr)) |
157 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 159 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
158 | test_and_clear_bit((nr), (unsigned long *)addr) | 160 | test_and_clear_bit((nr), (unsigned long *)(addr)) |
159 | 161 | ||
160 | #include <asm-generic/bitops/minix.h> | 162 | #include <asm-generic/bitops/minix.h> |
161 | 163 | ||
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index aaf15194d536..365f8207ea59 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h | |||
@@ -17,35 +17,35 @@ static inline long __scanbit(unsigned long val, unsigned long max) | |||
17 | return val; | 17 | return val; |
18 | } | 18 | } |
19 | 19 | ||
20 | #define find_first_bit(addr,size) \ | ||
21 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | ||
22 | (__scanbit(*(unsigned long *)addr,(size))) : \ | ||
23 | find_first_bit(addr,size))) | ||
24 | |||
25 | #define find_next_bit(addr,size,off) \ | 20 | #define find_next_bit(addr,size,off) \ |
26 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | 21 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ |
27 | ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ | 22 | ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ |
28 | find_next_bit(addr,size,off))) | 23 | find_next_bit(addr,size,off))) |
29 | 24 | ||
30 | #define find_first_zero_bit(addr,size) \ | ||
31 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | ||
32 | (__scanbit(~*(unsigned long *)addr,(size))) : \ | ||
33 | find_first_zero_bit(addr,size))) | ||
34 | |||
35 | #define find_next_zero_bit(addr,size,off) \ | 25 | #define find_next_zero_bit(addr,size,off) \ |
36 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ | 26 | ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ |
37 | ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ | 27 | ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ |
38 | find_next_zero_bit(addr,size,off))) | 28 | find_next_zero_bit(addr,size,off))) |
39 | 29 | ||
40 | static inline void set_bit_string(unsigned long *bitmap, unsigned long i, | 30 | #define find_first_bit(addr, size) \ |
41 | int len) | 31 | ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ |
42 | { | 32 | ? (__scanbit(*(unsigned long *)(addr), (size))) \ |
43 | unsigned long end = i + len; | 33 | : find_first_bit((addr), (size)))) |
34 | |||
35 | #define find_first_zero_bit(addr, size) \ | ||
36 | ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ | ||
37 | ? (__scanbit(~*(unsigned long *)(addr), (size))) \ | ||
38 | : find_first_zero_bit((addr), (size)))) | ||
39 | |||
40 | static inline void set_bit_string(unsigned long *bitmap, unsigned long i, | ||
41 | int len) | ||
42 | { | ||
43 | unsigned long end = i + len; | ||
44 | while (i < end) { | 44 | while (i < end) { |
45 | __set_bit(i, bitmap); | 45 | __set_bit(i, bitmap); |
46 | i++; | 46 | i++; |
47 | } | 47 | } |
48 | } | 48 | } |
49 | 49 | ||
50 | /** | 50 | /** |
51 | * ffz - find first zero in word. | 51 | * ffz - find first zero in word. |
@@ -150,10 +150,10 @@ static inline int fls(int x) | |||
150 | 150 | ||
151 | #include <asm-generic/bitops/ext2-non-atomic.h> | 151 | #include <asm-generic/bitops/ext2-non-atomic.h> |
152 | 152 | ||
153 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 153 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
154 | test_and_set_bit((nr),(unsigned long*)addr) | 154 | test_and_set_bit((nr), (unsigned long *)(addr)) |
155 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 155 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
156 | test_and_clear_bit((nr),(unsigned long*)addr) | 156 | test_and_clear_bit((nr), (unsigned long *)(addr)) |
157 | 157 | ||
158 | #include <asm-generic/bitops/minix.h> | 158 | #include <asm-generic/bitops/minix.h> |
159 | 159 | ||
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h index 8d477a201392..b69aa64b82a4 100644 --- a/include/asm-x86/bug.h +++ b/include/asm-x86/bug.h | |||
@@ -12,25 +12,25 @@ | |||
12 | # define __BUG_C0 "2:\t.quad 1b, %c0\n" | 12 | # define __BUG_C0 "2:\t.quad 1b, %c0\n" |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | #define BUG() \ | 15 | #define BUG() \ |
16 | do { \ | 16 | do { \ |
17 | asm volatile("1:\tud2\n" \ | 17 | asm volatile("1:\tud2\n" \ |
18 | ".pushsection __bug_table,\"a\"\n" \ | 18 | ".pushsection __bug_table,\"a\"\n" \ |
19 | __BUG_C0 \ | 19 | __BUG_C0 \ |
20 | "\t.word %c1, 0\n" \ | 20 | "\t.word %c1, 0\n" \ |
21 | "\t.org 2b+%c2\n" \ | 21 | "\t.org 2b+%c2\n" \ |
22 | ".popsection" \ | 22 | ".popsection" \ |
23 | : : "i" (__FILE__), "i" (__LINE__), \ | 23 | : : "i" (__FILE__), "i" (__LINE__), \ |
24 | "i" (sizeof(struct bug_entry))); \ | 24 | "i" (sizeof(struct bug_entry))); \ |
25 | for(;;) ; \ | 25 | for (;;) ; \ |
26 | } while(0) | 26 | } while (0) |
27 | 27 | ||
28 | #else | 28 | #else |
29 | #define BUG() \ | 29 | #define BUG() \ |
30 | do { \ | 30 | do { \ |
31 | asm volatile("ud2"); \ | 31 | asm volatile("ud2"); \ |
32 | for(;;) ; \ | 32 | for (;;) ; \ |
33 | } while(0) | 33 | } while (0) |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | #endif /* !CONFIG_BUG */ | 36 | #endif /* !CONFIG_BUG */ |
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h index fe2f2e5d51ba..e02ae2d89acf 100644 --- a/include/asm-x86/byteorder.h +++ b/include/asm-x86/byteorder.h | |||
@@ -8,50 +8,59 @@ | |||
8 | 8 | ||
9 | #ifdef __i386__ | 9 | #ifdef __i386__ |
10 | 10 | ||
11 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | 11 | static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) |
12 | { | 12 | { |
13 | #ifdef CONFIG_X86_BSWAP | 13 | #ifdef CONFIG_X86_BSWAP |
14 | __asm__("bswap %0" : "=r" (x) : "0" (x)); | 14 | asm("bswap %0" : "=r" (x) : "0" (x)); |
15 | #else | 15 | #else |
16 | __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ | 16 | asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ |
17 | "rorl $16,%0\n\t" /* swap words */ | 17 | "rorl $16,%0\n\t" /* swap words */ |
18 | "xchgb %b0,%h0" /* swap higher bytes */ | 18 | "xchgb %b0,%h0" /* swap higher bytes */ |
19 | :"=q" (x) | 19 | : "=q" (x) |
20 | : "0" (x)); | 20 | : "0" (x)); |
21 | #endif | 21 | #endif |
22 | return x; | 22 | return x; |
23 | } | 23 | } |
24 | 24 | ||
25 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) | 25 | static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) |
26 | { | 26 | { |
27 | union { | 27 | union { |
28 | struct { __u32 a,b; } s; | 28 | struct { |
29 | __u32 a; | ||
30 | __u32 b; | ||
31 | } s; | ||
29 | __u64 u; | 32 | __u64 u; |
30 | } v; | 33 | } v; |
31 | v.u = val; | 34 | v.u = val; |
32 | #ifdef CONFIG_X86_BSWAP | 35 | #ifdef CONFIG_X86_BSWAP |
33 | __asm__("bswapl %0 ; bswapl %1 ; xchgl %0,%1" | 36 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" |
34 | : "=r" (v.s.a), "=r" (v.s.b) | 37 | : "=r" (v.s.a), "=r" (v.s.b) |
35 | : "0" (v.s.a), "1" (v.s.b)); | 38 | : "0" (v.s.a), "1" (v.s.b)); |
36 | #else | 39 | #else |
37 | v.s.a = ___arch__swab32(v.s.a); | 40 | v.s.a = ___arch__swab32(v.s.a); |
38 | v.s.b = ___arch__swab32(v.s.b); | 41 | v.s.b = ___arch__swab32(v.s.b); |
39 | __asm__("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); | 42 | asm("xchgl %0,%1" |
43 | : "=r" (v.s.a), "=r" (v.s.b) | ||
44 | : "0" (v.s.a), "1" (v.s.b)); | ||
40 | #endif | 45 | #endif |
41 | return v.u; | 46 | return v.u; |
42 | } | 47 | } |
43 | 48 | ||
44 | #else /* __i386__ */ | 49 | #else /* __i386__ */ |
45 | 50 | ||
46 | static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) | 51 | static inline __attribute_const__ __u64 ___arch__swab64(__u64 x) |
47 | { | 52 | { |
48 | __asm__("bswapq %0" : "=r" (x) : "0" (x)); | 53 | asm("bswapq %0" |
54 | : "=r" (x) | ||
55 | : "0" (x)); | ||
49 | return x; | 56 | return x; |
50 | } | 57 | } |
51 | 58 | ||
52 | static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) | 59 | static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) |
53 | { | 60 | { |
54 | __asm__("bswapl %0" : "=r" (x) : "0" (x)); | 61 | asm("bswapl %0" |
62 | : "=r" (x) | ||
63 | : "0" (x)); | ||
55 | return x; | 64 | return x; |
56 | } | 65 | } |
57 | 66 | ||
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 5396c212d8c0..f4c0ab50d2c2 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h | |||
@@ -14,33 +14,85 @@ | |||
14 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 14 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
15 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 15 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
16 | #define flush_icache_range(start, end) do { } while (0) | 16 | #define flush_icache_range(start, end) do { } while (0) |
17 | #define flush_icache_page(vma,pg) do { } while (0) | 17 | #define flush_icache_page(vma, pg) do { } while (0) |
18 | #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) | 18 | #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) |
19 | #define flush_cache_vmap(start, end) do { } while (0) | 19 | #define flush_cache_vmap(start, end) do { } while (0) |
20 | #define flush_cache_vunmap(start, end) do { } while (0) | 20 | #define flush_cache_vunmap(start, end) do { } while (0) |
21 | 21 | ||
22 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 22 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
23 | memcpy(dst, src, len) | 23 | memcpy((dst), (src), (len)) |
24 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | 24 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
25 | memcpy(dst, src, len) | 25 | memcpy((dst), (src), (len)) |
26 | 26 | ||
27 | int __deprecated_for_modules change_page_attr(struct page *page, int numpages, | ||
28 | pgprot_t prot); | ||
29 | 27 | ||
30 | int set_pages_uc(struct page *page, int numpages); | 28 | /* |
31 | int set_pages_wb(struct page *page, int numpages); | 29 | * The set_memory_* API can be used to change various attributes of a virtual |
32 | int set_pages_x(struct page *page, int numpages); | 30 | * address range. The attributes include: |
33 | int set_pages_nx(struct page *page, int numpages); | 31 | * Cachability : UnCached, WriteCombining, WriteBack |
34 | int set_pages_ro(struct page *page, int numpages); | 32 | * Executability : eXeutable, NoteXecutable |
35 | int set_pages_rw(struct page *page, int numpages); | 33 | * Read/Write : ReadOnly, ReadWrite |
34 | * Presence : NotPresent | ||
35 | * | ||
36 | * Within a catagory, the attributes are mutually exclusive. | ||
37 | * | ||
38 | * The implementation of this API will take care of various aspects that | ||
39 | * are associated with changing such attributes, such as: | ||
40 | * - Flushing TLBs | ||
41 | * - Flushing CPU caches | ||
42 | * - Making sure aliases of the memory behind the mapping don't violate | ||
43 | * coherency rules as defined by the CPU in the system. | ||
44 | * | ||
45 | * What this API does not do: | ||
46 | * - Provide exclusion between various callers - including callers that | ||
47 | * operation on other mappings of the same physical page | ||
48 | * - Restore default attributes when a page is freed | ||
49 | * - Guarantee that mappings other than the requested one are | ||
50 | * in any state, other than that these do not violate rules for | ||
51 | * the CPU you have. Do not depend on any effects on other mappings, | ||
52 | * CPUs other than the one you have may have more relaxed rules. | ||
53 | * The caller is required to take care of these. | ||
54 | */ | ||
36 | 55 | ||
56 | int _set_memory_uc(unsigned long addr, int numpages); | ||
57 | int _set_memory_wc(unsigned long addr, int numpages); | ||
58 | int _set_memory_wb(unsigned long addr, int numpages); | ||
37 | int set_memory_uc(unsigned long addr, int numpages); | 59 | int set_memory_uc(unsigned long addr, int numpages); |
60 | int set_memory_wc(unsigned long addr, int numpages); | ||
38 | int set_memory_wb(unsigned long addr, int numpages); | 61 | int set_memory_wb(unsigned long addr, int numpages); |
39 | int set_memory_x(unsigned long addr, int numpages); | 62 | int set_memory_x(unsigned long addr, int numpages); |
40 | int set_memory_nx(unsigned long addr, int numpages); | 63 | int set_memory_nx(unsigned long addr, int numpages); |
41 | int set_memory_ro(unsigned long addr, int numpages); | 64 | int set_memory_ro(unsigned long addr, int numpages); |
42 | int set_memory_rw(unsigned long addr, int numpages); | 65 | int set_memory_rw(unsigned long addr, int numpages); |
43 | int set_memory_np(unsigned long addr, int numpages); | 66 | int set_memory_np(unsigned long addr, int numpages); |
67 | int set_memory_4k(unsigned long addr, int numpages); | ||
68 | |||
69 | /* | ||
70 | * For legacy compatibility with the old APIs, a few functions | ||
71 | * are provided that work on a "struct page". | ||
72 | * These functions operate ONLY on the 1:1 kernel mapping of the | ||
73 | * memory that the struct page represents, and internally just | ||
74 | * call the set_memory_* function. See the description of the | ||
75 | * set_memory_* function for more details on conventions. | ||
76 | * | ||
77 | * These APIs should be considered *deprecated* and are likely going to | ||
78 | * be removed in the future. | ||
79 | * The reason for this is the implicit operation on the 1:1 mapping only, | ||
80 | * making this not a generally useful API. | ||
81 | * | ||
82 | * Specifically, many users of the old APIs had a virtual address, | ||
83 | * called virt_to_page() or vmalloc_to_page() on that address to | ||
84 | * get a struct page* that the old API required. | ||
85 | * To convert these cases, use set_memory_*() on the original | ||
86 | * virtual address, do not use these functions. | ||
87 | */ | ||
88 | |||
89 | int set_pages_uc(struct page *page, int numpages); | ||
90 | int set_pages_wb(struct page *page, int numpages); | ||
91 | int set_pages_x(struct page *page, int numpages); | ||
92 | int set_pages_nx(struct page *page, int numpages); | ||
93 | int set_pages_ro(struct page *page, int numpages); | ||
94 | int set_pages_rw(struct page *page, int numpages); | ||
95 | |||
44 | 96 | ||
45 | void clflush_cache_range(void *addr, unsigned int size); | 97 | void clflush_cache_range(void *addr, unsigned int size); |
46 | 98 | ||
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h index 75194abbe8ee..52bbb0d8c4c1 100644 --- a/include/asm-x86/checksum_32.h +++ b/include/asm-x86/checksum_32.h | |||
@@ -28,7 +28,8 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); | |||
28 | */ | 28 | */ |
29 | 29 | ||
30 | asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, | 30 | asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, |
31 | int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); | 31 | int len, __wsum sum, |
32 | int *src_err_ptr, int *dst_err_ptr); | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Note: when you get a NULL pointer exception here this means someone | 35 | * Note: when you get a NULL pointer exception here this means someone |
@@ -37,20 +38,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, | |||
37 | * If you use these functions directly please don't forget the | 38 | * If you use these functions directly please don't forget the |
38 | * access_ok(). | 39 | * access_ok(). |
39 | */ | 40 | */ |
40 | static __inline__ | 41 | static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, |
41 | __wsum csum_partial_copy_nocheck (const void *src, void *dst, | 42 | int len, __wsum sum) |
42 | int len, __wsum sum) | ||
43 | { | 43 | { |
44 | return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); | 44 | return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); |
45 | } | 45 | } |
46 | 46 | ||
47 | static __inline__ | 47 | static inline __wsum csum_partial_copy_from_user(const void __user *src, |
48 | __wsum csum_partial_copy_from_user(const void __user *src, void *dst, | 48 | void *dst, |
49 | int len, __wsum sum, int *err_ptr) | 49 | int len, __wsum sum, |
50 | int *err_ptr) | ||
50 | { | 51 | { |
51 | might_sleep(); | 52 | might_sleep(); |
52 | return csum_partial_copy_generic((__force void *)src, dst, | 53 | return csum_partial_copy_generic((__force void *)src, dst, |
53 | len, sum, err_ptr, NULL); | 54 | len, sum, err_ptr, NULL); |
54 | } | 55 | } |
55 | 56 | ||
56 | /* | 57 | /* |
@@ -64,30 +65,29 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | |||
64 | { | 65 | { |
65 | unsigned int sum; | 66 | unsigned int sum; |
66 | 67 | ||
67 | __asm__ __volatile__( | 68 | asm volatile("movl (%1), %0 ;\n" |
68 | "movl (%1), %0 ;\n" | 69 | "subl $4, %2 ;\n" |
69 | "subl $4, %2 ;\n" | 70 | "jbe 2f ;\n" |
70 | "jbe 2f ;\n" | 71 | "addl 4(%1), %0 ;\n" |
71 | "addl 4(%1), %0 ;\n" | 72 | "adcl 8(%1), %0 ;\n" |
72 | "adcl 8(%1), %0 ;\n" | 73 | "adcl 12(%1), %0;\n" |
73 | "adcl 12(%1), %0 ;\n" | 74 | "1: adcl 16(%1), %0 ;\n" |
74 | "1: adcl 16(%1), %0 ;\n" | 75 | "lea 4(%1), %1 ;\n" |
75 | "lea 4(%1), %1 ;\n" | 76 | "decl %2 ;\n" |
76 | "decl %2 ;\n" | 77 | "jne 1b ;\n" |
77 | "jne 1b ;\n" | 78 | "adcl $0, %0 ;\n" |
78 | "adcl $0, %0 ;\n" | 79 | "movl %0, %2 ;\n" |
79 | "movl %0, %2 ;\n" | 80 | "shrl $16, %0 ;\n" |
80 | "shrl $16, %0 ;\n" | 81 | "addw %w2, %w0 ;\n" |
81 | "addw %w2, %w0 ;\n" | 82 | "adcl $0, %0 ;\n" |
82 | "adcl $0, %0 ;\n" | 83 | "notl %0 ;\n" |
83 | "notl %0 ;\n" | 84 | "2: ;\n" |
84 | "2: ;\n" | ||
85 | /* Since the input registers which are loaded with iph and ihl | 85 | /* Since the input registers which are loaded with iph and ihl |
86 | are modified, we must also specify them as outputs, or gcc | 86 | are modified, we must also specify them as outputs, or gcc |
87 | will assume they contain their original values. */ | 87 | will assume they contain their original values. */ |
88 | : "=r" (sum), "=r" (iph), "=r" (ihl) | 88 | : "=r" (sum), "=r" (iph), "=r" (ihl) |
89 | : "1" (iph), "2" (ihl) | 89 | : "1" (iph), "2" (ihl) |
90 | : "memory"); | 90 | : "memory"); |
91 | return (__force __sum16)sum; | 91 | return (__force __sum16)sum; |
92 | } | 92 | } |
93 | 93 | ||
@@ -97,29 +97,27 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | |||
97 | 97 | ||
98 | static inline __sum16 csum_fold(__wsum sum) | 98 | static inline __sum16 csum_fold(__wsum sum) |
99 | { | 99 | { |
100 | __asm__( | 100 | asm("addl %1, %0 ;\n" |
101 | "addl %1, %0 ;\n" | 101 | "adcl $0xffff, %0 ;\n" |
102 | "adcl $0xffff, %0 ;\n" | 102 | : "=r" (sum) |
103 | : "=r" (sum) | 103 | : "r" ((__force u32)sum << 16), |
104 | : "r" ((__force u32)sum << 16), | 104 | "0" ((__force u32)sum & 0xffff0000)); |
105 | "0" ((__force u32)sum & 0xffff0000) | ||
106 | ); | ||
107 | return (__force __sum16)(~(__force u32)sum >> 16); | 105 | return (__force __sum16)(~(__force u32)sum >> 16); |
108 | } | 106 | } |
109 | 107 | ||
110 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | 108 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, |
111 | unsigned short len, | 109 | unsigned short len, |
112 | unsigned short proto, | 110 | unsigned short proto, |
113 | __wsum sum) | 111 | __wsum sum) |
114 | { | 112 | { |
115 | __asm__( | 113 | asm("addl %1, %0 ;\n" |
116 | "addl %1, %0 ;\n" | 114 | "adcl %2, %0 ;\n" |
117 | "adcl %2, %0 ;\n" | 115 | "adcl %3, %0 ;\n" |
118 | "adcl %3, %0 ;\n" | 116 | "adcl $0, %0 ;\n" |
119 | "adcl $0, %0 ;\n" | 117 | : "=r" (sum) |
120 | : "=r" (sum) | 118 | : "g" (daddr), "g"(saddr), |
121 | : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum)); | 119 | "g" ((len + proto) << 8), "0" (sum)); |
122 | return sum; | 120 | return sum; |
123 | } | 121 | } |
124 | 122 | ||
125 | /* | 123 | /* |
@@ -127,11 +125,11 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | |||
127 | * returns a 16-bit checksum, already complemented | 125 | * returns a 16-bit checksum, already complemented |
128 | */ | 126 | */ |
129 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | 127 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, |
130 | unsigned short len, | 128 | unsigned short len, |
131 | unsigned short proto, | 129 | unsigned short proto, |
132 | __wsum sum) | 130 | __wsum sum) |
133 | { | 131 | { |
134 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | 132 | return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); |
135 | } | 133 | } |
136 | 134 | ||
137 | /* | 135 | /* |
@@ -141,30 +139,29 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | |||
141 | 139 | ||
142 | static inline __sum16 ip_compute_csum(const void *buff, int len) | 140 | static inline __sum16 ip_compute_csum(const void *buff, int len) |
143 | { | 141 | { |
144 | return csum_fold (csum_partial(buff, len, 0)); | 142 | return csum_fold(csum_partial(buff, len, 0)); |
145 | } | 143 | } |
146 | 144 | ||
147 | #define _HAVE_ARCH_IPV6_CSUM | 145 | #define _HAVE_ARCH_IPV6_CSUM |
148 | static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | 146 | static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, |
149 | const struct in6_addr *daddr, | 147 | const struct in6_addr *daddr, |
150 | __u32 len, unsigned short proto, | 148 | __u32 len, unsigned short proto, |
151 | __wsum sum) | 149 | __wsum sum) |
152 | { | 150 | { |
153 | __asm__( | 151 | asm("addl 0(%1), %0 ;\n" |
154 | "addl 0(%1), %0 ;\n" | 152 | "adcl 4(%1), %0 ;\n" |
155 | "adcl 4(%1), %0 ;\n" | 153 | "adcl 8(%1), %0 ;\n" |
156 | "adcl 8(%1), %0 ;\n" | 154 | "adcl 12(%1), %0 ;\n" |
157 | "adcl 12(%1), %0 ;\n" | 155 | "adcl 0(%2), %0 ;\n" |
158 | "adcl 0(%2), %0 ;\n" | 156 | "adcl 4(%2), %0 ;\n" |
159 | "adcl 4(%2), %0 ;\n" | 157 | "adcl 8(%2), %0 ;\n" |
160 | "adcl 8(%2), %0 ;\n" | 158 | "adcl 12(%2), %0 ;\n" |
161 | "adcl 12(%2), %0 ;\n" | 159 | "adcl %3, %0 ;\n" |
162 | "adcl %3, %0 ;\n" | 160 | "adcl %4, %0 ;\n" |
163 | "adcl %4, %0 ;\n" | 161 | "adcl $0, %0 ;\n" |
164 | "adcl $0, %0 ;\n" | 162 | : "=&r" (sum) |
165 | : "=&r" (sum) | 163 | : "r" (saddr), "r" (daddr), |
166 | : "r" (saddr), "r" (daddr), | 164 | "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)); |
167 | "r"(htonl(len)), "r"(htonl(proto)), "0"(sum)); | ||
168 | 165 | ||
169 | return csum_fold(sum); | 166 | return csum_fold(sum); |
170 | } | 167 | } |
@@ -173,14 +170,15 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | |||
173 | * Copy and checksum to user | 170 | * Copy and checksum to user |
174 | */ | 171 | */ |
175 | #define HAVE_CSUM_COPY_USER | 172 | #define HAVE_CSUM_COPY_USER |
176 | static __inline__ __wsum csum_and_copy_to_user(const void *src, | 173 | static inline __wsum csum_and_copy_to_user(const void *src, |
177 | void __user *dst, | 174 | void __user *dst, |
178 | int len, __wsum sum, | 175 | int len, __wsum sum, |
179 | int *err_ptr) | 176 | int *err_ptr) |
180 | { | 177 | { |
181 | might_sleep(); | 178 | might_sleep(); |
182 | if (access_ok(VERIFY_WRITE, dst, len)) | 179 | if (access_ok(VERIFY_WRITE, dst, len)) |
183 | return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr); | 180 | return csum_partial_copy_generic(src, (__force void *)dst, |
181 | len, sum, NULL, err_ptr); | ||
184 | 182 | ||
185 | if (len) | 183 | if (len) |
186 | *err_ptr = -EFAULT; | 184 | *err_ptr = -EFAULT; |
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h index e5f79997decc..8bd861cc5267 100644 --- a/include/asm-x86/checksum_64.h +++ b/include/asm-x86/checksum_64.h | |||
@@ -1,33 +1,31 @@ | |||
1 | #ifndef _X86_64_CHECKSUM_H | 1 | #ifndef _X86_64_CHECKSUM_H |
2 | #define _X86_64_CHECKSUM_H | 2 | #define _X86_64_CHECKSUM_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Checksums for x86-64 | 5 | * Checksums for x86-64 |
6 | * Copyright 2002 by Andi Kleen, SuSE Labs | 6 | * Copyright 2002 by Andi Kleen, SuSE Labs |
7 | * with some code from asm-x86/checksum.h | 7 | * with some code from asm-x86/checksum.h |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <asm/uaccess.h> | 11 | #include <asm/uaccess.h> |
12 | #include <asm/byteorder.h> | 12 | #include <asm/byteorder.h> |
13 | 13 | ||
14 | /** | 14 | /** |
15 | * csum_fold - Fold and invert a 32bit checksum. | 15 | * csum_fold - Fold and invert a 32bit checksum. |
16 | * sum: 32bit unfolded sum | 16 | * sum: 32bit unfolded sum |
17 | * | 17 | * |
18 | * Fold a 32bit running checksum to 16bit and invert it. This is usually | 18 | * Fold a 32bit running checksum to 16bit and invert it. This is usually |
19 | * the last step before putting a checksum into a packet. | 19 | * the last step before putting a checksum into a packet. |
20 | * Make sure not to mix with 64bit checksums. | 20 | * Make sure not to mix with 64bit checksums. |
21 | */ | 21 | */ |
22 | static inline __sum16 csum_fold(__wsum sum) | 22 | static inline __sum16 csum_fold(__wsum sum) |
23 | { | 23 | { |
24 | __asm__( | 24 | asm(" addl %1,%0\n" |
25 | " addl %1,%0\n" | 25 | " adcl $0xffff,%0" |
26 | " adcl $0xffff,%0" | 26 | : "=r" (sum) |
27 | : "=r" (sum) | 27 | : "r" ((__force u32)sum << 16), |
28 | : "r" ((__force u32)sum << 16), | 28 | "0" ((__force u32)sum & 0xffff0000)); |
29 | "0" ((__force u32)sum & 0xffff0000) | ||
30 | ); | ||
31 | return (__force __sum16)(~(__force u32)sum >> 16); | 29 | return (__force __sum16)(~(__force u32)sum >> 16); |
32 | } | 30 | } |
33 | 31 | ||
@@ -43,46 +41,46 @@ static inline __sum16 csum_fold(__wsum sum) | |||
43 | * ip_fast_csum - Compute the IPv4 header checksum efficiently. | 41 | * ip_fast_csum - Compute the IPv4 header checksum efficiently. |
44 | * iph: ipv4 header | 42 | * iph: ipv4 header |
45 | * ihl: length of header / 4 | 43 | * ihl: length of header / 4 |
46 | */ | 44 | */ |
47 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | 45 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) |
48 | { | 46 | { |
49 | unsigned int sum; | 47 | unsigned int sum; |
50 | 48 | ||
51 | asm( " movl (%1), %0\n" | 49 | asm(" movl (%1), %0\n" |
52 | " subl $4, %2\n" | 50 | " subl $4, %2\n" |
53 | " jbe 2f\n" | 51 | " jbe 2f\n" |
54 | " addl 4(%1), %0\n" | 52 | " addl 4(%1), %0\n" |
55 | " adcl 8(%1), %0\n" | 53 | " adcl 8(%1), %0\n" |
56 | " adcl 12(%1), %0\n" | 54 | " adcl 12(%1), %0\n" |
57 | "1: adcl 16(%1), %0\n" | 55 | "1: adcl 16(%1), %0\n" |
58 | " lea 4(%1), %1\n" | 56 | " lea 4(%1), %1\n" |
59 | " decl %2\n" | 57 | " decl %2\n" |
60 | " jne 1b\n" | 58 | " jne 1b\n" |
61 | " adcl $0, %0\n" | 59 | " adcl $0, %0\n" |
62 | " movl %0, %2\n" | 60 | " movl %0, %2\n" |
63 | " shrl $16, %0\n" | 61 | " shrl $16, %0\n" |
64 | " addw %w2, %w0\n" | 62 | " addw %w2, %w0\n" |
65 | " adcl $0, %0\n" | 63 | " adcl $0, %0\n" |
66 | " notl %0\n" | 64 | " notl %0\n" |
67 | "2:" | 65 | "2:" |
68 | /* Since the input registers which are loaded with iph and ihl | 66 | /* Since the input registers which are loaded with iph and ihl |
69 | are modified, we must also specify them as outputs, or gcc | 67 | are modified, we must also specify them as outputs, or gcc |
70 | will assume they contain their original values. */ | 68 | will assume they contain their original values. */ |
71 | : "=r" (sum), "=r" (iph), "=r" (ihl) | 69 | : "=r" (sum), "=r" (iph), "=r" (ihl) |
72 | : "1" (iph), "2" (ihl) | 70 | : "1" (iph), "2" (ihl) |
73 | : "memory"); | 71 | : "memory"); |
74 | return (__force __sum16)sum; | 72 | return (__force __sum16)sum; |
75 | } | 73 | } |
76 | 74 | ||
77 | /** | 75 | /** |
78 | * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum. | 76 | * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum. |
79 | * @saddr: source address | 77 | * @saddr: source address |
80 | * @daddr: destination address | 78 | * @daddr: destination address |
81 | * @len: length of packet | 79 | * @len: length of packet |
82 | * @proto: ip protocol of packet | 80 | * @proto: ip protocol of packet |
83 | * @sum: initial sum to be added in (32bit unfolded) | 81 | * @sum: initial sum to be added in (32bit unfolded) |
84 | * | 82 | * |
85 | * Returns the pseudo header checksum the input data. Result is | 83 | * Returns the pseudo header checksum the input data. Result is |
86 | * 32bit unfolded. | 84 | * 32bit unfolded. |
87 | */ | 85 | */ |
88 | static inline __wsum | 86 | static inline __wsum |
@@ -93,32 +91,32 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, | |||
93 | " adcl %2, %0\n" | 91 | " adcl %2, %0\n" |
94 | " adcl %3, %0\n" | 92 | " adcl %3, %0\n" |
95 | " adcl $0, %0\n" | 93 | " adcl $0, %0\n" |
96 | : "=r" (sum) | 94 | : "=r" (sum) |
97 | : "g" (daddr), "g" (saddr), | 95 | : "g" (daddr), "g" (saddr), |
98 | "g" ((len + proto)<<8), "0" (sum)); | 96 | "g" ((len + proto)<<8), "0" (sum)); |
99 | return sum; | 97 | return sum; |
100 | } | 98 | } |
101 | 99 | ||
102 | 100 | ||
103 | /** | 101 | /** |
104 | * csum_tcpup_magic - Compute an IPv4 pseudo header checksum. | 102 | * csum_tcpup_magic - Compute an IPv4 pseudo header checksum. |
105 | * @saddr: source address | 103 | * @saddr: source address |
106 | * @daddr: destination address | 104 | * @daddr: destination address |
107 | * @len: length of packet | 105 | * @len: length of packet |
108 | * @proto: ip protocol of packet | 106 | * @proto: ip protocol of packet |
109 | * @sum: initial sum to be added in (32bit unfolded) | 107 | * @sum: initial sum to be added in (32bit unfolded) |
110 | * | 108 | * |
111 | * Returns the 16bit pseudo header checksum the input data already | 109 | * Returns the 16bit pseudo header checksum the input data already |
112 | * complemented and ready to be filled in. | 110 | * complemented and ready to be filled in. |
113 | */ | 111 | */ |
114 | static inline __sum16 | 112 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, |
115 | csum_tcpudp_magic(__be32 saddr, __be32 daddr, | 113 | unsigned short len, |
116 | unsigned short len, unsigned short proto, __wsum sum) | 114 | unsigned short proto, __wsum sum) |
117 | { | 115 | { |
118 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | 116 | return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); |
119 | } | 117 | } |
120 | 118 | ||
121 | /** | 119 | /** |
122 | * csum_partial - Compute an internet checksum. | 120 | * csum_partial - Compute an internet checksum. |
123 | * @buff: buffer to be checksummed | 121 | * @buff: buffer to be checksummed |
124 | * @len: length of buffer. | 122 | * @len: length of buffer. |
@@ -127,7 +125,7 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, | |||
127 | * Returns the 32bit unfolded internet checksum of the buffer. | 125 | * Returns the 32bit unfolded internet checksum of the buffer. |
128 | * Before filling it in it needs to be csum_fold()'ed. | 126 | * Before filling it in it needs to be csum_fold()'ed. |
129 | * buff should be aligned to a 64bit boundary if possible. | 127 | * buff should be aligned to a 64bit boundary if possible. |
130 | */ | 128 | */ |
131 | extern __wsum csum_partial(const void *buff, int len, __wsum sum); | 129 | extern __wsum csum_partial(const void *buff, int len, __wsum sum); |
132 | 130 | ||
133 | #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 | 131 | #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1 |
@@ -136,23 +134,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); | |||
136 | 134 | ||
137 | /* Do not call this directly. Use the wrappers below */ | 135 | /* Do not call this directly. Use the wrappers below */ |
138 | extern __wsum csum_partial_copy_generic(const void *src, const void *dst, | 136 | extern __wsum csum_partial_copy_generic(const void *src, const void *dst, |
139 | int len, | 137 | int len, __wsum sum, |
140 | __wsum sum, | 138 | int *src_err_ptr, int *dst_err_ptr); |
141 | int *src_err_ptr, int *dst_err_ptr); | ||
142 | 139 | ||
143 | 140 | ||
144 | extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, | 141 | extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, |
145 | int len, __wsum isum, int *errp); | 142 | int len, __wsum isum, int *errp); |
146 | extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst, | 143 | extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst, |
147 | int len, __wsum isum, int *errp); | 144 | int len, __wsum isum, int *errp); |
148 | extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, | 145 | extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, |
149 | __wsum sum); | 146 | int len, __wsum sum); |
150 | 147 | ||
151 | /* Old names. To be removed. */ | 148 | /* Old names. To be removed. */ |
152 | #define csum_and_copy_to_user csum_partial_copy_to_user | 149 | #define csum_and_copy_to_user csum_partial_copy_to_user |
153 | #define csum_and_copy_from_user csum_partial_copy_from_user | 150 | #define csum_and_copy_from_user csum_partial_copy_from_user |
154 | 151 | ||
155 | /** | 152 | /** |
156 | * ip_compute_csum - Compute an 16bit IP checksum. | 153 | * ip_compute_csum - Compute an 16bit IP checksum. |
157 | * @buff: buffer address. | 154 | * @buff: buffer address. |
158 | * @len: length of buffer. | 155 | * @len: length of buffer. |
@@ -170,7 +167,7 @@ extern __sum16 ip_compute_csum(const void *buff, int len); | |||
170 | * @proto: protocol of packet | 167 | * @proto: protocol of packet |
171 | * @sum: initial sum (32bit unfolded) to be added in | 168 | * @sum: initial sum (32bit unfolded) to be added in |
172 | * | 169 | * |
173 | * Computes an IPv6 pseudo header checksum. This sum is added the checksum | 170 | * Computes an IPv6 pseudo header checksum. This sum is added the checksum |
174 | * into UDP/TCP packets and contains some link layer information. | 171 | * into UDP/TCP packets and contains some link layer information. |
175 | * Returns the unfolded 32bit checksum. | 172 | * Returns the unfolded 32bit checksum. |
176 | */ | 173 | */ |
@@ -185,11 +182,10 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, | |||
185 | static inline unsigned add32_with_carry(unsigned a, unsigned b) | 182 | static inline unsigned add32_with_carry(unsigned a, unsigned b) |
186 | { | 183 | { |
187 | asm("addl %2,%0\n\t" | 184 | asm("addl %2,%0\n\t" |
188 | "adcl $0,%0" | 185 | "adcl $0,%0" |
189 | : "=r" (a) | 186 | : "=r" (a) |
190 | : "0" (a), "r" (b)); | 187 | : "0" (a), "r" (b)); |
191 | return a; | 188 | return a; |
192 | } | 189 | } |
193 | 190 | ||
194 | #endif | 191 | #endif |
195 | |||
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h index 959fad00dff5..bf5a69d1329e 100644 --- a/include/asm-x86/cmpxchg_32.h +++ b/include/asm-x86/cmpxchg_32.h | |||
@@ -8,9 +8,12 @@ | |||
8 | * you need to test for the feature in boot_cpu_data. | 8 | * you need to test for the feature in boot_cpu_data. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) | 11 | #define xchg(ptr, v) \ |
12 | ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr)))) | ||
12 | 13 | ||
13 | struct __xchg_dummy { unsigned long a[100]; }; | 14 | struct __xchg_dummy { |
15 | unsigned long a[100]; | ||
16 | }; | ||
14 | #define __xg(x) ((struct __xchg_dummy *)(x)) | 17 | #define __xg(x) ((struct __xchg_dummy *)(x)) |
15 | 18 | ||
16 | /* | 19 | /* |
@@ -27,72 +30,74 @@ struct __xchg_dummy { unsigned long a[100]; }; | |||
27 | * of the instruction set reference 24319102.pdf. We need | 30 | * of the instruction set reference 24319102.pdf. We need |
28 | * the reader side to see the coherent 64bit value. | 31 | * the reader side to see the coherent 64bit value. |
29 | */ | 32 | */ |
30 | static inline void __set_64bit (unsigned long long * ptr, | 33 | static inline void __set_64bit(unsigned long long *ptr, |
31 | unsigned int low, unsigned int high) | 34 | unsigned int low, unsigned int high) |
32 | { | 35 | { |
33 | __asm__ __volatile__ ( | 36 | asm volatile("\n1:\t" |
34 | "\n1:\t" | 37 | "movl (%0), %%eax\n\t" |
35 | "movl (%0), %%eax\n\t" | 38 | "movl 4(%0), %%edx\n\t" |
36 | "movl 4(%0), %%edx\n\t" | 39 | LOCK_PREFIX "cmpxchg8b (%0)\n\t" |
37 | LOCK_PREFIX "cmpxchg8b (%0)\n\t" | 40 | "jnz 1b" |
38 | "jnz 1b" | 41 | : /* no outputs */ |
39 | : /* no outputs */ | 42 | : "D"(ptr), |
40 | : "D"(ptr), | 43 | "b"(low), |
41 | "b"(low), | 44 | "c"(high) |
42 | "c"(high) | 45 | : "ax", "dx", "memory"); |
43 | : "ax","dx","memory"); | ||
44 | } | 46 | } |
45 | 47 | ||
46 | static inline void __set_64bit_constant (unsigned long long *ptr, | 48 | static inline void __set_64bit_constant(unsigned long long *ptr, |
47 | unsigned long long value) | 49 | unsigned long long value) |
48 | { | 50 | { |
49 | __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); | 51 | __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32)); |
50 | } | 52 | } |
51 | #define ll_low(x) *(((unsigned int*)&(x))+0) | ||
52 | #define ll_high(x) *(((unsigned int*)&(x))+1) | ||
53 | 53 | ||
54 | static inline void __set_64bit_var (unsigned long long *ptr, | 54 | #define ll_low(x) *(((unsigned int *)&(x)) + 0) |
55 | unsigned long long value) | 55 | #define ll_high(x) *(((unsigned int *)&(x)) + 1) |
56 | |||
57 | static inline void __set_64bit_var(unsigned long long *ptr, | ||
58 | unsigned long long value) | ||
56 | { | 59 | { |
57 | __set_64bit(ptr,ll_low(value), ll_high(value)); | 60 | __set_64bit(ptr, ll_low(value), ll_high(value)); |
58 | } | 61 | } |
59 | 62 | ||
60 | #define set_64bit(ptr,value) \ | 63 | #define set_64bit(ptr, value) \ |
61 | (__builtin_constant_p(value) ? \ | 64 | (__builtin_constant_p((value)) \ |
62 | __set_64bit_constant(ptr, value) : \ | 65 | ? __set_64bit_constant((ptr), (value)) \ |
63 | __set_64bit_var(ptr, value) ) | 66 | : __set_64bit_var((ptr), (value))) |
64 | 67 | ||
65 | #define _set_64bit(ptr,value) \ | 68 | #define _set_64bit(ptr, value) \ |
66 | (__builtin_constant_p(value) ? \ | 69 | (__builtin_constant_p(value) \ |
67 | __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ | 70 | ? __set_64bit(ptr, (unsigned int)(value), \ |
68 | __set_64bit(ptr, ll_low(value), ll_high(value)) ) | 71 | (unsigned int)((value) >> 32)) \ |
72 | : __set_64bit(ptr, ll_low((value)), ll_high((value)))) | ||
69 | 73 | ||
70 | /* | 74 | /* |
71 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway | 75 | * Note: no "lock" prefix even on SMP: xchg always implies lock anyway |
72 | * Note 2: xchg has side effect, so that attribute volatile is necessary, | 76 | * Note 2: xchg has side effect, so that attribute volatile is necessary, |
73 | * but generally the primitive is invalid, *ptr is output argument. --ANK | 77 | * but generally the primitive is invalid, *ptr is output argument. --ANK |
74 | */ | 78 | */ |
75 | static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) | 79 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
80 | int size) | ||
76 | { | 81 | { |
77 | switch (size) { | 82 | switch (size) { |
78 | case 1: | 83 | case 1: |
79 | __asm__ __volatile__("xchgb %b0,%1" | 84 | asm volatile("xchgb %b0,%1" |
80 | :"=q" (x) | 85 | : "=q" (x) |
81 | :"m" (*__xg(ptr)), "0" (x) | 86 | : "m" (*__xg(ptr)), "0" (x) |
82 | :"memory"); | 87 | : "memory"); |
83 | break; | 88 | break; |
84 | case 2: | 89 | case 2: |
85 | __asm__ __volatile__("xchgw %w0,%1" | 90 | asm volatile("xchgw %w0,%1" |
86 | :"=r" (x) | 91 | : "=r" (x) |
87 | :"m" (*__xg(ptr)), "0" (x) | 92 | : "m" (*__xg(ptr)), "0" (x) |
88 | :"memory"); | 93 | : "memory"); |
89 | break; | 94 | break; |
90 | case 4: | 95 | case 4: |
91 | __asm__ __volatile__("xchgl %0,%1" | 96 | asm volatile("xchgl %0,%1" |
92 | :"=r" (x) | 97 | : "=r" (x) |
93 | :"m" (*__xg(ptr)), "0" (x) | 98 | : "m" (*__xg(ptr)), "0" (x) |
94 | :"memory"); | 99 | : "memory"); |
95 | break; | 100 | break; |
96 | } | 101 | } |
97 | return x; | 102 | return x; |
98 | } | 103 | } |
@@ -105,24 +110,27 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz | |||
105 | 110 | ||
106 | #ifdef CONFIG_X86_CMPXCHG | 111 | #ifdef CONFIG_X86_CMPXCHG |
107 | #define __HAVE_ARCH_CMPXCHG 1 | 112 | #define __HAVE_ARCH_CMPXCHG 1 |
108 | #define cmpxchg(ptr, o, n) \ | 113 | #define cmpxchg(ptr, o, n) \ |
109 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | 114 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ |
110 | (unsigned long)(n), sizeof(*(ptr)))) | 115 | (unsigned long)(n), \ |
111 | #define sync_cmpxchg(ptr, o, n) \ | 116 | sizeof(*(ptr)))) |
112 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ | 117 | #define sync_cmpxchg(ptr, o, n) \ |
113 | (unsigned long)(n), sizeof(*(ptr)))) | 118 | ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ |
114 | #define cmpxchg_local(ptr, o, n) \ | 119 | (unsigned long)(n), \ |
115 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | 120 | sizeof(*(ptr)))) |
116 | (unsigned long)(n), sizeof(*(ptr)))) | 121 | #define cmpxchg_local(ptr, o, n) \ |
122 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | ||
123 | (unsigned long)(n), \ | ||
124 | sizeof(*(ptr)))) | ||
117 | #endif | 125 | #endif |
118 | 126 | ||
119 | #ifdef CONFIG_X86_CMPXCHG64 | 127 | #ifdef CONFIG_X86_CMPXCHG64 |
120 | #define cmpxchg64(ptr, o, n) \ | 128 | #define cmpxchg64(ptr, o, n) \ |
121 | ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ | 129 | ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ |
122 | (unsigned long long)(n))) | 130 | (unsigned long long)(n))) |
123 | #define cmpxchg64_local(ptr, o, n) \ | 131 | #define cmpxchg64_local(ptr, o, n) \ |
124 | ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\ | 132 | ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ |
125 | (unsigned long long)(n))) | 133 | (unsigned long long)(n))) |
126 | #endif | 134 | #endif |
127 | 135 | ||
128 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | 136 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, |
@@ -131,22 +139,22 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
131 | unsigned long prev; | 139 | unsigned long prev; |
132 | switch (size) { | 140 | switch (size) { |
133 | case 1: | 141 | case 1: |
134 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" | 142 | asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" |
135 | : "=a"(prev) | 143 | : "=a"(prev) |
136 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 144 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
137 | : "memory"); | 145 | : "memory"); |
138 | return prev; | 146 | return prev; |
139 | case 2: | 147 | case 2: |
140 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" | 148 | asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" |
141 | : "=a"(prev) | 149 | : "=a"(prev) |
142 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 150 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
143 | : "memory"); | 151 | : "memory"); |
144 | return prev; | 152 | return prev; |
145 | case 4: | 153 | case 4: |
146 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" | 154 | asm volatile(LOCK_PREFIX "cmpxchgl %1,%2" |
147 | : "=a"(prev) | 155 | : "=a"(prev) |
148 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 156 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
149 | : "memory"); | 157 | : "memory"); |
150 | return prev; | 158 | return prev; |
151 | } | 159 | } |
152 | return old; | 160 | return old; |
@@ -158,85 +166,88 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
158 | * isn't. | 166 | * isn't. |
159 | */ | 167 | */ |
160 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, | 168 | static inline unsigned long __sync_cmpxchg(volatile void *ptr, |
161 | unsigned long old, | 169 | unsigned long old, |
162 | unsigned long new, int size) | 170 | unsigned long new, int size) |
163 | { | 171 | { |
164 | unsigned long prev; | 172 | unsigned long prev; |
165 | switch (size) { | 173 | switch (size) { |
166 | case 1: | 174 | case 1: |
167 | __asm__ __volatile__("lock; cmpxchgb %b1,%2" | 175 | asm volatile("lock; cmpxchgb %b1,%2" |
168 | : "=a"(prev) | 176 | : "=a"(prev) |
169 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 177 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
170 | : "memory"); | 178 | : "memory"); |
171 | return prev; | 179 | return prev; |
172 | case 2: | 180 | case 2: |
173 | __asm__ __volatile__("lock; cmpxchgw %w1,%2" | 181 | asm volatile("lock; cmpxchgw %w1,%2" |
174 | : "=a"(prev) | 182 | : "=a"(prev) |
175 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 183 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
176 | : "memory"); | 184 | : "memory"); |
177 | return prev; | 185 | return prev; |
178 | case 4: | 186 | case 4: |
179 | __asm__ __volatile__("lock; cmpxchgl %1,%2" | 187 | asm volatile("lock; cmpxchgl %1,%2" |
180 | : "=a"(prev) | 188 | : "=a"(prev) |
181 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 189 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
182 | : "memory"); | 190 | : "memory"); |
183 | return prev; | 191 | return prev; |
184 | } | 192 | } |
185 | return old; | 193 | return old; |
186 | } | 194 | } |
187 | 195 | ||
188 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | 196 | static inline unsigned long __cmpxchg_local(volatile void *ptr, |
189 | unsigned long old, unsigned long new, int size) | 197 | unsigned long old, |
198 | unsigned long new, int size) | ||
190 | { | 199 | { |
191 | unsigned long prev; | 200 | unsigned long prev; |
192 | switch (size) { | 201 | switch (size) { |
193 | case 1: | 202 | case 1: |
194 | __asm__ __volatile__("cmpxchgb %b1,%2" | 203 | asm volatile("cmpxchgb %b1,%2" |
195 | : "=a"(prev) | 204 | : "=a"(prev) |
196 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 205 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
197 | : "memory"); | 206 | : "memory"); |
198 | return prev; | 207 | return prev; |
199 | case 2: | 208 | case 2: |
200 | __asm__ __volatile__("cmpxchgw %w1,%2" | 209 | asm volatile("cmpxchgw %w1,%2" |
201 | : "=a"(prev) | 210 | : "=a"(prev) |
202 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 211 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
203 | : "memory"); | 212 | : "memory"); |
204 | return prev; | 213 | return prev; |
205 | case 4: | 214 | case 4: |
206 | __asm__ __volatile__("cmpxchgl %1,%2" | 215 | asm volatile("cmpxchgl %1,%2" |
207 | : "=a"(prev) | 216 | : "=a"(prev) |
208 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 217 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
209 | : "memory"); | 218 | : "memory"); |
210 | return prev; | 219 | return prev; |
211 | } | 220 | } |
212 | return old; | 221 | return old; |
213 | } | 222 | } |
214 | 223 | ||
215 | static inline unsigned long long __cmpxchg64(volatile void *ptr, | 224 | static inline unsigned long long __cmpxchg64(volatile void *ptr, |
216 | unsigned long long old, unsigned long long new) | 225 | unsigned long long old, |
226 | unsigned long long new) | ||
217 | { | 227 | { |
218 | unsigned long long prev; | 228 | unsigned long long prev; |
219 | __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" | 229 | asm volatile(LOCK_PREFIX "cmpxchg8b %3" |
220 | : "=A"(prev) | 230 | : "=A"(prev) |
221 | : "b"((unsigned long)new), | 231 | : "b"((unsigned long)new), |
222 | "c"((unsigned long)(new >> 32)), | 232 | "c"((unsigned long)(new >> 32)), |
223 | "m"(*__xg(ptr)), | 233 | "m"(*__xg(ptr)), |
224 | "0"(old) | 234 | "0"(old) |
225 | : "memory"); | 235 | : "memory"); |
226 | return prev; | 236 | return prev; |
227 | } | 237 | } |
228 | 238 | ||
229 | static inline unsigned long long __cmpxchg64_local(volatile void *ptr, | 239 | static inline unsigned long long __cmpxchg64_local(volatile void *ptr, |
230 | unsigned long long old, unsigned long long new) | 240 | unsigned long long old, |
241 | unsigned long long new) | ||
231 | { | 242 | { |
232 | unsigned long long prev; | 243 | unsigned long long prev; |
233 | __asm__ __volatile__("cmpxchg8b %3" | 244 | asm volatile("cmpxchg8b %3" |
234 | : "=A"(prev) | 245 | : "=A"(prev) |
235 | : "b"((unsigned long)new), | 246 | : "b"((unsigned long)new), |
236 | "c"((unsigned long)(new >> 32)), | 247 | "c"((unsigned long)(new >> 32)), |
237 | "m"(*__xg(ptr)), | 248 | "m"(*__xg(ptr)), |
238 | "0"(old) | 249 | "0"(old) |
239 | : "memory"); | 250 | : "memory"); |
240 | return prev; | 251 | return prev; |
241 | } | 252 | } |
242 | 253 | ||
@@ -252,7 +263,7 @@ extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); | |||
252 | extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); | 263 | extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); |
253 | 264 | ||
254 | static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, | 265 | static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, |
255 | unsigned long new, int size) | 266 | unsigned long new, int size) |
256 | { | 267 | { |
257 | switch (size) { | 268 | switch (size) { |
258 | case 1: | 269 | case 1: |
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h index 56f5b41e071c..d9b26b9a28cf 100644 --- a/include/asm-x86/cmpxchg_64.h +++ b/include/asm-x86/cmpxchg_64.h | |||
@@ -3,7 +3,8 @@ | |||
3 | 3 | ||
4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ | 4 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ |
5 | 5 | ||
6 | #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) | 6 | #define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \ |
7 | (ptr), sizeof(*(ptr)))) | ||
7 | 8 | ||
8 | #define __xg(x) ((volatile long *)(x)) | 9 | #define __xg(x) ((volatile long *)(x)) |
9 | 10 | ||
@@ -19,33 +20,34 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) | |||
19 | * Note 2: xchg has side effect, so that attribute volatile is necessary, | 20 | * Note 2: xchg has side effect, so that attribute volatile is necessary, |
20 | * but generally the primitive is invalid, *ptr is output argument. --ANK | 21 | * but generally the primitive is invalid, *ptr is output argument. --ANK |
21 | */ | 22 | */ |
22 | static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) | 23 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
24 | int size) | ||
23 | { | 25 | { |
24 | switch (size) { | 26 | switch (size) { |
25 | case 1: | 27 | case 1: |
26 | __asm__ __volatile__("xchgb %b0,%1" | 28 | asm volatile("xchgb %b0,%1" |
27 | :"=q" (x) | 29 | : "=q" (x) |
28 | :"m" (*__xg(ptr)), "0" (x) | 30 | : "m" (*__xg(ptr)), "0" (x) |
29 | :"memory"); | 31 | : "memory"); |
30 | break; | 32 | break; |
31 | case 2: | 33 | case 2: |
32 | __asm__ __volatile__("xchgw %w0,%1" | 34 | asm volatile("xchgw %w0,%1" |
33 | :"=r" (x) | 35 | : "=r" (x) |
34 | :"m" (*__xg(ptr)), "0" (x) | 36 | : "m" (*__xg(ptr)), "0" (x) |
35 | :"memory"); | 37 | : "memory"); |
36 | break; | 38 | break; |
37 | case 4: | 39 | case 4: |
38 | __asm__ __volatile__("xchgl %k0,%1" | 40 | asm volatile("xchgl %k0,%1" |
39 | :"=r" (x) | 41 | : "=r" (x) |
40 | :"m" (*__xg(ptr)), "0" (x) | 42 | : "m" (*__xg(ptr)), "0" (x) |
41 | :"memory"); | 43 | : "memory"); |
42 | break; | 44 | break; |
43 | case 8: | 45 | case 8: |
44 | __asm__ __volatile__("xchgq %0,%1" | 46 | asm volatile("xchgq %0,%1" |
45 | :"=r" (x) | 47 | : "=r" (x) |
46 | :"m" (*__xg(ptr)), "0" (x) | 48 | : "m" (*__xg(ptr)), "0" (x) |
47 | :"memory"); | 49 | : "memory"); |
48 | break; | 50 | break; |
49 | } | 51 | } |
50 | return x; | 52 | return x; |
51 | } | 53 | } |
@@ -64,61 +66,62 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
64 | unsigned long prev; | 66 | unsigned long prev; |
65 | switch (size) { | 67 | switch (size) { |
66 | case 1: | 68 | case 1: |
67 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" | 69 | asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" |
68 | : "=a"(prev) | 70 | : "=a"(prev) |
69 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 71 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
70 | : "memory"); | 72 | : "memory"); |
71 | return prev; | 73 | return prev; |
72 | case 2: | 74 | case 2: |
73 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" | 75 | asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" |
74 | : "=a"(prev) | 76 | : "=a"(prev) |
75 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 77 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
76 | : "memory"); | 78 | : "memory"); |
77 | return prev; | 79 | return prev; |
78 | case 4: | 80 | case 4: |
79 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" | 81 | asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2" |
80 | : "=a"(prev) | 82 | : "=a"(prev) |
81 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 83 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
82 | : "memory"); | 84 | : "memory"); |
83 | return prev; | 85 | return prev; |
84 | case 8: | 86 | case 8: |
85 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" | 87 | asm volatile(LOCK_PREFIX "cmpxchgq %1,%2" |
86 | : "=a"(prev) | 88 | : "=a"(prev) |
87 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 89 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
88 | : "memory"); | 90 | : "memory"); |
89 | return prev; | 91 | return prev; |
90 | } | 92 | } |
91 | return old; | 93 | return old; |
92 | } | 94 | } |
93 | 95 | ||
94 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | 96 | static inline unsigned long __cmpxchg_local(volatile void *ptr, |
95 | unsigned long old, unsigned long new, int size) | 97 | unsigned long old, |
98 | unsigned long new, int size) | ||
96 | { | 99 | { |
97 | unsigned long prev; | 100 | unsigned long prev; |
98 | switch (size) { | 101 | switch (size) { |
99 | case 1: | 102 | case 1: |
100 | __asm__ __volatile__("cmpxchgb %b1,%2" | 103 | asm volatile("cmpxchgb %b1,%2" |
101 | : "=a"(prev) | 104 | : "=a"(prev) |
102 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 105 | : "q"(new), "m"(*__xg(ptr)), "0"(old) |
103 | : "memory"); | 106 | : "memory"); |
104 | return prev; | 107 | return prev; |
105 | case 2: | 108 | case 2: |
106 | __asm__ __volatile__("cmpxchgw %w1,%2" | 109 | asm volatile("cmpxchgw %w1,%2" |
107 | : "=a"(prev) | 110 | : "=a"(prev) |
108 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 111 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
109 | : "memory"); | 112 | : "memory"); |
110 | return prev; | 113 | return prev; |
111 | case 4: | 114 | case 4: |
112 | __asm__ __volatile__("cmpxchgl %k1,%2" | 115 | asm volatile("cmpxchgl %k1,%2" |
113 | : "=a"(prev) | 116 | : "=a"(prev) |
114 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 117 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
115 | : "memory"); | 118 | : "memory"); |
116 | return prev; | 119 | return prev; |
117 | case 8: | 120 | case 8: |
118 | __asm__ __volatile__("cmpxchgq %1,%2" | 121 | asm volatile("cmpxchgq %1,%2" |
119 | : "=a"(prev) | 122 | : "=a"(prev) |
120 | : "r"(new), "m"(*__xg(ptr)), "0"(old) | 123 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
121 | : "memory"); | 124 | : "memory"); |
122 | return prev; | 125 | return prev; |
123 | } | 126 | } |
124 | return old; | 127 | return old; |
@@ -126,19 +129,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, | |||
126 | 129 | ||
127 | #define cmpxchg(ptr, o, n) \ | 130 | #define cmpxchg(ptr, o, n) \ |
128 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | 131 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ |
129 | (unsigned long)(n), sizeof(*(ptr)))) | 132 | (unsigned long)(n), sizeof(*(ptr)))) |
130 | #define cmpxchg64(ptr, o, n) \ | 133 | #define cmpxchg64(ptr, o, n) \ |
131 | ({ \ | 134 | ({ \ |
132 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 135 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
133 | cmpxchg((ptr), (o), (n)); \ | 136 | cmpxchg((ptr), (o), (n)); \ |
134 | }) | 137 | }) |
135 | #define cmpxchg_local(ptr, o, n) \ | 138 | #define cmpxchg_local(ptr, o, n) \ |
136 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ | 139 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ |
137 | (unsigned long)(n), sizeof(*(ptr)))) | 140 | (unsigned long)(n), \ |
141 | sizeof(*(ptr)))) | ||
138 | #define cmpxchg64_local(ptr, o, n) \ | 142 | #define cmpxchg64_local(ptr, o, n) \ |
139 | ({ \ | 143 | ({ \ |
140 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 144 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
141 | cmpxchg_local((ptr), (o), (n)); \ | 145 | cmpxchg_local((ptr), (o), (n)); \ |
142 | }) | 146 | }) |
143 | 147 | ||
144 | #endif | 148 | #endif |
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h index d3e8f3e87ee8..1793ac317a30 100644 --- a/include/asm-x86/compat.h +++ b/include/asm-x86/compat.h | |||
@@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) | |||
204 | return (u32)(unsigned long)uptr; | 204 | return (u32)(unsigned long)uptr; |
205 | } | 205 | } |
206 | 206 | ||
207 | static __inline__ void __user *compat_alloc_user_space(long len) | 207 | static inline void __user *compat_alloc_user_space(long len) |
208 | { | 208 | { |
209 | struct pt_regs *regs = task_pt_regs(current); | 209 | struct pt_regs *regs = task_pt_regs(current); |
210 | return (void __user *)regs->sp - len; | 210 | return (void __user *)regs->sp - len; |
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 065e92966c7c..0d609c837a41 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h | |||
@@ -120,6 +120,9 @@ | |||
120 | extern const char * const x86_cap_flags[NCAPINTS*32]; | 120 | extern const char * const x86_cap_flags[NCAPINTS*32]; |
121 | extern const char * const x86_power_flags[32]; | 121 | extern const char * const x86_power_flags[32]; |
122 | 122 | ||
123 | #define test_cpu_cap(c, bit) \ | ||
124 | test_bit(bit, (unsigned long *)((c)->x86_capability)) | ||
125 | |||
123 | #define cpu_has(c, bit) \ | 126 | #define cpu_has(c, bit) \ |
124 | (__builtin_constant_p(bit) && \ | 127 | (__builtin_constant_p(bit) && \ |
125 | ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ | 128 | ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ |
@@ -131,7 +134,8 @@ extern const char * const x86_power_flags[32]; | |||
131 | (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ | 134 | (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ |
132 | (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ | 135 | (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ |
133 | ? 1 : \ | 136 | ? 1 : \ |
134 | test_bit(bit, (unsigned long *)((c)->x86_capability))) | 137 | test_cpu_cap(c, bit)) |
138 | |||
135 | #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) | 139 | #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) |
136 | 140 | ||
137 | #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) | 141 | #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) |
@@ -181,6 +185,8 @@ extern const char * const x86_power_flags[32]; | |||
181 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) | 185 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) |
182 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) | 186 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) |
183 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) | 187 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
188 | #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) | ||
189 | #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) | ||
184 | 190 | ||
185 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) | 191 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
186 | # define cpu_has_invlpg 1 | 192 | # define cpu_has_invlpg 1 |
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h index d35248539912..5af9bdb97a16 100644 --- a/include/asm-x86/current_32.h +++ b/include/asm-x86/current_32.h | |||
@@ -11,7 +11,7 @@ static __always_inline struct task_struct *get_current(void) | |||
11 | { | 11 | { |
12 | return x86_read_percpu(current_task); | 12 | return x86_read_percpu(current_task); |
13 | } | 13 | } |
14 | 14 | ||
15 | #define current get_current() | 15 | #define current get_current() |
16 | 16 | ||
17 | #endif /* !(_I386_CURRENT_H) */ | 17 | #endif /* !(_I386_CURRENT_H) */ |
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h index bc8adecee66d..2d368ede2fc1 100644 --- a/include/asm-x86/current_64.h +++ b/include/asm-x86/current_64.h | |||
@@ -1,23 +1,23 @@ | |||
1 | #ifndef _X86_64_CURRENT_H | 1 | #ifndef _X86_64_CURRENT_H |
2 | #define _X86_64_CURRENT_H | 2 | #define _X86_64_CURRENT_H |
3 | 3 | ||
4 | #if !defined(__ASSEMBLY__) | 4 | #if !defined(__ASSEMBLY__) |
5 | struct task_struct; | 5 | struct task_struct; |
6 | 6 | ||
7 | #include <asm/pda.h> | 7 | #include <asm/pda.h> |
8 | 8 | ||
9 | static inline struct task_struct *get_current(void) | 9 | static inline struct task_struct *get_current(void) |
10 | { | 10 | { |
11 | struct task_struct *t = read_pda(pcurrent); | 11 | struct task_struct *t = read_pda(pcurrent); |
12 | return t; | 12 | return t; |
13 | } | 13 | } |
14 | 14 | ||
15 | #define current get_current() | 15 | #define current get_current() |
16 | 16 | ||
17 | #else | 17 | #else |
18 | 18 | ||
19 | #ifndef ASM_OFFSET_H | 19 | #ifndef ASM_OFFSET_H |
20 | #include <asm/asm-offsets.h> | 20 | #include <asm/asm-offsets.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg | 23 | #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg |
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h index 5b6a05d3a771..268a012bcd79 100644 --- a/include/asm-x86/desc.h +++ b/include/asm-x86/desc.h | |||
@@ -62,8 +62,8 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | static inline void pack_gate(gate_desc *gate, unsigned char type, | 64 | static inline void pack_gate(gate_desc *gate, unsigned char type, |
65 | unsigned long base, unsigned dpl, unsigned flags, unsigned short seg) | 65 | unsigned long base, unsigned dpl, unsigned flags, |
66 | 66 | unsigned short seg) | |
67 | { | 67 | { |
68 | gate->a = (seg << 16) | (base & 0xffff); | 68 | gate->a = (seg << 16) | (base & 0xffff); |
69 | gate->b = (base & 0xffff0000) | | 69 | gate->b = (base & 0xffff0000) | |
@@ -84,22 +84,23 @@ static inline int desc_empty(const void *ptr) | |||
84 | #define load_TR_desc() native_load_tr_desc() | 84 | #define load_TR_desc() native_load_tr_desc() |
85 | #define load_gdt(dtr) native_load_gdt(dtr) | 85 | #define load_gdt(dtr) native_load_gdt(dtr) |
86 | #define load_idt(dtr) native_load_idt(dtr) | 86 | #define load_idt(dtr) native_load_idt(dtr) |
87 | #define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) | 87 | #define load_tr(tr) asm volatile("ltr %0"::"m" (tr)) |
88 | #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) | 88 | #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) |
89 | 89 | ||
90 | #define store_gdt(dtr) native_store_gdt(dtr) | 90 | #define store_gdt(dtr) native_store_gdt(dtr) |
91 | #define store_idt(dtr) native_store_idt(dtr) | 91 | #define store_idt(dtr) native_store_idt(dtr) |
92 | #define store_tr(tr) (tr = native_store_tr()) | 92 | #define store_tr(tr) (tr = native_store_tr()) |
93 | #define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) | 93 | #define store_ldt(ldt) asm("sldt %0":"=m" (ldt)) |
94 | 94 | ||
95 | #define load_TLS(t, cpu) native_load_tls(t, cpu) | 95 | #define load_TLS(t, cpu) native_load_tls(t, cpu) |
96 | #define set_ldt native_set_ldt | 96 | #define set_ldt native_set_ldt |
97 | 97 | ||
98 | #define write_ldt_entry(dt, entry, desc) \ | 98 | #define write_ldt_entry(dt, entry, desc) \ |
99 | native_write_ldt_entry(dt, entry, desc) | 99 | native_write_ldt_entry(dt, entry, desc) |
100 | #define write_gdt_entry(dt, entry, desc, type) \ | 100 | #define write_gdt_entry(dt, entry, desc, type) \ |
101 | native_write_gdt_entry(dt, entry, desc, type) | 101 | native_write_gdt_entry(dt, entry, desc, type) |
102 | #define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) | 102 | #define write_idt_entry(dt, entry, g) \ |
103 | native_write_idt_entry(dt, entry, g) | ||
103 | #endif | 104 | #endif |
104 | 105 | ||
105 | static inline void native_write_idt_entry(gate_desc *idt, int entry, | 106 | static inline void native_write_idt_entry(gate_desc *idt, int entry, |
@@ -138,8 +139,8 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, | |||
138 | { | 139 | { |
139 | desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); | 140 | desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); |
140 | desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | | 141 | desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | |
141 | (limit & 0x000f0000) | ((type & 0xff) << 8) | | 142 | (limit & 0x000f0000) | ((type & 0xff) << 8) | |
142 | ((flags & 0xf) << 20); | 143 | ((flags & 0xf) << 20); |
143 | desc->p = 1; | 144 | desc->p = 1; |
144 | } | 145 | } |
145 | 146 | ||
@@ -159,7 +160,6 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr, | |||
159 | desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; | 160 | desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; |
160 | desc->base3 = PTR_HIGH(addr); | 161 | desc->base3 = PTR_HIGH(addr); |
161 | #else | 162 | #else |
162 | |||
163 | pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0); | 163 | pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0); |
164 | #endif | 164 | #endif |
165 | } | 165 | } |
@@ -177,7 +177,8 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) | |||
177 | * last valid byte | 177 | * last valid byte |
178 | */ | 178 | */ |
179 | set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, | 179 | set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, |
180 | IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1); | 180 | IO_BITMAP_OFFSET + IO_BITMAP_BYTES + |
181 | sizeof(unsigned long) - 1); | ||
181 | write_gdt_entry(d, entry, &tss, DESC_TSS); | 182 | write_gdt_entry(d, entry, &tss, DESC_TSS); |
182 | } | 183 | } |
183 | 184 | ||
@@ -186,7 +187,7 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) | |||
186 | static inline void native_set_ldt(const void *addr, unsigned int entries) | 187 | static inline void native_set_ldt(const void *addr, unsigned int entries) |
187 | { | 188 | { |
188 | if (likely(entries == 0)) | 189 | if (likely(entries == 0)) |
189 | __asm__ __volatile__("lldt %w0"::"q" (0)); | 190 | asm volatile("lldt %w0"::"q" (0)); |
190 | else { | 191 | else { |
191 | unsigned cpu = smp_processor_id(); | 192 | unsigned cpu = smp_processor_id(); |
192 | ldt_desc ldt; | 193 | ldt_desc ldt; |
@@ -195,7 +196,7 @@ static inline void native_set_ldt(const void *addr, unsigned int entries) | |||
195 | DESC_LDT, entries * sizeof(ldt) - 1); | 196 | DESC_LDT, entries * sizeof(ldt) - 1); |
196 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, | 197 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, |
197 | &ldt, DESC_LDT); | 198 | &ldt, DESC_LDT); |
198 | __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); | 199 | asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); |
199 | } | 200 | } |
200 | } | 201 | } |
201 | 202 | ||
@@ -240,15 +241,15 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) | |||
240 | gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; | 241 | gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; |
241 | } | 242 | } |
242 | 243 | ||
243 | #define _LDT_empty(info) (\ | 244 | #define _LDT_empty(info) \ |
244 | (info)->base_addr == 0 && \ | 245 | ((info)->base_addr == 0 && \ |
245 | (info)->limit == 0 && \ | 246 | (info)->limit == 0 && \ |
246 | (info)->contents == 0 && \ | 247 | (info)->contents == 0 && \ |
247 | (info)->read_exec_only == 1 && \ | 248 | (info)->read_exec_only == 1 && \ |
248 | (info)->seg_32bit == 0 && \ | 249 | (info)->seg_32bit == 0 && \ |
249 | (info)->limit_in_pages == 0 && \ | 250 | (info)->limit_in_pages == 0 && \ |
250 | (info)->seg_not_present == 1 && \ | 251 | (info)->seg_not_present == 1 && \ |
251 | (info)->useable == 0) | 252 | (info)->useable == 0) |
252 | 253 | ||
253 | #ifdef CONFIG_X86_64 | 254 | #ifdef CONFIG_X86_64 |
254 | #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) | 255 | #define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) |
@@ -287,7 +288,7 @@ static inline unsigned long get_desc_limit(const struct desc_struct *desc) | |||
287 | } | 288 | } |
288 | 289 | ||
289 | static inline void _set_gate(int gate, unsigned type, void *addr, | 290 | static inline void _set_gate(int gate, unsigned type, void *addr, |
290 | unsigned dpl, unsigned ist, unsigned seg) | 291 | unsigned dpl, unsigned ist, unsigned seg) |
291 | { | 292 | { |
292 | gate_desc s; | 293 | gate_desc s; |
293 | pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); | 294 | pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); |
@@ -370,10 +371,10 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist) | |||
370 | * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. | 371 | * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. |
371 | */ | 372 | */ |
372 | #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ | 373 | #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ |
373 | movb idx*8+4(gdt), lo_b; \ | 374 | movb idx * 8 + 4(gdt), lo_b; \ |
374 | movb idx*8+7(gdt), hi_b; \ | 375 | movb idx * 8 + 7(gdt), hi_b; \ |
375 | shll $16, base; \ | 376 | shll $16, base; \ |
376 | movw idx*8+2(gdt), lo_w; | 377 | movw idx * 8 + 2(gdt), lo_w; |
377 | 378 | ||
378 | 379 | ||
379 | #endif /* __ASSEMBLY__ */ | 380 | #endif /* __ASSEMBLY__ */ |
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h index e33f078b3e54..eccb4ea1f918 100644 --- a/include/asm-x86/desc_defs.h +++ b/include/asm-x86/desc_defs.h | |||
@@ -18,17 +18,19 @@ | |||
18 | * incrementally. We keep the signature as a struct, rather than an union, | 18 | * incrementally. We keep the signature as a struct, rather than an union, |
19 | * so we can get rid of it transparently in the future -- glommer | 19 | * so we can get rid of it transparently in the future -- glommer |
20 | */ | 20 | */ |
21 | // 8 byte segment descriptor | 21 | /* 8 byte segment descriptor */ |
22 | struct desc_struct { | 22 | struct desc_struct { |
23 | union { | 23 | union { |
24 | struct { unsigned int a, b; }; | 24 | struct { |
25 | unsigned int a; | ||
26 | unsigned int b; | ||
27 | }; | ||
25 | struct { | 28 | struct { |
26 | u16 limit0; | 29 | u16 limit0; |
27 | u16 base0; | 30 | u16 base0; |
28 | unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; | 31 | unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; |
29 | unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; | 32 | unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; |
30 | }; | 33 | }; |
31 | |||
32 | }; | 34 | }; |
33 | } __attribute__((packed)); | 35 | } __attribute__((packed)); |
34 | 36 | ||
@@ -39,7 +41,7 @@ enum { | |||
39 | GATE_TASK = 0x5, | 41 | GATE_TASK = 0x5, |
40 | }; | 42 | }; |
41 | 43 | ||
42 | // 16byte gate | 44 | /* 16byte gate */ |
43 | struct gate_struct64 { | 45 | struct gate_struct64 { |
44 | u16 offset_low; | 46 | u16 offset_low; |
45 | u16 segment; | 47 | u16 segment; |
@@ -56,10 +58,10 @@ struct gate_struct64 { | |||
56 | enum { | 58 | enum { |
57 | DESC_TSS = 0x9, | 59 | DESC_TSS = 0x9, |
58 | DESC_LDT = 0x2, | 60 | DESC_LDT = 0x2, |
59 | DESCTYPE_S = 0x10, /* !system */ | 61 | DESCTYPE_S = 0x10, /* !system */ |
60 | }; | 62 | }; |
61 | 63 | ||
62 | // LDT or TSS descriptor in the GDT. 16 bytes. | 64 | /* LDT or TSS descriptor in the GDT. 16 bytes. */ |
63 | struct ldttss_desc64 { | 65 | struct ldttss_desc64 { |
64 | u16 limit0; | 66 | u16 limit0; |
65 | u16 base0; | 67 | u16 base0; |
@@ -84,7 +86,6 @@ struct desc_ptr { | |||
84 | unsigned long address; | 86 | unsigned long address; |
85 | } __attribute__((packed)) ; | 87 | } __attribute__((packed)) ; |
86 | 88 | ||
87 | |||
88 | #endif /* !__ASSEMBLY__ */ | 89 | #endif /* !__ASSEMBLY__ */ |
89 | 90 | ||
90 | #endif | 91 | #endif |
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h index e98d16e7a37a..0dbf8bf3ef0a 100644 --- a/include/asm-x86/div64.h +++ b/include/asm-x86/div64.h | |||
@@ -17,18 +17,20 @@ | |||
17 | * This ends up being the most efficient "calling | 17 | * This ends up being the most efficient "calling |
18 | * convention" on x86. | 18 | * convention" on x86. |
19 | */ | 19 | */ |
20 | #define do_div(n,base) ({ \ | 20 | #define do_div(n, base) \ |
21 | unsigned long __upper, __low, __high, __mod, __base; \ | 21 | ({ \ |
22 | __base = (base); \ | 22 | unsigned long __upper, __low, __high, __mod, __base; \ |
23 | asm("":"=a" (__low), "=d" (__high):"A" (n)); \ | 23 | __base = (base); \ |
24 | __upper = __high; \ | 24 | asm("":"=a" (__low), "=d" (__high) : "A" (n)); \ |
25 | if (__high) { \ | 25 | __upper = __high; \ |
26 | __upper = __high % (__base); \ | 26 | if (__high) { \ |
27 | __high = __high / (__base); \ | 27 | __upper = __high % (__base); \ |
28 | } \ | 28 | __high = __high / (__base); \ |
29 | asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ | 29 | } \ |
30 | asm("":"=A" (n):"a" (__low),"d" (__high)); \ | 30 | asm("divl %2":"=a" (__low), "=d" (__mod) \ |
31 | __mod; \ | 31 | : "rm" (__base), "0" (__low), "1" (__upper)); \ |
32 | asm("":"=A" (n) : "a" (__low), "d" (__high)); \ | ||
33 | __mod; \ | ||
32 | }) | 34 | }) |
33 | 35 | ||
34 | /* | 36 | /* |
@@ -37,14 +39,13 @@ | |||
37 | * | 39 | * |
38 | * Warning, this will do an exception if X overflows. | 40 | * Warning, this will do an exception if X overflows. |
39 | */ | 41 | */ |
40 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) | 42 | #define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c) |
41 | 43 | ||
42 | static inline long | 44 | static inline long div_ll_X_l_rem(long long divs, long div, long *rem) |
43 | div_ll_X_l_rem(long long divs, long div, long *rem) | ||
44 | { | 45 | { |
45 | long dum2; | 46 | long dum2; |
46 | __asm__("divl %2":"=a"(dum2), "=d"(*rem) | 47 | asm("divl %2":"=a"(dum2), "=d"(*rem) |
47 | : "rm"(div), "A"(divs)); | 48 | : "rm"(div), "A"(divs)); |
48 | 49 | ||
49 | return dum2; | 50 | return dum2; |
50 | 51 | ||
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h index e9733ce89880..ca1098a7e580 100644 --- a/include/asm-x86/dma.h +++ b/include/asm-x86/dma.h | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <asm/io.h> /* need byte IO */ | 12 | #include <asm/io.h> /* need byte IO */ |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | 14 | ||
15 | |||
16 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER | 15 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER |
17 | #define dma_outb outb_p | 16 | #define dma_outb outb_p |
18 | #else | 17 | #else |
@@ -74,15 +73,15 @@ | |||
74 | #ifdef CONFIG_X86_32 | 73 | #ifdef CONFIG_X86_32 |
75 | 74 | ||
76 | /* The maximum address that we can perform a DMA transfer to on this platform */ | 75 | /* The maximum address that we can perform a DMA transfer to on this platform */ |
77 | #define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) | 76 | #define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000) |
78 | 77 | ||
79 | #else | 78 | #else |
80 | 79 | ||
81 | /* 16MB ISA DMA zone */ | 80 | /* 16MB ISA DMA zone */ |
82 | #define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT) | 81 | #define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT) |
83 | 82 | ||
84 | /* 4GB broken PCI/AGP hardware bus master zone */ | 83 | /* 4GB broken PCI/AGP hardware bus master zone */ |
85 | #define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT) | 84 | #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) |
86 | 85 | ||
87 | /* Compat define for old dma zone */ | 86 | /* Compat define for old dma zone */ |
88 | #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) | 87 | #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT)) |
@@ -154,20 +153,20 @@ | |||
154 | 153 | ||
155 | extern spinlock_t dma_spin_lock; | 154 | extern spinlock_t dma_spin_lock; |
156 | 155 | ||
157 | static __inline__ unsigned long claim_dma_lock(void) | 156 | static inline unsigned long claim_dma_lock(void) |
158 | { | 157 | { |
159 | unsigned long flags; | 158 | unsigned long flags; |
160 | spin_lock_irqsave(&dma_spin_lock, flags); | 159 | spin_lock_irqsave(&dma_spin_lock, flags); |
161 | return flags; | 160 | return flags; |
162 | } | 161 | } |
163 | 162 | ||
164 | static __inline__ void release_dma_lock(unsigned long flags) | 163 | static inline void release_dma_lock(unsigned long flags) |
165 | { | 164 | { |
166 | spin_unlock_irqrestore(&dma_spin_lock, flags); | 165 | spin_unlock_irqrestore(&dma_spin_lock, flags); |
167 | } | 166 | } |
168 | 167 | ||
169 | /* enable/disable a specific DMA channel */ | 168 | /* enable/disable a specific DMA channel */ |
170 | static __inline__ void enable_dma(unsigned int dmanr) | 169 | static inline void enable_dma(unsigned int dmanr) |
171 | { | 170 | { |
172 | if (dmanr <= 3) | 171 | if (dmanr <= 3) |
173 | dma_outb(dmanr, DMA1_MASK_REG); | 172 | dma_outb(dmanr, DMA1_MASK_REG); |
@@ -175,7 +174,7 @@ static __inline__ void enable_dma(unsigned int dmanr) | |||
175 | dma_outb(dmanr & 3, DMA2_MASK_REG); | 174 | dma_outb(dmanr & 3, DMA2_MASK_REG); |
176 | } | 175 | } |
177 | 176 | ||
178 | static __inline__ void disable_dma(unsigned int dmanr) | 177 | static inline void disable_dma(unsigned int dmanr) |
179 | { | 178 | { |
180 | if (dmanr <= 3) | 179 | if (dmanr <= 3) |
181 | dma_outb(dmanr | 4, DMA1_MASK_REG); | 180 | dma_outb(dmanr | 4, DMA1_MASK_REG); |
@@ -190,7 +189,7 @@ static __inline__ void disable_dma(unsigned int dmanr) | |||
190 | * --- In order to do that, the DMA routines below should --- | 189 | * --- In order to do that, the DMA routines below should --- |
191 | * --- only be used while holding the DMA lock ! --- | 190 | * --- only be used while holding the DMA lock ! --- |
192 | */ | 191 | */ |
193 | static __inline__ void clear_dma_ff(unsigned int dmanr) | 192 | static inline void clear_dma_ff(unsigned int dmanr) |
194 | { | 193 | { |
195 | if (dmanr <= 3) | 194 | if (dmanr <= 3) |
196 | dma_outb(0, DMA1_CLEAR_FF_REG); | 195 | dma_outb(0, DMA1_CLEAR_FF_REG); |
@@ -199,7 +198,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr) | |||
199 | } | 198 | } |
200 | 199 | ||
201 | /* set mode (above) for a specific DMA channel */ | 200 | /* set mode (above) for a specific DMA channel */ |
202 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | 201 | static inline void set_dma_mode(unsigned int dmanr, char mode) |
203 | { | 202 | { |
204 | if (dmanr <= 3) | 203 | if (dmanr <= 3) |
205 | dma_outb(mode | dmanr, DMA1_MODE_REG); | 204 | dma_outb(mode | dmanr, DMA1_MODE_REG); |
@@ -212,7 +211,7 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | |||
212 | * the lower 16 bits of the DMA current address register, but a 64k boundary | 211 | * the lower 16 bits of the DMA current address register, but a 64k boundary |
213 | * may have been crossed. | 212 | * may have been crossed. |
214 | */ | 213 | */ |
215 | static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | 214 | static inline void set_dma_page(unsigned int dmanr, char pagenr) |
216 | { | 215 | { |
217 | switch (dmanr) { | 216 | switch (dmanr) { |
218 | case 0: | 217 | case 0: |
@@ -243,15 +242,15 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) | |||
243 | /* Set transfer address & page bits for specific DMA channel. | 242 | /* Set transfer address & page bits for specific DMA channel. |
244 | * Assumes dma flipflop is clear. | 243 | * Assumes dma flipflop is clear. |
245 | */ | 244 | */ |
246 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | 245 | static inline void set_dma_addr(unsigned int dmanr, unsigned int a) |
247 | { | 246 | { |
248 | set_dma_page(dmanr, a>>16); | 247 | set_dma_page(dmanr, a>>16); |
249 | if (dmanr <= 3) { | 248 | if (dmanr <= 3) { |
250 | dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); | 249 | dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); |
251 | dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); | 250 | dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); |
252 | } else { | 251 | } else { |
253 | dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); | 252 | dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); |
254 | dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); | 253 | dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); |
255 | } | 254 | } |
256 | } | 255 | } |
257 | 256 | ||
@@ -264,18 +263,18 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | |||
264 | * Assumes dma flip-flop is clear. | 263 | * Assumes dma flip-flop is clear. |
265 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | 264 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. |
266 | */ | 265 | */ |
267 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | 266 | static inline void set_dma_count(unsigned int dmanr, unsigned int count) |
268 | { | 267 | { |
269 | count--; | 268 | count--; |
270 | if (dmanr <= 3) { | 269 | if (dmanr <= 3) { |
271 | dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); | 270 | dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); |
272 | dma_outb((count >> 8) & 0xff, | 271 | dma_outb((count >> 8) & 0xff, |
273 | ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); | 272 | ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); |
274 | } else { | 273 | } else { |
275 | dma_outb((count >> 1) & 0xff, | 274 | dma_outb((count >> 1) & 0xff, |
276 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | 275 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); |
277 | dma_outb((count >> 9) & 0xff, | 276 | dma_outb((count >> 9) & 0xff, |
278 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | 277 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); |
279 | } | 278 | } |
280 | } | 279 | } |
281 | 280 | ||
@@ -288,7 +287,7 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | |||
288 | * | 287 | * |
289 | * Assumes DMA flip-flop is clear. | 288 | * Assumes DMA flip-flop is clear. |
290 | */ | 289 | */ |
291 | static __inline__ int get_dma_residue(unsigned int dmanr) | 290 | static inline int get_dma_residue(unsigned int dmanr) |
292 | { | 291 | { |
293 | unsigned int io_port; | 292 | unsigned int io_port; |
294 | /* using short to get 16-bit wrap around */ | 293 | /* using short to get 16-bit wrap around */ |
diff --git a/include/asm-x86/dwarf2_64.h b/include/asm-x86/dwarf2_64.h index eedc08526b0b..c950519a264d 100644 --- a/include/asm-x86/dwarf2_64.h +++ b/include/asm-x86/dwarf2_64.h | |||
@@ -1,16 +1,15 @@ | |||
1 | #ifndef _DWARF2_H | 1 | #ifndef _DWARF2_H |
2 | #define _DWARF2_H 1 | 2 | #define _DWARF2_H 1 |
3 | 3 | ||
4 | |||
5 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
6 | #warning "asm/dwarf2.h should be only included in pure assembly files" | 5 | #warning "asm/dwarf2.h should be only included in pure assembly files" |
7 | #endif | 6 | #endif |
8 | 7 | ||
9 | /* | 8 | /* |
10 | Macros for dwarf2 CFI unwind table entries. | 9 | Macros for dwarf2 CFI unwind table entries. |
11 | See "as.info" for details on these pseudo ops. Unfortunately | 10 | See "as.info" for details on these pseudo ops. Unfortunately |
12 | they are only supported in very new binutils, so define them | 11 | they are only supported in very new binutils, so define them |
13 | away for older version. | 12 | away for older version. |
14 | */ | 13 | */ |
15 | 14 | ||
16 | #ifdef CONFIG_AS_CFI | 15 | #ifdef CONFIG_AS_CFI |
diff --git a/include/asm-x86/e820_32.h b/include/asm-x86/e820_32.h index e7207a6de3e0..43b1a8bd4b34 100644 --- a/include/asm-x86/e820_32.h +++ b/include/asm-x86/e820_32.h | |||
@@ -34,8 +34,8 @@ extern void e820_register_memory(void); | |||
34 | extern void limit_regions(unsigned long long size); | 34 | extern void limit_regions(unsigned long long size); |
35 | extern void print_memory_map(char *who); | 35 | extern void print_memory_map(char *who); |
36 | extern void init_iomem_resources(struct resource *code_resource, | 36 | extern void init_iomem_resources(struct resource *code_resource, |
37 | struct resource *data_resource, | 37 | struct resource *data_resource, |
38 | struct resource *bss_resource); | 38 | struct resource *bss_resource); |
39 | 39 | ||
40 | #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) | 40 | #if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) |
41 | extern void e820_mark_nosave_regions(void); | 41 | extern void e820_mark_nosave_regions(void); |
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h index 22ede73ae724..f478c57eb060 100644 --- a/include/asm-x86/e820_64.h +++ b/include/asm-x86/e820_64.h | |||
@@ -14,20 +14,24 @@ | |||
14 | #include <linux/ioport.h> | 14 | #include <linux/ioport.h> |
15 | 15 | ||
16 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
17 | extern unsigned long find_e820_area(unsigned long start, unsigned long end, | 17 | extern unsigned long find_e820_area(unsigned long start, unsigned long end, |
18 | unsigned size, unsigned long align); | 18 | unsigned long size, unsigned long align); |
19 | extern void add_memory_region(unsigned long start, unsigned long size, | 19 | extern unsigned long find_e820_area_size(unsigned long start, |
20 | unsigned long *sizep, | ||
21 | unsigned long align); | ||
22 | extern void add_memory_region(unsigned long start, unsigned long size, | ||
20 | int type); | 23 | int type); |
21 | extern void update_memory_range(u64 start, u64 size, unsigned old_type, | 24 | extern void update_memory_range(u64 start, u64 size, unsigned old_type, |
22 | unsigned new_type); | 25 | unsigned new_type); |
23 | extern void setup_memory_region(void); | 26 | extern void setup_memory_region(void); |
24 | extern void contig_e820_setup(void); | 27 | extern void contig_e820_setup(void); |
25 | extern unsigned long e820_end_of_ram(void); | 28 | extern unsigned long e820_end_of_ram(void); |
26 | extern void e820_reserve_resources(struct resource *code_resource, | 29 | extern void e820_reserve_resources(void); |
27 | struct resource *data_resource, struct resource *bss_resource); | ||
28 | extern void e820_mark_nosave_regions(void); | 30 | extern void e820_mark_nosave_regions(void); |
29 | extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type); | 31 | extern int e820_any_mapped(unsigned long start, unsigned long end, |
30 | extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type); | 32 | unsigned type); |
33 | extern int e820_all_mapped(unsigned long start, unsigned long end, | ||
34 | unsigned type); | ||
31 | extern int e820_any_non_reserved(unsigned long start, unsigned long end); | 35 | extern int e820_any_non_reserved(unsigned long start, unsigned long end); |
32 | extern int is_memory_any_valid(unsigned long start, unsigned long end); | 36 | extern int is_memory_any_valid(unsigned long start, unsigned long end); |
33 | extern int e820_all_non_reserved(unsigned long start, unsigned long end); | 37 | extern int e820_all_non_reserved(unsigned long start, unsigned long end); |
@@ -35,8 +39,8 @@ extern int is_memory_all_valid(unsigned long start, unsigned long end); | |||
35 | extern unsigned long e820_hole_size(unsigned long start, unsigned long end); | 39 | extern unsigned long e820_hole_size(unsigned long start, unsigned long end); |
36 | 40 | ||
37 | extern void e820_setup_gap(void); | 41 | extern void e820_setup_gap(void); |
38 | extern void e820_register_active_regions(int nid, | 42 | extern void e820_register_active_regions(int nid, unsigned long start_pfn, |
39 | unsigned long start_pfn, unsigned long end_pfn); | 43 | unsigned long end_pfn); |
40 | 44 | ||
41 | extern void finish_e820_parsing(void); | 45 | extern void finish_e820_parsing(void); |
42 | 46 | ||
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h index cf3200a745ad..a8088f63a30e 100644 --- a/include/asm-x86/edac.h +++ b/include/asm-x86/edac.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ | 4 | /* ECC atomic, DMA, SMP and interrupt safe scrub function */ |
5 | 5 | ||
6 | static __inline__ void atomic_scrub(void *va, u32 size) | 6 | static inline void atomic_scrub(void *va, u32 size) |
7 | { | 7 | { |
8 | u32 i, *virt_addr = va; | 8 | u32 i, *virt_addr = va; |
9 | 9 | ||
@@ -12,7 +12,7 @@ static __inline__ void atomic_scrub(void *va, u32 size) | |||
12 | * are interrupt, DMA and SMP safe. | 12 | * are interrupt, DMA and SMP safe. |
13 | */ | 13 | */ |
14 | for (i = 0; i < size / 4; i++, virt_addr++) | 14 | for (i = 0; i < size / 4; i++, virt_addr++) |
15 | __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); | 15 | asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); |
16 | } | 16 | } |
17 | 17 | ||
18 | #endif | 18 | #endif |
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h index ea9734b74aca..d53004b855cc 100644 --- a/include/asm-x86/efi.h +++ b/include/asm-x86/efi.h | |||
@@ -20,7 +20,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #define efi_call_virt(f, args...) \ | 22 | #define efi_call_virt(f, args...) \ |
23 | ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) | 23 | ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) |
24 | 24 | ||
25 | #define efi_call_virt0(f) efi_call_virt(f) | 25 | #define efi_call_virt0(f) efi_call_virt(f) |
26 | #define efi_call_virt1(f, a1) efi_call_virt(f, a1) | 26 | #define efi_call_virt1(f, a1) efi_call_virt(f, a1) |
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h index fb62f9941e38..8f232dc5b5fe 100644 --- a/include/asm-x86/elf.h +++ b/include/asm-x86/elf.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | typedef unsigned long elf_greg_t; | 12 | typedef unsigned long elf_greg_t; |
13 | 13 | ||
14 | #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) | 14 | #define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) |
15 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | 15 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; |
16 | 16 | ||
17 | typedef struct user_i387_struct elf_fpregset_t; | 17 | typedef struct user_i387_struct elf_fpregset_t; |
@@ -82,8 +82,9 @@ extern unsigned int vdso_enabled; | |||
82 | #define elf_check_arch_ia32(x) \ | 82 | #define elf_check_arch_ia32(x) \ |
83 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) | 83 | (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) |
84 | 84 | ||
85 | #ifdef CONFIG_X86_32 | ||
86 | #include <asm/processor.h> | 85 | #include <asm/processor.h> |
86 | |||
87 | #ifdef CONFIG_X86_32 | ||
87 | #include <asm/system.h> /* for savesegment */ | 88 | #include <asm/system.h> /* for savesegment */ |
88 | #include <asm/desc.h> | 89 | #include <asm/desc.h> |
89 | 90 | ||
@@ -99,10 +100,11 @@ extern unsigned int vdso_enabled; | |||
99 | We might as well make sure everything else is cleared too (except for %esp), | 100 | We might as well make sure everything else is cleared too (except for %esp), |
100 | just to make things more deterministic. | 101 | just to make things more deterministic. |
101 | */ | 102 | */ |
102 | #define ELF_PLAT_INIT(_r, load_addr) do { \ | 103 | #define ELF_PLAT_INIT(_r, load_addr) \ |
103 | _r->bx = 0; _r->cx = 0; _r->dx = 0; \ | 104 | do { \ |
104 | _r->si = 0; _r->di = 0; _r->bp = 0; \ | 105 | _r->bx = 0; _r->cx = 0; _r->dx = 0; \ |
105 | _r->ax = 0; \ | 106 | _r->si = 0; _r->di = 0; _r->bp = 0; \ |
107 | _r->ax = 0; \ | ||
106 | } while (0) | 108 | } while (0) |
107 | 109 | ||
108 | /* | 110 | /* |
@@ -110,24 +112,25 @@ extern unsigned int vdso_enabled; | |||
110 | * now struct_user_regs, they are different) | 112 | * now struct_user_regs, they are different) |
111 | */ | 113 | */ |
112 | 114 | ||
113 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ | 115 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ |
114 | pr_reg[0] = regs->bx; \ | 116 | do { \ |
115 | pr_reg[1] = regs->cx; \ | 117 | pr_reg[0] = regs->bx; \ |
116 | pr_reg[2] = regs->dx; \ | 118 | pr_reg[1] = regs->cx; \ |
117 | pr_reg[3] = regs->si; \ | 119 | pr_reg[2] = regs->dx; \ |
118 | pr_reg[4] = regs->di; \ | 120 | pr_reg[3] = regs->si; \ |
119 | pr_reg[5] = regs->bp; \ | 121 | pr_reg[4] = regs->di; \ |
120 | pr_reg[6] = regs->ax; \ | 122 | pr_reg[5] = regs->bp; \ |
121 | pr_reg[7] = regs->ds & 0xffff; \ | 123 | pr_reg[6] = regs->ax; \ |
122 | pr_reg[8] = regs->es & 0xffff; \ | 124 | pr_reg[7] = regs->ds & 0xffff; \ |
123 | pr_reg[9] = regs->fs & 0xffff; \ | 125 | pr_reg[8] = regs->es & 0xffff; \ |
124 | savesegment(gs, pr_reg[10]); \ | 126 | pr_reg[9] = regs->fs & 0xffff; \ |
125 | pr_reg[11] = regs->orig_ax; \ | 127 | savesegment(gs, pr_reg[10]); \ |
126 | pr_reg[12] = regs->ip; \ | 128 | pr_reg[11] = regs->orig_ax; \ |
127 | pr_reg[13] = regs->cs & 0xffff; \ | 129 | pr_reg[12] = regs->ip; \ |
128 | pr_reg[14] = regs->flags; \ | 130 | pr_reg[13] = regs->cs & 0xffff; \ |
129 | pr_reg[15] = regs->sp; \ | 131 | pr_reg[14] = regs->flags; \ |
130 | pr_reg[16] = regs->ss & 0xffff; \ | 132 | pr_reg[15] = regs->sp; \ |
133 | pr_reg[16] = regs->ss & 0xffff; \ | ||
131 | } while (0); | 134 | } while (0); |
132 | 135 | ||
133 | #define ELF_PLATFORM (utsname()->machine) | 136 | #define ELF_PLATFORM (utsname()->machine) |
@@ -135,12 +138,10 @@ extern unsigned int vdso_enabled; | |||
135 | 138 | ||
136 | #else /* CONFIG_X86_32 */ | 139 | #else /* CONFIG_X86_32 */ |
137 | 140 | ||
138 | #include <asm/processor.h> | ||
139 | |||
140 | /* | 141 | /* |
141 | * This is used to ensure we don't load something for the wrong architecture. | 142 | * This is used to ensure we don't load something for the wrong architecture. |
142 | */ | 143 | */ |
143 | #define elf_check_arch(x) \ | 144 | #define elf_check_arch(x) \ |
144 | ((x)->e_machine == EM_X86_64) | 145 | ((x)->e_machine == EM_X86_64) |
145 | 146 | ||
146 | #define compat_elf_check_arch(x) elf_check_arch_ia32(x) | 147 | #define compat_elf_check_arch(x) elf_check_arch_ia32(x) |
@@ -169,24 +170,30 @@ static inline void elf_common_init(struct thread_struct *t, | |||
169 | t->ds = t->es = ds; | 170 | t->ds = t->es = ds; |
170 | } | 171 | } |
171 | 172 | ||
172 | #define ELF_PLAT_INIT(_r, load_addr) do { \ | 173 | #define ELF_PLAT_INIT(_r, load_addr) \ |
173 | elf_common_init(¤t->thread, _r, 0); \ | 174 | do { \ |
174 | clear_thread_flag(TIF_IA32); \ | 175 | elf_common_init(¤t->thread, _r, 0); \ |
176 | clear_thread_flag(TIF_IA32); \ | ||
175 | } while (0) | 177 | } while (0) |
176 | 178 | ||
177 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ | 179 | #define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ |
178 | elf_common_init(¤t->thread, regs, __USER_DS) | 180 | elf_common_init(¤t->thread, regs, __USER_DS) |
179 | #define compat_start_thread(regs, ip, sp) do { \ | 181 | |
180 | start_ia32_thread(regs, ip, sp); \ | 182 | #define compat_start_thread(regs, ip, sp) \ |
181 | set_fs(USER_DS); \ | 183 | do { \ |
182 | } while (0) | 184 | start_ia32_thread(regs, ip, sp); \ |
183 | #define COMPAT_SET_PERSONALITY(ex, ibcs2) do { \ | 185 | set_fs(USER_DS); \ |
184 | if (test_thread_flag(TIF_IA32)) \ | 186 | } while (0) |
185 | clear_thread_flag(TIF_ABI_PENDING); \ | 187 | |
186 | else \ | 188 | #define COMPAT_SET_PERSONALITY(ex, ibcs2) \ |
187 | set_thread_flag(TIF_ABI_PENDING); \ | 189 | do { \ |
188 | current->personality |= force_personality32; \ | 190 | if (test_thread_flag(TIF_IA32)) \ |
189 | } while (0) | 191 | clear_thread_flag(TIF_ABI_PENDING); \ |
192 | else \ | ||
193 | set_thread_flag(TIF_ABI_PENDING); \ | ||
194 | current->personality |= force_personality32; \ | ||
195 | } while (0) | ||
196 | |||
190 | #define COMPAT_ELF_PLATFORM ("i686") | 197 | #define COMPAT_ELF_PLATFORM ("i686") |
191 | 198 | ||
192 | /* | 199 | /* |
@@ -195,7 +202,8 @@ static inline void elf_common_init(struct thread_struct *t, | |||
195 | * getting dumped. | 202 | * getting dumped. |
196 | */ | 203 | */ |
197 | 204 | ||
198 | #define ELF_CORE_COPY_REGS(pr_reg, regs) do { \ | 205 | #define ELF_CORE_COPY_REGS(pr_reg, regs) \ |
206 | do { \ | ||
199 | unsigned v; \ | 207 | unsigned v; \ |
200 | (pr_reg)[0] = (regs)->r15; \ | 208 | (pr_reg)[0] = (regs)->r15; \ |
201 | (pr_reg)[1] = (regs)->r14; \ | 209 | (pr_reg)[1] = (regs)->r14; \ |
@@ -269,10 +277,12 @@ extern int force_personality32; | |||
269 | 277 | ||
270 | struct task_struct; | 278 | struct task_struct; |
271 | 279 | ||
272 | #define ARCH_DLINFO_IA32(vdso_enabled) \ | 280 | #define ARCH_DLINFO_IA32(vdso_enabled) \ |
273 | do if (vdso_enabled) { \ | 281 | do { \ |
282 | if (vdso_enabled) { \ | ||
274 | NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ | 283 | NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ |
275 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ | 284 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ |
285 | } \ | ||
276 | } while (0) | 286 | } while (0) |
277 | 287 | ||
278 | #ifdef CONFIG_X86_32 | 288 | #ifdef CONFIG_X86_32 |
@@ -290,9 +300,11 @@ do if (vdso_enabled) { \ | |||
290 | /* 1GB for 64bit, 8MB for 32bit */ | 300 | /* 1GB for 64bit, 8MB for 32bit */ |
291 | #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) | 301 | #define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) |
292 | 302 | ||
293 | #define ARCH_DLINFO \ | 303 | #define ARCH_DLINFO \ |
294 | do if (vdso_enabled) { \ | 304 | do { \ |
295 | NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\ | 305 | if (vdso_enabled) \ |
306 | NEW_AUX_ENT(AT_SYSINFO_EHDR, \ | ||
307 | (unsigned long)current->mm->context.vdso); \ | ||
296 | } while (0) | 308 | } while (0) |
297 | 309 | ||
298 | #define AT_SYSINFO 32 | 310 | #define AT_SYSINFO 32 |
@@ -305,8 +317,8 @@ do if (vdso_enabled) { \ | |||
305 | 317 | ||
306 | #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) | 318 | #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) |
307 | 319 | ||
308 | #define VDSO_ENTRY \ | 320 | #define VDSO_ENTRY \ |
309 | ((unsigned long) VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) | 321 | ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) |
310 | 322 | ||
311 | struct linux_binprm; | 323 | struct linux_binprm; |
312 | 324 | ||
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h index a7404d50686b..eb1665125c44 100644 --- a/include/asm-x86/fixmap_32.h +++ b/include/asm-x86/fixmap_32.h | |||
@@ -99,8 +99,7 @@ enum fixed_addresses { | |||
99 | */ | 99 | */ |
100 | #define NR_FIX_BTMAPS 64 | 100 | #define NR_FIX_BTMAPS 64 |
101 | #define FIX_BTMAPS_NESTING 4 | 101 | #define FIX_BTMAPS_NESTING 4 |
102 | FIX_BTMAP_END = | 102 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 512 - |
103 | __end_of_permanent_fixed_addresses + 512 - | ||
104 | (__end_of_permanent_fixed_addresses & 511), | 103 | (__end_of_permanent_fixed_addresses & 511), |
105 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, | 104 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1, |
106 | FIX_WP_TEST, | 105 | FIX_WP_TEST, |
@@ -110,20 +109,20 @@ enum fixed_addresses { | |||
110 | __end_of_fixed_addresses | 109 | __end_of_fixed_addresses |
111 | }; | 110 | }; |
112 | 111 | ||
113 | extern void __set_fixmap (enum fixed_addresses idx, | 112 | extern void __set_fixmap(enum fixed_addresses idx, |
114 | unsigned long phys, pgprot_t flags); | 113 | unsigned long phys, pgprot_t flags); |
115 | extern void reserve_top_address(unsigned long reserve); | 114 | extern void reserve_top_address(unsigned long reserve); |
116 | 115 | ||
117 | #define set_fixmap(idx, phys) \ | 116 | #define set_fixmap(idx, phys) \ |
118 | __set_fixmap(idx, phys, PAGE_KERNEL) | 117 | __set_fixmap(idx, phys, PAGE_KERNEL) |
119 | /* | 118 | /* |
120 | * Some hardware wants to get fixmapped without caching. | 119 | * Some hardware wants to get fixmapped without caching. |
121 | */ | 120 | */ |
122 | #define set_fixmap_nocache(idx, phys) \ | 121 | #define set_fixmap_nocache(idx, phys) \ |
123 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | 122 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) |
124 | 123 | ||
125 | #define clear_fixmap(idx) \ | 124 | #define clear_fixmap(idx) \ |
126 | __set_fixmap(idx, 0, __pgprot(0)) | 125 | __set_fixmap(idx, 0, __pgprot(0)) |
127 | 126 | ||
128 | #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) | 127 | #define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) |
129 | 128 | ||
@@ -156,7 +155,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx) | |||
156 | if (idx >= __end_of_fixed_addresses) | 155 | if (idx >= __end_of_fixed_addresses) |
157 | __this_fixmap_does_not_exist(); | 156 | __this_fixmap_does_not_exist(); |
158 | 157 | ||
159 | return __fix_to_virt(idx); | 158 | return __fix_to_virt(idx); |
160 | } | 159 | } |
161 | 160 | ||
162 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | 161 | static inline unsigned long virt_to_fix(const unsigned long vaddr) |
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h index 70ddb21e6458..f3d76858c0e6 100644 --- a/include/asm-x86/fixmap_64.h +++ b/include/asm-x86/fixmap_64.h | |||
@@ -34,32 +34,34 @@ | |||
34 | 34 | ||
35 | enum fixed_addresses { | 35 | enum fixed_addresses { |
36 | VSYSCALL_LAST_PAGE, | 36 | VSYSCALL_LAST_PAGE, |
37 | VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, | 37 | VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE |
38 | + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, | ||
38 | VSYSCALL_HPET, | 39 | VSYSCALL_HPET, |
39 | FIX_DBGP_BASE, | 40 | FIX_DBGP_BASE, |
40 | FIX_EARLYCON_MEM_BASE, | 41 | FIX_EARLYCON_MEM_BASE, |
41 | FIX_HPET_BASE, | 42 | FIX_HPET_BASE, |
42 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ | 43 | FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ |
43 | FIX_IO_APIC_BASE_0, | 44 | FIX_IO_APIC_BASE_0, |
44 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, | 45 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, |
45 | FIX_EFI_IO_MAP_LAST_PAGE, | 46 | FIX_EFI_IO_MAP_LAST_PAGE, |
46 | FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE+MAX_EFI_IO_PAGES-1, | 47 | FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE |
48 | + MAX_EFI_IO_PAGES - 1, | ||
47 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | 49 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT |
48 | FIX_OHCI1394_BASE, | 50 | FIX_OHCI1394_BASE, |
49 | #endif | 51 | #endif |
50 | __end_of_fixed_addresses | 52 | __end_of_fixed_addresses |
51 | }; | 53 | }; |
52 | 54 | ||
53 | extern void __set_fixmap (enum fixed_addresses idx, | 55 | extern void __set_fixmap(enum fixed_addresses idx, |
54 | unsigned long phys, pgprot_t flags); | 56 | unsigned long phys, pgprot_t flags); |
55 | 57 | ||
56 | #define set_fixmap(idx, phys) \ | 58 | #define set_fixmap(idx, phys) \ |
57 | __set_fixmap(idx, phys, PAGE_KERNEL) | 59 | __set_fixmap(idx, phys, PAGE_KERNEL) |
58 | /* | 60 | /* |
59 | * Some hardware wants to get fixmapped without caching. | 61 | * Some hardware wants to get fixmapped without caching. |
60 | */ | 62 | */ |
61 | #define set_fixmap_nocache(idx, phys) \ | 63 | #define set_fixmap_nocache(idx, phys) \ |
62 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | 64 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) |
63 | 65 | ||
64 | #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) | 66 | #define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE) |
65 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | 67 | #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) |
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h index a48d7153c097..dbe82a5c5eac 100644 --- a/include/asm-x86/floppy.h +++ b/include/asm-x86/floppy.h | |||
@@ -20,20 +20,21 @@ | |||
20 | * driver otherwise. It doesn't matter much for performance anyway, as most | 20 | * driver otherwise. It doesn't matter much for performance anyway, as most |
21 | * floppy accesses go through the track buffer. | 21 | * floppy accesses go through the track buffer. |
22 | */ | 22 | */ |
23 | #define _CROSS_64KB(a,s,vdma) \ | 23 | #define _CROSS_64KB(a, s, vdma) \ |
24 | (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) | 24 | (!(vdma) && \ |
25 | ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) | ||
25 | 26 | ||
26 | #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) | 27 | #define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1) |
27 | 28 | ||
28 | 29 | ||
29 | #define SW fd_routine[use_virtual_dma&1] | 30 | #define SW fd_routine[use_virtual_dma & 1] |
30 | #define CSW fd_routine[can_use_virtual_dma & 1] | 31 | #define CSW fd_routine[can_use_virtual_dma & 1] |
31 | 32 | ||
32 | 33 | ||
33 | #define fd_inb(port) inb_p(port) | 34 | #define fd_inb(port) inb_p(port) |
34 | #define fd_outb(value,port) outb_p(value,port) | 35 | #define fd_outb(value, port) outb_p(value, port) |
35 | 36 | ||
36 | #define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") | 37 | #define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy") |
37 | #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) | 38 | #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) |
38 | #define fd_enable_irq() enable_irq(FLOPPY_IRQ) | 39 | #define fd_enable_irq() enable_irq(FLOPPY_IRQ) |
39 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) | 40 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) |
@@ -52,64 +53,64 @@ static int doing_pdma; | |||
52 | 53 | ||
53 | static irqreturn_t floppy_hardint(int irq, void *dev_id) | 54 | static irqreturn_t floppy_hardint(int irq, void *dev_id) |
54 | { | 55 | { |
55 | register unsigned char st; | 56 | unsigned char st; |
56 | 57 | ||
57 | #undef TRACE_FLPY_INT | 58 | #undef TRACE_FLPY_INT |
58 | 59 | ||
59 | #ifdef TRACE_FLPY_INT | 60 | #ifdef TRACE_FLPY_INT |
60 | static int calls=0; | 61 | static int calls; |
61 | static int bytes=0; | 62 | static int bytes; |
62 | static int dma_wait=0; | 63 | static int dma_wait; |
63 | #endif | 64 | #endif |
64 | if (!doing_pdma) | 65 | if (!doing_pdma) |
65 | return floppy_interrupt(irq, dev_id); | 66 | return floppy_interrupt(irq, dev_id); |
66 | 67 | ||
67 | #ifdef TRACE_FLPY_INT | 68 | #ifdef TRACE_FLPY_INT |
68 | if(!calls) | 69 | if (!calls) |
69 | bytes = virtual_dma_count; | 70 | bytes = virtual_dma_count; |
70 | #endif | 71 | #endif |
71 | 72 | ||
72 | { | 73 | { |
73 | register int lcount; | 74 | int lcount; |
74 | register char *lptr; | 75 | char *lptr; |
75 | 76 | ||
76 | st = 1; | 77 | st = 1; |
77 | for(lcount=virtual_dma_count, lptr=virtual_dma_addr; | 78 | for (lcount = virtual_dma_count, lptr = virtual_dma_addr; |
78 | lcount; lcount--, lptr++) { | 79 | lcount; lcount--, lptr++) { |
79 | st=inb(virtual_dma_port+4) & 0xa0 ; | 80 | st = inb(virtual_dma_port + 4) & 0xa0; |
80 | if(st != 0xa0) | 81 | if (st != 0xa0) |
81 | break; | 82 | break; |
82 | if(virtual_dma_mode) | 83 | if (virtual_dma_mode) |
83 | outb_p(*lptr, virtual_dma_port+5); | 84 | outb_p(*lptr, virtual_dma_port + 5); |
84 | else | 85 | else |
85 | *lptr = inb_p(virtual_dma_port+5); | 86 | *lptr = inb_p(virtual_dma_port + 5); |
86 | } | 87 | } |
87 | virtual_dma_count = lcount; | 88 | virtual_dma_count = lcount; |
88 | virtual_dma_addr = lptr; | 89 | virtual_dma_addr = lptr; |
89 | st = inb(virtual_dma_port+4); | 90 | st = inb(virtual_dma_port + 4); |
90 | } | 91 | } |
91 | 92 | ||
92 | #ifdef TRACE_FLPY_INT | 93 | #ifdef TRACE_FLPY_INT |
93 | calls++; | 94 | calls++; |
94 | #endif | 95 | #endif |
95 | if(st == 0x20) | 96 | if (st == 0x20) |
96 | return IRQ_HANDLED; | 97 | return IRQ_HANDLED; |
97 | if(!(st & 0x20)) { | 98 | if (!(st & 0x20)) { |
98 | virtual_dma_residue += virtual_dma_count; | 99 | virtual_dma_residue += virtual_dma_count; |
99 | virtual_dma_count=0; | 100 | virtual_dma_count = 0; |
100 | #ifdef TRACE_FLPY_INT | 101 | #ifdef TRACE_FLPY_INT |
101 | printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", | 102 | printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", |
102 | virtual_dma_count, virtual_dma_residue, calls, bytes, | 103 | virtual_dma_count, virtual_dma_residue, calls, bytes, |
103 | dma_wait); | 104 | dma_wait); |
104 | calls = 0; | 105 | calls = 0; |
105 | dma_wait=0; | 106 | dma_wait = 0; |
106 | #endif | 107 | #endif |
107 | doing_pdma = 0; | 108 | doing_pdma = 0; |
108 | floppy_interrupt(irq, dev_id); | 109 | floppy_interrupt(irq, dev_id); |
109 | return IRQ_HANDLED; | 110 | return IRQ_HANDLED; |
110 | } | 111 | } |
111 | #ifdef TRACE_FLPY_INT | 112 | #ifdef TRACE_FLPY_INT |
112 | if(!virtual_dma_count) | 113 | if (!virtual_dma_count) |
113 | dma_wait++; | 114 | dma_wait++; |
114 | #endif | 115 | #endif |
115 | return IRQ_HANDLED; | 116 | return IRQ_HANDLED; |
@@ -117,14 +118,14 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) | |||
117 | 118 | ||
118 | static void fd_disable_dma(void) | 119 | static void fd_disable_dma(void) |
119 | { | 120 | { |
120 | if(! (can_use_virtual_dma & 1)) | 121 | if (!(can_use_virtual_dma & 1)) |
121 | disable_dma(FLOPPY_DMA); | 122 | disable_dma(FLOPPY_DMA); |
122 | doing_pdma = 0; | 123 | doing_pdma = 0; |
123 | virtual_dma_residue += virtual_dma_count; | 124 | virtual_dma_residue += virtual_dma_count; |
124 | virtual_dma_count=0; | 125 | virtual_dma_count = 0; |
125 | } | 126 | } |
126 | 127 | ||
127 | static int vdma_request_dma(unsigned int dmanr, const char * device_id) | 128 | static int vdma_request_dma(unsigned int dmanr, const char *device_id) |
128 | { | 129 | { |
129 | return 0; | 130 | return 0; |
130 | } | 131 | } |
@@ -142,7 +143,7 @@ static int vdma_get_dma_residue(unsigned int dummy) | |||
142 | 143 | ||
143 | static int fd_request_irq(void) | 144 | static int fd_request_irq(void) |
144 | { | 145 | { |
145 | if(can_use_virtual_dma) | 146 | if (can_use_virtual_dma) |
146 | return request_irq(FLOPPY_IRQ, floppy_hardint, | 147 | return request_irq(FLOPPY_IRQ, floppy_hardint, |
147 | IRQF_DISABLED, "floppy", NULL); | 148 | IRQF_DISABLED, "floppy", NULL); |
148 | else | 149 | else |
@@ -152,13 +153,13 @@ static int fd_request_irq(void) | |||
152 | 153 | ||
153 | static unsigned long dma_mem_alloc(unsigned long size) | 154 | static unsigned long dma_mem_alloc(unsigned long size) |
154 | { | 155 | { |
155 | return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size)); | 156 | return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size)); |
156 | } | 157 | } |
157 | 158 | ||
158 | 159 | ||
159 | static unsigned long vdma_mem_alloc(unsigned long size) | 160 | static unsigned long vdma_mem_alloc(unsigned long size) |
160 | { | 161 | { |
161 | return (unsigned long) vmalloc(size); | 162 | return (unsigned long)vmalloc(size); |
162 | 163 | ||
163 | } | 164 | } |
164 | 165 | ||
@@ -166,7 +167,7 @@ static unsigned long vdma_mem_alloc(unsigned long size) | |||
166 | 167 | ||
167 | static void _fd_dma_mem_free(unsigned long addr, unsigned long size) | 168 | static void _fd_dma_mem_free(unsigned long addr, unsigned long size) |
168 | { | 169 | { |
169 | if((unsigned long) addr >= (unsigned long) high_memory) | 170 | if ((unsigned long)addr >= (unsigned long)high_memory) |
170 | vfree((void *)addr); | 171 | vfree((void *)addr); |
171 | else | 172 | else |
172 | free_pages(addr, get_order(size)); | 173 | free_pages(addr, get_order(size)); |
@@ -176,10 +177,10 @@ static void _fd_dma_mem_free(unsigned long addr, unsigned long size) | |||
176 | 177 | ||
177 | static void _fd_chose_dma_mode(char *addr, unsigned long size) | 178 | static void _fd_chose_dma_mode(char *addr, unsigned long size) |
178 | { | 179 | { |
179 | if(can_use_virtual_dma == 2) { | 180 | if (can_use_virtual_dma == 2) { |
180 | if((unsigned long) addr >= (unsigned long) high_memory || | 181 | if ((unsigned long)addr >= (unsigned long)high_memory || |
181 | isa_virt_to_bus(addr) >= 0x1000000 || | 182 | isa_virt_to_bus(addr) >= 0x1000000 || |
182 | _CROSS_64KB(addr, size, 0)) | 183 | _CROSS_64KB(addr, size, 0)) |
183 | use_virtual_dma = 1; | 184 | use_virtual_dma = 1; |
184 | else | 185 | else |
185 | use_virtual_dma = 0; | 186 | use_virtual_dma = 0; |
@@ -195,7 +196,7 @@ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) | |||
195 | { | 196 | { |
196 | doing_pdma = 1; | 197 | doing_pdma = 1; |
197 | virtual_dma_port = io; | 198 | virtual_dma_port = io; |
198 | virtual_dma_mode = (mode == DMA_MODE_WRITE); | 199 | virtual_dma_mode = (mode == DMA_MODE_WRITE); |
199 | virtual_dma_addr = addr; | 200 | virtual_dma_addr = addr; |
200 | virtual_dma_count = size; | 201 | virtual_dma_count = size; |
201 | virtual_dma_residue = 0; | 202 | virtual_dma_residue = 0; |
@@ -213,18 +214,18 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) | |||
213 | /* actual, physical DMA */ | 214 | /* actual, physical DMA */ |
214 | doing_pdma = 0; | 215 | doing_pdma = 0; |
215 | clear_dma_ff(FLOPPY_DMA); | 216 | clear_dma_ff(FLOPPY_DMA); |
216 | set_dma_mode(FLOPPY_DMA,mode); | 217 | set_dma_mode(FLOPPY_DMA, mode); |
217 | set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); | 218 | set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr)); |
218 | set_dma_count(FLOPPY_DMA,size); | 219 | set_dma_count(FLOPPY_DMA, size); |
219 | enable_dma(FLOPPY_DMA); | 220 | enable_dma(FLOPPY_DMA); |
220 | return 0; | 221 | return 0; |
221 | } | 222 | } |
222 | 223 | ||
223 | static struct fd_routine_l { | 224 | static struct fd_routine_l { |
224 | int (*_request_dma)(unsigned int dmanr, const char * device_id); | 225 | int (*_request_dma)(unsigned int dmanr, const char *device_id); |
225 | void (*_free_dma)(unsigned int dmanr); | 226 | void (*_free_dma)(unsigned int dmanr); |
226 | int (*_get_dma_residue)(unsigned int dummy); | 227 | int (*_get_dma_residue)(unsigned int dummy); |
227 | unsigned long (*_dma_mem_alloc) (unsigned long size); | 228 | unsigned long (*_dma_mem_alloc)(unsigned long size); |
228 | int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); | 229 | int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); |
229 | } fd_routine[] = { | 230 | } fd_routine[] = { |
230 | { | 231 | { |
@@ -252,7 +253,8 @@ static int FDC2 = -1; | |||
252 | * is needed to prevent corrupted CMOS RAM in case "insmod floppy" | 253 | * is needed to prevent corrupted CMOS RAM in case "insmod floppy" |
253 | * coincides with another rtc CMOS user. Paul G. | 254 | * coincides with another rtc CMOS user. Paul G. |
254 | */ | 255 | */ |
255 | #define FLOPPY0_TYPE ({ \ | 256 | #define FLOPPY0_TYPE \ |
257 | ({ \ | ||
256 | unsigned long flags; \ | 258 | unsigned long flags; \ |
257 | unsigned char val; \ | 259 | unsigned char val; \ |
258 | spin_lock_irqsave(&rtc_lock, flags); \ | 260 | spin_lock_irqsave(&rtc_lock, flags); \ |
@@ -261,7 +263,8 @@ static int FDC2 = -1; | |||
261 | val; \ | 263 | val; \ |
262 | }) | 264 | }) |
263 | 265 | ||
264 | #define FLOPPY1_TYPE ({ \ | 266 | #define FLOPPY1_TYPE \ |
267 | ({ \ | ||
265 | unsigned long flags; \ | 268 | unsigned long flags; \ |
266 | unsigned char val; \ | 269 | unsigned char val; \ |
267 | spin_lock_irqsave(&rtc_lock, flags); \ | 270 | spin_lock_irqsave(&rtc_lock, flags); \ |
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h index c9952ea9f698..ac0fbf24d722 100644 --- a/include/asm-x86/futex.h +++ b/include/asm-x86/futex.h | |||
@@ -12,35 +12,32 @@ | |||
12 | #include <asm/uaccess.h> | 12 | #include <asm/uaccess.h> |
13 | 13 | ||
14 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ | 14 | #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ |
15 | __asm__ __volatile( \ | 15 | asm volatile("1:\t" insn "\n" \ |
16 | "1: " insn "\n" \ | 16 | "2:\t.section .fixup,\"ax\"\n" \ |
17 | "2: .section .fixup,\"ax\"\n \ | 17 | "3:\tmov\t%3, %1\n" \ |
18 | 3: mov %3, %1\n \ | 18 | "\tjmp\t2b\n" \ |
19 | jmp 2b\n \ | 19 | "\t.previous\n" \ |
20 | .previous\n" \ | 20 | _ASM_EXTABLE(1b, 3b) \ |
21 | _ASM_EXTABLE(1b,3b) \ | 21 | : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ |
22 | : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ | 22 | : "i" (-EFAULT), "0" (oparg), "1" (0)) |
23 | : "i" (-EFAULT), "0" (oparg), "1" (0)) | ||
24 | 23 | ||
25 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ | 24 | #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ |
26 | __asm__ __volatile( \ | 25 | asm volatile("1:\tmovl %2, %0\n" \ |
27 | "1: movl %2, %0\n \ | 26 | "\tmovl\t%0, %3\n" \ |
28 | movl %0, %3\n" \ | 27 | "\t" insn "\n" \ |
29 | insn "\n" \ | 28 | "2:\tlock; cmpxchgl %3, %2\n" \ |
30 | "2: lock; cmpxchgl %3, %2\n \ | 29 | "\tjnz\t1b\n" \ |
31 | jnz 1b\n \ | 30 | "3:\t.section .fixup,\"ax\"\n" \ |
32 | 3: .section .fixup,\"ax\"\n \ | 31 | "4:\tmov\t%5, %1\n" \ |
33 | 4: mov %5, %1\n \ | 32 | "\tjmp\t3b\n" \ |
34 | jmp 3b\n \ | 33 | "\t.previous\n" \ |
35 | .previous\n" \ | 34 | _ASM_EXTABLE(1b, 4b) \ |
36 | _ASM_EXTABLE(1b,4b) \ | 35 | _ASM_EXTABLE(2b, 4b) \ |
37 | _ASM_EXTABLE(2b,4b) \ | 36 | : "=&a" (oldval), "=&r" (ret), \ |
38 | : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ | 37 | "+m" (*uaddr), "=&r" (tem) \ |
39 | "=&r" (tem) \ | 38 | : "r" (oparg), "i" (-EFAULT), "1" (0)) |
40 | : "r" (oparg), "i" (-EFAULT), "1" (0)) | 39 | |
41 | 40 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
42 | static inline int | ||
43 | futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | ||
44 | { | 41 | { |
45 | int op = (encoded_op >> 28) & 7; | 42 | int op = (encoded_op >> 28) & 7; |
46 | int cmp = (encoded_op >> 24) & 15; | 43 | int cmp = (encoded_op >> 24) & 15; |
@@ -87,20 +84,33 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
87 | 84 | ||
88 | if (!ret) { | 85 | if (!ret) { |
89 | switch (cmp) { | 86 | switch (cmp) { |
90 | case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; | 87 | case FUTEX_OP_CMP_EQ: |
91 | case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; | 88 | ret = (oldval == cmparg); |
92 | case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; | 89 | break; |
93 | case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; | 90 | case FUTEX_OP_CMP_NE: |
94 | case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; | 91 | ret = (oldval != cmparg); |
95 | case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; | 92 | break; |
96 | default: ret = -ENOSYS; | 93 | case FUTEX_OP_CMP_LT: |
94 | ret = (oldval < cmparg); | ||
95 | break; | ||
96 | case FUTEX_OP_CMP_GE: | ||
97 | ret = (oldval >= cmparg); | ||
98 | break; | ||
99 | case FUTEX_OP_CMP_LE: | ||
100 | ret = (oldval <= cmparg); | ||
101 | break; | ||
102 | case FUTEX_OP_CMP_GT: | ||
103 | ret = (oldval > cmparg); | ||
104 | break; | ||
105 | default: | ||
106 | ret = -ENOSYS; | ||
97 | } | 107 | } |
98 | } | 108 | } |
99 | return ret; | 109 | return ret; |
100 | } | 110 | } |
101 | 111 | ||
102 | static inline int | 112 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, |
103 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 113 | int newval) |
104 | { | 114 | { |
105 | 115 | ||
106 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) | 116 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) |
@@ -112,16 +122,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | |||
112 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 122 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) |
113 | return -EFAULT; | 123 | return -EFAULT; |
114 | 124 | ||
115 | __asm__ __volatile__( | 125 | asm volatile("1:\tlock; cmpxchgl %3, %1\n" |
116 | "1: lock; cmpxchgl %3, %1 \n" | 126 | "2:\t.section .fixup, \"ax\"\n" |
117 | "2: .section .fixup, \"ax\" \n" | 127 | "3:\tmov %2, %0\n" |
118 | "3: mov %2, %0 \n" | 128 | "\tjmp 2b\n" |
119 | " jmp 2b \n" | 129 | "\t.previous\n" |
120 | " .previous \n" | 130 | _ASM_EXTABLE(1b, 3b) |
121 | _ASM_EXTABLE(1b,3b) | 131 | : "=a" (oldval), "+m" (*uaddr) |
122 | : "=a" (oldval), "+m" (*uaddr) | 132 | : "i" (-EFAULT), "r" (newval), "0" (oldval) |
123 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | 133 | : "memory" |
124 | : "memory" | ||
125 | ); | 134 | ); |
126 | 135 | ||
127 | return oldval; | 136 | return oldval; |
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h index 33e3ffe1766c..f1b96932746b 100644 --- a/include/asm-x86/genapic_32.h +++ b/include/asm-x86/genapic_32.h | |||
@@ -14,23 +14,22 @@ | |||
14 | * Copyright 2003 Andi Kleen, SuSE Labs. | 14 | * Copyright 2003 Andi Kleen, SuSE Labs. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | struct mpc_config_translation; | ||
18 | struct mpc_config_bus; | 17 | struct mpc_config_bus; |
19 | struct mp_config_table; | 18 | struct mp_config_table; |
20 | struct mpc_config_processor; | 19 | struct mpc_config_processor; |
21 | 20 | ||
22 | struct genapic { | 21 | struct genapic { |
23 | char *name; | 22 | char *name; |
24 | int (*probe)(void); | 23 | int (*probe)(void); |
25 | 24 | ||
26 | int (*apic_id_registered)(void); | 25 | int (*apic_id_registered)(void); |
27 | cpumask_t (*target_cpus)(void); | 26 | cpumask_t (*target_cpus)(void); |
28 | int int_delivery_mode; | 27 | int int_delivery_mode; |
29 | int int_dest_mode; | 28 | int int_dest_mode; |
30 | int ESR_DISABLE; | 29 | int ESR_DISABLE; |
31 | int apic_destination_logical; | 30 | int apic_destination_logical; |
32 | unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); | 31 | unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); |
33 | unsigned long (*check_apicid_present)(int apicid); | 32 | unsigned long (*check_apicid_present)(int apicid); |
34 | int no_balance_irq; | 33 | int no_balance_irq; |
35 | int no_ioapic_check; | 34 | int no_ioapic_check; |
36 | void (*init_apic_ldr)(void); | 35 | void (*init_apic_ldr)(void); |
@@ -38,28 +37,21 @@ struct genapic { | |||
38 | 37 | ||
39 | void (*setup_apic_routing)(void); | 38 | void (*setup_apic_routing)(void); |
40 | int (*multi_timer_check)(int apic, int irq); | 39 | int (*multi_timer_check)(int apic, int irq); |
41 | int (*apicid_to_node)(int logical_apicid); | 40 | int (*apicid_to_node)(int logical_apicid); |
42 | int (*cpu_to_logical_apicid)(int cpu); | 41 | int (*cpu_to_logical_apicid)(int cpu); |
43 | int (*cpu_present_to_apicid)(int mps_cpu); | 42 | int (*cpu_present_to_apicid)(int mps_cpu); |
44 | physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); | 43 | physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); |
45 | int (*mpc_apic_id)(struct mpc_config_processor *m, | 44 | void (*setup_portio_remap)(void); |
46 | struct mpc_config_translation *t); | ||
47 | void (*setup_portio_remap)(void); | ||
48 | int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); | 45 | int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); |
49 | void (*enable_apic_mode)(void); | 46 | void (*enable_apic_mode)(void); |
50 | u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); | 47 | u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); |
51 | 48 | ||
52 | /* mpparse */ | 49 | /* mpparse */ |
53 | void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, | ||
54 | struct mpc_config_translation *); | ||
55 | void (*mpc_oem_pci_bus)(struct mpc_config_bus *, | ||
56 | struct mpc_config_translation *); | ||
57 | |||
58 | /* When one of the next two hooks returns 1 the genapic | 50 | /* When one of the next two hooks returns 1 the genapic |
59 | is switched to this. Essentially they are additional probe | 51 | is switched to this. Essentially they are additional probe |
60 | functions. */ | 52 | functions. */ |
61 | int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, | 53 | int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, |
62 | char *productid); | 54 | char *productid); |
63 | int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); | 55 | int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); |
64 | 56 | ||
65 | unsigned (*get_apic_id)(unsigned long x); | 57 | unsigned (*get_apic_id)(unsigned long x); |
@@ -72,7 +64,7 @@ struct genapic { | |||
72 | void (*send_IPI_allbutself)(int vector); | 64 | void (*send_IPI_allbutself)(int vector); |
73 | void (*send_IPI_all)(int vector); | 65 | void (*send_IPI_all)(int vector); |
74 | #endif | 66 | #endif |
75 | }; | 67 | }; |
76 | 68 | ||
77 | #define APICFUNC(x) .x = x, | 69 | #define APICFUNC(x) .x = x, |
78 | 70 | ||
@@ -85,43 +77,46 @@ struct genapic { | |||
85 | #define IPIFUNC(x) | 77 | #define IPIFUNC(x) |
86 | #endif | 78 | #endif |
87 | 79 | ||
88 | #define APIC_INIT(aname, aprobe) { \ | 80 | #define APIC_INIT(aname, aprobe) \ |
89 | .name = aname, \ | 81 | { \ |
90 | .probe = aprobe, \ | 82 | .name = aname, \ |
91 | .int_delivery_mode = INT_DELIVERY_MODE, \ | 83 | .probe = aprobe, \ |
92 | .int_dest_mode = INT_DEST_MODE, \ | 84 | .int_delivery_mode = INT_DELIVERY_MODE, \ |
93 | .no_balance_irq = NO_BALANCE_IRQ, \ | 85 | .int_dest_mode = INT_DEST_MODE, \ |
94 | .ESR_DISABLE = esr_disable, \ | 86 | .no_balance_irq = NO_BALANCE_IRQ, \ |
95 | .apic_destination_logical = APIC_DEST_LOGICAL, \ | 87 | .ESR_DISABLE = esr_disable, \ |
96 | APICFUNC(apic_id_registered) \ | 88 | .apic_destination_logical = APIC_DEST_LOGICAL, \ |
97 | APICFUNC(target_cpus) \ | 89 | APICFUNC(apic_id_registered) \ |
98 | APICFUNC(check_apicid_used) \ | 90 | APICFUNC(target_cpus) \ |
99 | APICFUNC(check_apicid_present) \ | 91 | APICFUNC(check_apicid_used) \ |
100 | APICFUNC(init_apic_ldr) \ | 92 | APICFUNC(check_apicid_present) \ |
101 | APICFUNC(ioapic_phys_id_map) \ | 93 | APICFUNC(init_apic_ldr) \ |
102 | APICFUNC(setup_apic_routing) \ | 94 | APICFUNC(ioapic_phys_id_map) \ |
103 | APICFUNC(multi_timer_check) \ | 95 | APICFUNC(setup_apic_routing) \ |
104 | APICFUNC(apicid_to_node) \ | 96 | APICFUNC(multi_timer_check) \ |
105 | APICFUNC(cpu_to_logical_apicid) \ | 97 | APICFUNC(apicid_to_node) \ |
106 | APICFUNC(cpu_present_to_apicid) \ | 98 | APICFUNC(cpu_to_logical_apicid) \ |
107 | APICFUNC(apicid_to_cpu_present) \ | 99 | APICFUNC(cpu_present_to_apicid) \ |
108 | APICFUNC(mpc_apic_id) \ | 100 | APICFUNC(apicid_to_cpu_present) \ |
109 | APICFUNC(setup_portio_remap) \ | 101 | APICFUNC(setup_portio_remap) \ |
110 | APICFUNC(check_phys_apicid_present) \ | 102 | APICFUNC(check_phys_apicid_present) \ |
111 | APICFUNC(mpc_oem_bus_info) \ | 103 | APICFUNC(mps_oem_check) \ |
112 | APICFUNC(mpc_oem_pci_bus) \ | 104 | APICFUNC(get_apic_id) \ |
113 | APICFUNC(mps_oem_check) \ | 105 | .apic_id_mask = APIC_ID_MASK, \ |
114 | APICFUNC(get_apic_id) \ | 106 | APICFUNC(cpu_mask_to_apicid) \ |
115 | .apic_id_mask = APIC_ID_MASK, \ | 107 | APICFUNC(acpi_madt_oem_check) \ |
116 | APICFUNC(cpu_mask_to_apicid) \ | 108 | IPIFUNC(send_IPI_mask) \ |
117 | APICFUNC(acpi_madt_oem_check) \ | 109 | IPIFUNC(send_IPI_allbutself) \ |
118 | IPIFUNC(send_IPI_mask) \ | 110 | IPIFUNC(send_IPI_all) \ |
119 | IPIFUNC(send_IPI_allbutself) \ | 111 | APICFUNC(enable_apic_mode) \ |
120 | IPIFUNC(send_IPI_all) \ | 112 | APICFUNC(phys_pkg_id) \ |
121 | APICFUNC(enable_apic_mode) \ | 113 | } |
122 | APICFUNC(phys_pkg_id) \ | ||
123 | } | ||
124 | 114 | ||
125 | extern struct genapic *genapic; | 115 | extern struct genapic *genapic; |
126 | 116 | ||
117 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; | ||
118 | #define get_uv_system_type() UV_NONE | ||
119 | #define is_uv_system() 0 | ||
120 | |||
121 | |||
127 | #endif | 122 | #endif |
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h index d7e516ccbaa4..1de931b263ce 100644 --- a/include/asm-x86/genapic_64.h +++ b/include/asm-x86/genapic_64.h | |||
@@ -33,5 +33,15 @@ extern struct genapic *genapic; | |||
33 | 33 | ||
34 | extern struct genapic apic_flat; | 34 | extern struct genapic apic_flat; |
35 | extern struct genapic apic_physflat; | 35 | extern struct genapic apic_physflat; |
36 | extern int acpi_madt_oem_check(char *, char *); | ||
37 | |||
38 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; | ||
39 | extern enum uv_system_type get_uv_system_type(void); | ||
40 | extern int is_uv_system(void); | ||
41 | |||
42 | extern struct genapic apic_x2apic_uv_x; | ||
43 | DECLARE_PER_CPU(int, x2apic_extra_bits); | ||
44 | extern void uv_cpu_init(void); | ||
45 | extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip); | ||
36 | 46 | ||
37 | #endif | 47 | #endif |
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 9e7280092a48..9870cc1f2f8f 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h | |||
@@ -167,7 +167,7 @@ static inline int is_geode(void) | |||
167 | /* MFGPTs */ | 167 | /* MFGPTs */ |
168 | 168 | ||
169 | #define MFGPT_MAX_TIMERS 8 | 169 | #define MFGPT_MAX_TIMERS 8 |
170 | #define MFGPT_TIMER_ANY -1 | 170 | #define MFGPT_TIMER_ANY (-1) |
171 | 171 | ||
172 | #define MFGPT_DOMAIN_WORKING 1 | 172 | #define MFGPT_DOMAIN_WORKING 1 |
173 | #define MFGPT_DOMAIN_STANDBY 2 | 173 | #define MFGPT_DOMAIN_STANDBY 2 |
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h index 479767c9195f..e153f3b44774 100644 --- a/include/asm-x86/highmem.h +++ b/include/asm-x86/highmem.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * Gerhard.Wichert@pdb.siemens.de | 8 | * Gerhard.Wichert@pdb.siemens.de |
9 | * | 9 | * |
10 | * | 10 | * |
11 | * Redesigned the x86 32-bit VM architecture to deal with | 11 | * Redesigned the x86 32-bit VM architecture to deal with |
12 | * up to 16 Terabyte physical memory. With current x86 CPUs | 12 | * up to 16 Terabyte physical memory. With current x86 CPUs |
13 | * we now support up to 64 Gigabytes physical RAM. | 13 | * we now support up to 64 Gigabytes physical RAM. |
14 | * | 14 | * |
diff --git a/include/asm-x86/hw_irq_64.h b/include/asm-x86/hw_irq_64.h index 312a58d6dac6..0062ef390f67 100644 --- a/include/asm-x86/hw_irq_64.h +++ b/include/asm-x86/hw_irq_64.h | |||
@@ -36,7 +36,7 @@ | |||
36 | * cleanup after irq migration. | 36 | * cleanup after irq migration. |
37 | */ | 37 | */ |
38 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR | 38 | #define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * Vectors 0x30-0x3f are used for ISA interrupts. | 41 | * Vectors 0x30-0x3f are used for ISA interrupts. |
42 | */ | 42 | */ |
@@ -159,13 +159,12 @@ extern atomic_t irq_mis_count; | |||
159 | * SMP has a few special interrupts for IPI messages | 159 | * SMP has a few special interrupts for IPI messages |
160 | */ | 160 | */ |
161 | 161 | ||
162 | #define BUILD_IRQ(nr) \ | 162 | #define BUILD_IRQ(nr) \ |
163 | asmlinkage void IRQ_NAME(nr); \ | 163 | asmlinkage void IRQ_NAME(nr); \ |
164 | __asm__( \ | 164 | asm("\n.p2align\n" \ |
165 | "\n.p2align\n" \ | 165 | "IRQ" #nr "_interrupt:\n\t" \ |
166 | "IRQ" #nr "_interrupt:\n\t" \ | 166 | "push $~(" #nr ") ; " \ |
167 | "push $~(" #nr ") ; " \ | 167 | "jmp common_interrupt"); |
168 | "jmp common_interrupt"); | ||
169 | 168 | ||
170 | #define platform_legacy_irq(irq) ((irq) < 16) | 169 | #define platform_legacy_irq(irq) ((irq) < 16) |
171 | 170 | ||
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h index c16c6ff4bdd7..d2bbd238b3e1 100644 --- a/include/asm-x86/hypertransport.h +++ b/include/asm-x86/hypertransport.h | |||
@@ -8,12 +8,14 @@ | |||
8 | #define HT_IRQ_LOW_BASE 0xf8000000 | 8 | #define HT_IRQ_LOW_BASE 0xf8000000 |
9 | 9 | ||
10 | #define HT_IRQ_LOW_VECTOR_SHIFT 16 | 10 | #define HT_IRQ_LOW_VECTOR_SHIFT 16 |
11 | #define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 | 11 | #define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 |
12 | #define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) | 12 | #define HT_IRQ_LOW_VECTOR(v) \ |
13 | (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) | ||
13 | 14 | ||
14 | #define HT_IRQ_LOW_DEST_ID_SHIFT 8 | 15 | #define HT_IRQ_LOW_DEST_ID_SHIFT 8 |
15 | #define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 | 16 | #define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 |
16 | #define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) | 17 | #define HT_IRQ_LOW_DEST_ID(v) \ |
18 | (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) | ||
17 | 19 | ||
18 | #define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 | 20 | #define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 |
19 | #define HT_IRQ_LOW_DM_LOGICAL 0x0000040 | 21 | #define HT_IRQ_LOW_DM_LOGICAL 0x0000040 |
@@ -36,7 +38,8 @@ | |||
36 | 38 | ||
37 | 39 | ||
38 | #define HT_IRQ_HIGH_DEST_ID_SHIFT 0 | 40 | #define HT_IRQ_HIGH_DEST_ID_SHIFT 0 |
39 | #define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff | 41 | #define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff |
40 | #define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) | 42 | #define HT_IRQ_HIGH_DEST_ID(v) \ |
43 | ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) | ||
41 | 44 | ||
42 | #endif /* ASM_HYPERTRANSPORT_H */ | 45 | #endif /* ASM_HYPERTRANSPORT_H */ |
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index f377b76b2f34..54522b814f1c 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h | |||
@@ -41,7 +41,7 @@ static inline void tolerant_fwait(void) | |||
41 | { | 41 | { |
42 | asm volatile("1: fwait\n" | 42 | asm volatile("1: fwait\n" |
43 | "2:\n" | 43 | "2:\n" |
44 | _ASM_EXTABLE(1b,2b)); | 44 | _ASM_EXTABLE(1b, 2b)); |
45 | } | 45 | } |
46 | 46 | ||
47 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | 47 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) |
@@ -54,7 +54,7 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | |||
54 | "3: movl $-1,%[err]\n" | 54 | "3: movl $-1,%[err]\n" |
55 | " jmp 2b\n" | 55 | " jmp 2b\n" |
56 | ".previous\n" | 56 | ".previous\n" |
57 | _ASM_EXTABLE(1b,3b) | 57 | _ASM_EXTABLE(1b, 3b) |
58 | : [err] "=r" (err) | 58 | : [err] "=r" (err) |
59 | #if 0 /* See comment in __save_init_fpu() below. */ | 59 | #if 0 /* See comment in __save_init_fpu() below. */ |
60 | : [fx] "r" (fx), "m" (*fx), "0" (0)); | 60 | : [fx] "r" (fx), "m" (*fx), "0" (0)); |
@@ -76,11 +76,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | |||
76 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | 76 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) |
77 | { | 77 | { |
78 | if (unlikely(fx->swd & X87_FSW_ES)) | 78 | if (unlikely(fx->swd & X87_FSW_ES)) |
79 | asm volatile("fnclex"); | 79 | asm volatile("fnclex"); |
80 | alternative_input(ASM_NOP8 ASM_NOP2, | 80 | alternative_input(ASM_NOP8 ASM_NOP2, |
81 | " emms\n" /* clear stack tags */ | 81 | " emms\n" /* clear stack tags */ |
82 | " fildl %%gs:0", /* load to clear state */ | 82 | " fildl %%gs:0", /* load to clear state */ |
83 | X86_FEATURE_FXSAVE_LEAK); | 83 | X86_FEATURE_FXSAVE_LEAK); |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | 86 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) |
@@ -93,14 +93,15 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | |||
93 | "3: movl $-1,%[err]\n" | 93 | "3: movl $-1,%[err]\n" |
94 | " jmp 2b\n" | 94 | " jmp 2b\n" |
95 | ".previous\n" | 95 | ".previous\n" |
96 | _ASM_EXTABLE(1b,3b) | 96 | _ASM_EXTABLE(1b, 3b) |
97 | : [err] "=r" (err), "=m" (*fx) | 97 | : [err] "=r" (err), "=m" (*fx) |
98 | #if 0 /* See comment in __fxsave_clear() below. */ | 98 | #if 0 /* See comment in __fxsave_clear() below. */ |
99 | : [fx] "r" (fx), "0" (0)); | 99 | : [fx] "r" (fx), "0" (0)); |
100 | #else | 100 | #else |
101 | : [fx] "cdaSDb" (fx), "0" (0)); | 101 | : [fx] "cdaSDb" (fx), "0" (0)); |
102 | #endif | 102 | #endif |
103 | if (unlikely(err) && __clear_user(fx, sizeof(struct i387_fxsave_struct))) | 103 | if (unlikely(err) && |
104 | __clear_user(fx, sizeof(struct i387_fxsave_struct))) | ||
104 | err = -EFAULT; | 105 | err = -EFAULT; |
105 | /* No need to clear here because the caller clears USED_MATH */ | 106 | /* No need to clear here because the caller clears USED_MATH */ |
106 | return err; | 107 | return err; |
@@ -156,8 +157,10 @@ static inline int save_i387(struct _fpstate __user *buf) | |||
156 | return 0; | 157 | return 0; |
157 | clear_used_math(); /* trigger finit */ | 158 | clear_used_math(); /* trigger finit */ |
158 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | 159 | if (task_thread_info(tsk)->status & TS_USEDFPU) { |
159 | err = save_i387_checking((struct i387_fxsave_struct __user *)buf); | 160 | err = save_i387_checking((struct i387_fxsave_struct __user *) |
160 | if (err) return err; | 161 | buf); |
162 | if (err) | ||
163 | return err; | ||
161 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 164 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
162 | stts(); | 165 | stts(); |
163 | } else { | 166 | } else { |
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h index 67c319e0efc7..45d4df3e51e6 100644 --- a/include/asm-x86/i8259.h +++ b/include/asm-x86/i8259.h | |||
@@ -1,9 +1,11 @@ | |||
1 | #ifndef __ASM_I8259_H__ | 1 | #ifndef __ASM_I8259_H__ |
2 | #define __ASM_I8259_H__ | 2 | #define __ASM_I8259_H__ |
3 | 3 | ||
4 | #include <linux/delay.h> | ||
5 | |||
4 | extern unsigned int cached_irq_mask; | 6 | extern unsigned int cached_irq_mask; |
5 | 7 | ||
6 | #define __byte(x,y) (((unsigned char *) &(y))[x]) | 8 | #define __byte(x, y) (((unsigned char *)&(y))[x]) |
7 | #define cached_master_mask (__byte(0, cached_irq_mask)) | 9 | #define cached_master_mask (__byte(0, cached_irq_mask)) |
8 | #define cached_slave_mask (__byte(1, cached_irq_mask)) | 10 | #define cached_slave_mask (__byte(1, cached_irq_mask)) |
9 | 11 | ||
@@ -29,7 +31,28 @@ extern void enable_8259A_irq(unsigned int irq); | |||
29 | extern void disable_8259A_irq(unsigned int irq); | 31 | extern void disable_8259A_irq(unsigned int irq); |
30 | extern unsigned int startup_8259A_irq(unsigned int irq); | 32 | extern unsigned int startup_8259A_irq(unsigned int irq); |
31 | 33 | ||
32 | #define inb_pic inb_p | 34 | /* the PIC may need a careful delay on some platforms, hence specific calls */ |
33 | #define outb_pic outb_p | 35 | static inline unsigned char inb_pic(unsigned int port) |
36 | { | ||
37 | unsigned char value = inb(port); | ||
38 | |||
39 | /* | ||
40 | * delay for some accesses to PIC on motherboard or in chipset | ||
41 | * must be at least one microsecond, so be safe here: | ||
42 | */ | ||
43 | udelay(2); | ||
44 | |||
45 | return value; | ||
46 | } | ||
47 | |||
48 | static inline void outb_pic(unsigned char value, unsigned int port) | ||
49 | { | ||
50 | outb(value, port); | ||
51 | /* | ||
52 | * delay for some accesses to PIC on motherboard or in chipset | ||
53 | * must be at least one microsecond, so be safe here: | ||
54 | */ | ||
55 | udelay(2); | ||
56 | } | ||
34 | 57 | ||
35 | #endif /* __ASM_I8259_H__ */ | 58 | #endif /* __ASM_I8259_H__ */ |
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h index aa9733206e29..55d3abe5276f 100644 --- a/include/asm-x86/ia32.h +++ b/include/asm-x86/ia32.h | |||
@@ -14,19 +14,19 @@ | |||
14 | 14 | ||
15 | /* signal.h */ | 15 | /* signal.h */ |
16 | struct sigaction32 { | 16 | struct sigaction32 { |
17 | unsigned int sa_handler; /* Really a pointer, but need to deal | 17 | unsigned int sa_handler; /* Really a pointer, but need to deal |
18 | with 32 bits */ | 18 | with 32 bits */ |
19 | unsigned int sa_flags; | 19 | unsigned int sa_flags; |
20 | unsigned int sa_restorer; /* Another 32 bit pointer */ | 20 | unsigned int sa_restorer; /* Another 32 bit pointer */ |
21 | compat_sigset_t sa_mask; /* A 32 bit mask */ | 21 | compat_sigset_t sa_mask; /* A 32 bit mask */ |
22 | }; | 22 | }; |
23 | 23 | ||
24 | struct old_sigaction32 { | 24 | struct old_sigaction32 { |
25 | unsigned int sa_handler; /* Really a pointer, but need to deal | 25 | unsigned int sa_handler; /* Really a pointer, but need to deal |
26 | with 32 bits */ | 26 | with 32 bits */ |
27 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ | 27 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ |
28 | unsigned int sa_flags; | 28 | unsigned int sa_flags; |
29 | unsigned int sa_restorer; /* Another 32 bit pointer */ | 29 | unsigned int sa_restorer; /* Another 32 bit pointer */ |
30 | }; | 30 | }; |
31 | 31 | ||
32 | typedef struct sigaltstack_ia32 { | 32 | typedef struct sigaltstack_ia32 { |
@@ -65,7 +65,7 @@ struct stat64 { | |||
65 | long long st_size; | 65 | long long st_size; |
66 | unsigned int st_blksize; | 66 | unsigned int st_blksize; |
67 | 67 | ||
68 | long long st_blocks;/* Number 512-byte blocks allocated. */ | 68 | long long st_blocks;/* Number 512-byte blocks allocated */ |
69 | 69 | ||
70 | unsigned st_atime; | 70 | unsigned st_atime; |
71 | unsigned st_atime_nsec; | 71 | unsigned st_atime_nsec; |
@@ -77,13 +77,13 @@ struct stat64 { | |||
77 | unsigned long long st_ino; | 77 | unsigned long long st_ino; |
78 | } __attribute__((packed)); | 78 | } __attribute__((packed)); |
79 | 79 | ||
80 | typedef struct compat_siginfo{ | 80 | typedef struct compat_siginfo { |
81 | int si_signo; | 81 | int si_signo; |
82 | int si_errno; | 82 | int si_errno; |
83 | int si_code; | 83 | int si_code; |
84 | 84 | ||
85 | union { | 85 | union { |
86 | int _pad[((128/sizeof(int)) - 3)]; | 86 | int _pad[((128 / sizeof(int)) - 3)]; |
87 | 87 | ||
88 | /* kill() */ | 88 | /* kill() */ |
89 | struct { | 89 | struct { |
@@ -129,28 +129,26 @@ typedef struct compat_siginfo{ | |||
129 | } _sifields; | 129 | } _sifields; |
130 | } compat_siginfo_t; | 130 | } compat_siginfo_t; |
131 | 131 | ||
132 | struct sigframe32 | 132 | struct sigframe32 { |
133 | { | 133 | u32 pretcode; |
134 | u32 pretcode; | 134 | int sig; |
135 | int sig; | 135 | struct sigcontext_ia32 sc; |
136 | struct sigcontext_ia32 sc; | 136 | struct _fpstate_ia32 fpstate; |
137 | struct _fpstate_ia32 fpstate; | 137 | unsigned int extramask[_COMPAT_NSIG_WORDS-1]; |
138 | unsigned int extramask[_COMPAT_NSIG_WORDS-1]; | ||
139 | }; | 138 | }; |
140 | 139 | ||
141 | struct rt_sigframe32 | 140 | struct rt_sigframe32 { |
142 | { | 141 | u32 pretcode; |
143 | u32 pretcode; | 142 | int sig; |
144 | int sig; | 143 | u32 pinfo; |
145 | u32 pinfo; | 144 | u32 puc; |
146 | u32 puc; | 145 | compat_siginfo_t info; |
147 | compat_siginfo_t info; | 146 | struct ucontext_ia32 uc; |
148 | struct ucontext_ia32 uc; | 147 | struct _fpstate_ia32 fpstate; |
149 | struct _fpstate_ia32 fpstate; | ||
150 | }; | 148 | }; |
151 | 149 | ||
152 | struct ustat32 { | 150 | struct ustat32 { |
153 | __u32 f_tfree; | 151 | __u32 f_tfree; |
154 | compat_ino_t f_tinode; | 152 | compat_ino_t f_tinode; |
155 | char f_fname[6]; | 153 | char f_fname[6]; |
156 | char f_fpack[6]; | 154 | char f_fpack[6]; |
@@ -168,5 +166,5 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm); | |||
168 | #endif | 166 | #endif |
169 | 167 | ||
170 | #endif /* !CONFIG_IA32_SUPPORT */ | 168 | #endif /* !CONFIG_IA32_SUPPORT */ |
171 | 169 | ||
172 | #endif | 170 | #endif |
diff --git a/include/asm-x86/ide.h b/include/asm-x86/ide.h index c2552d8bebf7..cf9c98e5bdb5 100644 --- a/include/asm-x86/ide.h +++ b/include/asm-x86/ide.h | |||
@@ -20,8 +20,6 @@ | |||
20 | # endif | 20 | # endif |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define IDE_ARCH_OBSOLETE_DEFAULTS | ||
24 | |||
25 | static __inline__ int ide_default_irq(unsigned long base) | 23 | static __inline__ int ide_default_irq(unsigned long base) |
26 | { | 24 | { |
27 | switch (base) { | 25 | switch (base) { |
@@ -60,14 +58,6 @@ static __inline__ unsigned long ide_default_io_base(int index) | |||
60 | } | 58 | } |
61 | } | 59 | } |
62 | 60 | ||
63 | #define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ | ||
64 | |||
65 | #ifdef CONFIG_BLK_DEV_IDEPCI | ||
66 | #define ide_init_default_irq(base) (0) | ||
67 | #else | ||
68 | #define ide_init_default_irq(base) ide_default_irq(base) | ||
69 | #endif | ||
70 | |||
71 | #include <asm-generic/ide_iops.h> | 61 | #include <asm-generic/ide_iops.h> |
72 | 62 | ||
73 | #endif /* __KERNEL__ */ | 63 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index 5a58b176dd61..7b292d386713 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h | |||
@@ -1,5 +1,11 @@ | |||
1 | #define ARCH_HAS_IOREMAP_WC | ||
2 | |||
1 | #ifdef CONFIG_X86_32 | 3 | #ifdef CONFIG_X86_32 |
2 | # include "io_32.h" | 4 | # include "io_32.h" |
3 | #else | 5 | #else |
4 | # include "io_64.h" | 6 | # include "io_64.h" |
5 | #endif | 7 | #endif |
8 | extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, | ||
9 | unsigned long prot_val); | ||
10 | extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); | ||
11 | |||
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h index d4d8fbd9378c..509045f5fda2 100644 --- a/include/asm-x86/io_32.h +++ b/include/asm-x86/io_32.h | |||
@@ -65,14 +65,14 @@ | |||
65 | * | 65 | * |
66 | * The returned physical address is the physical (CPU) mapping for | 66 | * The returned physical address is the physical (CPU) mapping for |
67 | * the memory address given. It is only valid to use this function on | 67 | * the memory address given. It is only valid to use this function on |
68 | * addresses directly mapped or allocated via kmalloc. | 68 | * addresses directly mapped or allocated via kmalloc. |
69 | * | 69 | * |
70 | * This function does not give bus mappings for DMA transfers. In | 70 | * This function does not give bus mappings for DMA transfers. In |
71 | * almost all conceivable cases a device driver should not be using | 71 | * almost all conceivable cases a device driver should not be using |
72 | * this function | 72 | * this function |
73 | */ | 73 | */ |
74 | 74 | ||
75 | static inline unsigned long virt_to_phys(volatile void * address) | 75 | static inline unsigned long virt_to_phys(volatile void *address) |
76 | { | 76 | { |
77 | return __pa(address); | 77 | return __pa(address); |
78 | } | 78 | } |
@@ -90,7 +90,7 @@ static inline unsigned long virt_to_phys(volatile void * address) | |||
90 | * this function | 90 | * this function |
91 | */ | 91 | */ |
92 | 92 | ||
93 | static inline void * phys_to_virt(unsigned long address) | 93 | static inline void *phys_to_virt(unsigned long address) |
94 | { | 94 | { |
95 | return __va(address); | 95 | return __va(address); |
96 | } | 96 | } |
@@ -169,16 +169,19 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); | |||
169 | 169 | ||
170 | static inline unsigned char readb(const volatile void __iomem *addr) | 170 | static inline unsigned char readb(const volatile void __iomem *addr) |
171 | { | 171 | { |
172 | return *(volatile unsigned char __force *) addr; | 172 | return *(volatile unsigned char __force *)addr; |
173 | } | 173 | } |
174 | |||
174 | static inline unsigned short readw(const volatile void __iomem *addr) | 175 | static inline unsigned short readw(const volatile void __iomem *addr) |
175 | { | 176 | { |
176 | return *(volatile unsigned short __force *) addr; | 177 | return *(volatile unsigned short __force *)addr; |
177 | } | 178 | } |
179 | |||
178 | static inline unsigned int readl(const volatile void __iomem *addr) | 180 | static inline unsigned int readl(const volatile void __iomem *addr) |
179 | { | 181 | { |
180 | return *(volatile unsigned int __force *) addr; | 182 | return *(volatile unsigned int __force *) addr; |
181 | } | 183 | } |
184 | |||
182 | #define readb_relaxed(addr) readb(addr) | 185 | #define readb_relaxed(addr) readb(addr) |
183 | #define readw_relaxed(addr) readw(addr) | 186 | #define readw_relaxed(addr) readw(addr) |
184 | #define readl_relaxed(addr) readl(addr) | 187 | #define readl_relaxed(addr) readl(addr) |
@@ -188,15 +191,17 @@ static inline unsigned int readl(const volatile void __iomem *addr) | |||
188 | 191 | ||
189 | static inline void writeb(unsigned char b, volatile void __iomem *addr) | 192 | static inline void writeb(unsigned char b, volatile void __iomem *addr) |
190 | { | 193 | { |
191 | *(volatile unsigned char __force *) addr = b; | 194 | *(volatile unsigned char __force *)addr = b; |
192 | } | 195 | } |
196 | |||
193 | static inline void writew(unsigned short b, volatile void __iomem *addr) | 197 | static inline void writew(unsigned short b, volatile void __iomem *addr) |
194 | { | 198 | { |
195 | *(volatile unsigned short __force *) addr = b; | 199 | *(volatile unsigned short __force *)addr = b; |
196 | } | 200 | } |
201 | |||
197 | static inline void writel(unsigned int b, volatile void __iomem *addr) | 202 | static inline void writel(unsigned int b, volatile void __iomem *addr) |
198 | { | 203 | { |
199 | *(volatile unsigned int __force *) addr = b; | 204 | *(volatile unsigned int __force *)addr = b; |
200 | } | 205 | } |
201 | #define __raw_writeb writeb | 206 | #define __raw_writeb writeb |
202 | #define __raw_writew writew | 207 | #define __raw_writew writew |
@@ -239,12 +244,12 @@ memcpy_toio(volatile void __iomem *dst, const void *src, int count) | |||
239 | * 1. Out of order aware processors | 244 | * 1. Out of order aware processors |
240 | * 2. Accidentally out of order processors (PPro errata #51) | 245 | * 2. Accidentally out of order processors (PPro errata #51) |
241 | */ | 246 | */ |
242 | 247 | ||
243 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | 248 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) |
244 | 249 | ||
245 | static inline void flush_write_buffers(void) | 250 | static inline void flush_write_buffers(void) |
246 | { | 251 | { |
247 | __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); | 252 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); |
248 | } | 253 | } |
249 | 254 | ||
250 | #else | 255 | #else |
@@ -264,7 +269,8 @@ extern void io_delay_init(void); | |||
264 | #include <asm/paravirt.h> | 269 | #include <asm/paravirt.h> |
265 | #else | 270 | #else |
266 | 271 | ||
267 | static inline void slow_down_io(void) { | 272 | static inline void slow_down_io(void) |
273 | { | ||
268 | native_io_delay(); | 274 | native_io_delay(); |
269 | #ifdef REALLY_SLOW_IO | 275 | #ifdef REALLY_SLOW_IO |
270 | native_io_delay(); | 276 | native_io_delay(); |
@@ -275,51 +281,74 @@ static inline void slow_down_io(void) { | |||
275 | 281 | ||
276 | #endif | 282 | #endif |
277 | 283 | ||
278 | #define __BUILDIO(bwl,bw,type) \ | 284 | #define __BUILDIO(bwl, bw, type) \ |
279 | static inline void out##bwl(unsigned type value, int port) { \ | 285 | static inline void out##bwl(unsigned type value, int port) \ |
280 | out##bwl##_local(value, port); \ | 286 | { \ |
281 | } \ | 287 | out##bwl##_local(value, port); \ |
282 | static inline unsigned type in##bwl(int port) { \ | 288 | } \ |
283 | return in##bwl##_local(port); \ | 289 | \ |
290 | static inline unsigned type in##bwl(int port) \ | ||
291 | { \ | ||
292 | return in##bwl##_local(port); \ | ||
284 | } | 293 | } |
285 | 294 | ||
286 | #define BUILDIO(bwl,bw,type) \ | 295 | #define BUILDIO(bwl, bw, type) \ |
287 | static inline void out##bwl##_local(unsigned type value, int port) { \ | 296 | static inline void out##bwl##_local(unsigned type value, int port) \ |
288 | __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ | 297 | { \ |
289 | } \ | 298 | asm volatile("out" #bwl " %" #bw "0, %w1" \ |
290 | static inline unsigned type in##bwl##_local(int port) { \ | 299 | : : "a"(value), "Nd"(port)); \ |
291 | unsigned type value; \ | 300 | } \ |
292 | __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ | 301 | \ |
293 | return value; \ | 302 | static inline unsigned type in##bwl##_local(int port) \ |
294 | } \ | 303 | { \ |
295 | static inline void out##bwl##_local_p(unsigned type value, int port) { \ | 304 | unsigned type value; \ |
296 | out##bwl##_local(value, port); \ | 305 | asm volatile("in" #bwl " %w1, %" #bw "0" \ |
297 | slow_down_io(); \ | 306 | : "=a"(value) : "Nd"(port)); \ |
298 | } \ | 307 | return value; \ |
299 | static inline unsigned type in##bwl##_local_p(int port) { \ | 308 | } \ |
300 | unsigned type value = in##bwl##_local(port); \ | 309 | \ |
301 | slow_down_io(); \ | 310 | static inline void out##bwl##_local_p(unsigned type value, int port) \ |
302 | return value; \ | 311 | { \ |
303 | } \ | 312 | out##bwl##_local(value, port); \ |
304 | __BUILDIO(bwl,bw,type) \ | 313 | slow_down_io(); \ |
305 | static inline void out##bwl##_p(unsigned type value, int port) { \ | 314 | } \ |
306 | out##bwl(value, port); \ | 315 | \ |
307 | slow_down_io(); \ | 316 | static inline unsigned type in##bwl##_local_p(int port) \ |
308 | } \ | 317 | { \ |
309 | static inline unsigned type in##bwl##_p(int port) { \ | 318 | unsigned type value = in##bwl##_local(port); \ |
310 | unsigned type value = in##bwl(port); \ | 319 | slow_down_io(); \ |
311 | slow_down_io(); \ | 320 | return value; \ |
312 | return value; \ | 321 | } \ |
313 | } \ | 322 | \ |
314 | static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ | 323 | __BUILDIO(bwl, bw, type) \ |
315 | __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ | 324 | \ |
316 | } \ | 325 | static inline void out##bwl##_p(unsigned type value, int port) \ |
317 | static inline void ins##bwl(int port, void *addr, unsigned long count) { \ | 326 | { \ |
318 | __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ | 327 | out##bwl(value, port); \ |
328 | slow_down_io(); \ | ||
329 | } \ | ||
330 | \ | ||
331 | static inline unsigned type in##bwl##_p(int port) \ | ||
332 | { \ | ||
333 | unsigned type value = in##bwl(port); \ | ||
334 | slow_down_io(); \ | ||
335 | return value; \ | ||
336 | } \ | ||
337 | \ | ||
338 | static inline void outs##bwl(int port, const void *addr, unsigned long count) \ | ||
339 | { \ | ||
340 | asm volatile("rep; outs" #bwl \ | ||
341 | : "+S"(addr), "+c"(count) : "d"(port)); \ | ||
342 | } \ | ||
343 | \ | ||
344 | static inline void ins##bwl(int port, void *addr, unsigned long count) \ | ||
345 | { \ | ||
346 | asm volatile("rep; ins" #bwl \ | ||
347 | : "+D"(addr), "+c"(count) : "d"(port)); \ | ||
319 | } | 348 | } |
320 | 349 | ||
321 | BUILDIO(b,b,char) | 350 | BUILDIO(b, b, char) |
322 | BUILDIO(w,w,short) | 351 | BUILDIO(w, w, short) |
323 | BUILDIO(l,,int) | 352 | BUILDIO(l, , int) |
324 | 353 | ||
325 | #endif | 354 | #endif |
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h index db0be2011a3c..c2f5eef47b88 100644 --- a/include/asm-x86/io_64.h +++ b/include/asm-x86/io_64.h | |||
@@ -58,60 +58,75 @@ static inline void slow_down_io(void) | |||
58 | /* | 58 | /* |
59 | * Talk about misusing macros.. | 59 | * Talk about misusing macros.. |
60 | */ | 60 | */ |
61 | #define __OUT1(s,x) \ | 61 | #define __OUT1(s, x) \ |
62 | static inline void out##s(unsigned x value, unsigned short port) { | 62 | static inline void out##s(unsigned x value, unsigned short port) { |
63 | 63 | ||
64 | #define __OUT2(s,s1,s2) \ | 64 | #define __OUT2(s, s1, s2) \ |
65 | __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" | 65 | asm volatile ("out" #s " %" s1 "0,%" s2 "1" |
66 | 66 | ||
67 | #ifndef REALLY_SLOW_IO | 67 | #ifndef REALLY_SLOW_IO |
68 | #define REALLY_SLOW_IO | 68 | #define REALLY_SLOW_IO |
69 | #define UNSET_REALLY_SLOW_IO | 69 | #define UNSET_REALLY_SLOW_IO |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | #define __OUT(s,s1,x) \ | 72 | #define __OUT(s, s1, x) \ |
73 | __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ | 73 | __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ |
74 | __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ | 74 | } \ |
75 | slow_down_io(); } | 75 | __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \ |
76 | slow_down_io(); \ | ||
77 | } | ||
76 | 78 | ||
77 | #define __IN1(s) \ | 79 | #define __IN1(s) \ |
78 | static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; | 80 | static inline RETURN_TYPE in##s(unsigned short port) \ |
81 | { \ | ||
82 | RETURN_TYPE _v; | ||
79 | 83 | ||
80 | #define __IN2(s,s1,s2) \ | 84 | #define __IN2(s, s1, s2) \ |
81 | __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" | 85 | asm volatile ("in" #s " %" s2 "1,%" s1 "0" |
82 | 86 | ||
83 | #define __IN(s,s1,i...) \ | 87 | #define __IN(s, s1, i...) \ |
84 | __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \ | 88 | __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ |
85 | __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ | 89 | return _v; \ |
86 | slow_down_io(); return _v; } | 90 | } \ |
91 | __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \ | ||
92 | slow_down_io(); \ | ||
93 | return _v; } | ||
87 | 94 | ||
88 | #ifdef UNSET_REALLY_SLOW_IO | 95 | #ifdef UNSET_REALLY_SLOW_IO |
89 | #undef REALLY_SLOW_IO | 96 | #undef REALLY_SLOW_IO |
90 | #endif | 97 | #endif |
91 | 98 | ||
92 | #define __INS(s) \ | 99 | #define __INS(s) \ |
93 | static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ | 100 | static inline void ins##s(unsigned short port, void *addr, \ |
94 | { __asm__ __volatile__ ("rep ; ins" #s \ | 101 | unsigned long count) \ |
95 | : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } | 102 | { \ |
103 | asm volatile ("rep ; ins" #s \ | ||
104 | : "=D" (addr), "=c" (count) \ | ||
105 | : "d" (port), "0" (addr), "1" (count)); \ | ||
106 | } | ||
96 | 107 | ||
97 | #define __OUTS(s) \ | 108 | #define __OUTS(s) \ |
98 | static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ | 109 | static inline void outs##s(unsigned short port, const void *addr, \ |
99 | { __asm__ __volatile__ ("rep ; outs" #s \ | 110 | unsigned long count) \ |
100 | : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } | 111 | { \ |
112 | asm volatile ("rep ; outs" #s \ | ||
113 | : "=S" (addr), "=c" (count) \ | ||
114 | : "d" (port), "0" (addr), "1" (count)); \ | ||
115 | } | ||
101 | 116 | ||
102 | #define RETURN_TYPE unsigned char | 117 | #define RETURN_TYPE unsigned char |
103 | __IN(b,"") | 118 | __IN(b, "") |
104 | #undef RETURN_TYPE | 119 | #undef RETURN_TYPE |
105 | #define RETURN_TYPE unsigned short | 120 | #define RETURN_TYPE unsigned short |
106 | __IN(w,"") | 121 | __IN(w, "") |
107 | #undef RETURN_TYPE | 122 | #undef RETURN_TYPE |
108 | #define RETURN_TYPE unsigned int | 123 | #define RETURN_TYPE unsigned int |
109 | __IN(l,"") | 124 | __IN(l, "") |
110 | #undef RETURN_TYPE | 125 | #undef RETURN_TYPE |
111 | 126 | ||
112 | __OUT(b,"b",char) | 127 | __OUT(b, "b", char) |
113 | __OUT(w,"w",short) | 128 | __OUT(w, "w", short) |
114 | __OUT(l,,int) | 129 | __OUT(l, , int) |
115 | 130 | ||
116 | __INS(b) | 131 | __INS(b) |
117 | __INS(w) | 132 | __INS(w) |
@@ -132,12 +147,12 @@ __OUTS(l) | |||
132 | * Change virtual addresses to physical addresses and vv. | 147 | * Change virtual addresses to physical addresses and vv. |
133 | * These are pretty trivial | 148 | * These are pretty trivial |
134 | */ | 149 | */ |
135 | static inline unsigned long virt_to_phys(volatile void * address) | 150 | static inline unsigned long virt_to_phys(volatile void *address) |
136 | { | 151 | { |
137 | return __pa(address); | 152 | return __pa(address); |
138 | } | 153 | } |
139 | 154 | ||
140 | static inline void * phys_to_virt(unsigned long address) | 155 | static inline void *phys_to_virt(unsigned long address) |
141 | { | 156 | { |
142 | return __va(address); | 157 | return __va(address); |
143 | } | 158 | } |
@@ -200,18 +215,22 @@ static inline __u8 __readb(const volatile void __iomem *addr) | |||
200 | { | 215 | { |
201 | return *(__force volatile __u8 *)addr; | 216 | return *(__force volatile __u8 *)addr; |
202 | } | 217 | } |
218 | |||
203 | static inline __u16 __readw(const volatile void __iomem *addr) | 219 | static inline __u16 __readw(const volatile void __iomem *addr) |
204 | { | 220 | { |
205 | return *(__force volatile __u16 *)addr; | 221 | return *(__force volatile __u16 *)addr; |
206 | } | 222 | } |
223 | |||
207 | static __always_inline __u32 __readl(const volatile void __iomem *addr) | 224 | static __always_inline __u32 __readl(const volatile void __iomem *addr) |
208 | { | 225 | { |
209 | return *(__force volatile __u32 *)addr; | 226 | return *(__force volatile __u32 *)addr; |
210 | } | 227 | } |
228 | |||
211 | static inline __u64 __readq(const volatile void __iomem *addr) | 229 | static inline __u64 __readq(const volatile void __iomem *addr) |
212 | { | 230 | { |
213 | return *(__force volatile __u64 *)addr; | 231 | return *(__force volatile __u64 *)addr; |
214 | } | 232 | } |
233 | |||
215 | #define readb(x) __readb(x) | 234 | #define readb(x) __readb(x) |
216 | #define readw(x) __readw(x) | 235 | #define readw(x) __readw(x) |
217 | #define readl(x) __readl(x) | 236 | #define readl(x) __readl(x) |
@@ -231,37 +250,44 @@ static inline void __writel(__u32 b, volatile void __iomem *addr) | |||
231 | { | 250 | { |
232 | *(__force volatile __u32 *)addr = b; | 251 | *(__force volatile __u32 *)addr = b; |
233 | } | 252 | } |
253 | |||
234 | static inline void __writeq(__u64 b, volatile void __iomem *addr) | 254 | static inline void __writeq(__u64 b, volatile void __iomem *addr) |
235 | { | 255 | { |
236 | *(__force volatile __u64 *)addr = b; | 256 | *(__force volatile __u64 *)addr = b; |
237 | } | 257 | } |
258 | |||
238 | static inline void __writeb(__u8 b, volatile void __iomem *addr) | 259 | static inline void __writeb(__u8 b, volatile void __iomem *addr) |
239 | { | 260 | { |
240 | *(__force volatile __u8 *)addr = b; | 261 | *(__force volatile __u8 *)addr = b; |
241 | } | 262 | } |
263 | |||
242 | static inline void __writew(__u16 b, volatile void __iomem *addr) | 264 | static inline void __writew(__u16 b, volatile void __iomem *addr) |
243 | { | 265 | { |
244 | *(__force volatile __u16 *)addr = b; | 266 | *(__force volatile __u16 *)addr = b; |
245 | } | 267 | } |
246 | #define writeq(val,addr) __writeq((val),(addr)) | 268 | |
247 | #define writel(val,addr) __writel((val),(addr)) | 269 | #define writeq(val, addr) __writeq((val), (addr)) |
248 | #define writew(val,addr) __writew((val),(addr)) | 270 | #define writel(val, addr) __writel((val), (addr)) |
249 | #define writeb(val,addr) __writeb((val),(addr)) | 271 | #define writew(val, addr) __writew((val), (addr)) |
272 | #define writeb(val, addr) __writeb((val), (addr)) | ||
250 | #define __raw_writeb writeb | 273 | #define __raw_writeb writeb |
251 | #define __raw_writew writew | 274 | #define __raw_writew writew |
252 | #define __raw_writel writel | 275 | #define __raw_writel writel |
253 | #define __raw_writeq writeq | 276 | #define __raw_writeq writeq |
254 | 277 | ||
255 | void __memcpy_fromio(void*,unsigned long,unsigned); | 278 | void __memcpy_fromio(void *, unsigned long, unsigned); |
256 | void __memcpy_toio(unsigned long,const void*,unsigned); | 279 | void __memcpy_toio(unsigned long, const void *, unsigned); |
257 | 280 | ||
258 | static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len) | 281 | static inline void memcpy_fromio(void *to, const volatile void __iomem *from, |
282 | unsigned len) | ||
259 | { | 283 | { |
260 | __memcpy_fromio(to,(unsigned long)from,len); | 284 | __memcpy_fromio(to, (unsigned long)from, len); |
261 | } | 285 | } |
262 | static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len) | 286 | |
287 | static inline void memcpy_toio(volatile void __iomem *to, const void *from, | ||
288 | unsigned len) | ||
263 | { | 289 | { |
264 | __memcpy_toio((unsigned long)to,from,len); | 290 | __memcpy_toio((unsigned long)to, from, len); |
265 | } | 291 | } |
266 | 292 | ||
267 | void memset_io(volatile void __iomem *a, int b, size_t c); | 293 | void memset_io(volatile void __iomem *a, int b, size_t c); |
@@ -276,7 +302,7 @@ void memset_io(volatile void __iomem *a, int b, size_t c); | |||
276 | */ | 302 | */ |
277 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) | 303 | #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) |
278 | 304 | ||
279 | #define flush_write_buffers() | 305 | #define flush_write_buffers() |
280 | 306 | ||
281 | extern int iommu_bio_merge; | 307 | extern int iommu_bio_merge; |
282 | #define BIO_VMERGE_BOUNDARY iommu_bio_merge | 308 | #define BIO_VMERGE_BOUNDARY iommu_bio_merge |
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h index 0f5b3fef0b08..0c9e17c73e05 100644 --- a/include/asm-x86/io_apic.h +++ b/include/asm-x86/io_apic.h | |||
@@ -110,6 +110,13 @@ extern int nr_ioapic_registers[MAX_IO_APICS]; | |||
110 | * MP-BIOS irq configuration table structures: | 110 | * MP-BIOS irq configuration table structures: |
111 | */ | 111 | */ |
112 | 112 | ||
113 | struct mp_ioapic_routing { | ||
114 | int apic_id; | ||
115 | int gsi_base; | ||
116 | int gsi_end; | ||
117 | u32 pin_programmed[4]; | ||
118 | }; | ||
119 | |||
113 | /* I/O APIC entries */ | 120 | /* I/O APIC entries */ |
114 | extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; | 121 | extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; |
115 | 122 | ||
@@ -146,7 +153,6 @@ extern int io_apic_get_version(int ioapic); | |||
146 | extern int io_apic_get_redir_entries(int ioapic); | 153 | extern int io_apic_get_redir_entries(int ioapic); |
147 | extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, | 154 | extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, |
148 | int edge_level, int active_high_low); | 155 | int edge_level, int active_high_low); |
149 | extern int timer_uses_ioapic_pin_0; | ||
150 | #endif /* CONFIG_ACPI */ | 156 | #endif /* CONFIG_ACPI */ |
151 | 157 | ||
152 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | 158 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); |
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h index 93c894dc5154..c0c338bd4068 100644 --- a/include/asm-x86/ioctls.h +++ b/include/asm-x86/ioctls.h | |||
@@ -47,12 +47,13 @@ | |||
47 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | 47 | #define TIOCSBRK 0x5427 /* BSD compatibility */ |
48 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | 48 | #define TIOCCBRK 0x5428 /* BSD compatibility */ |
49 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | 49 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ |
50 | #define TCGETS2 _IOR('T',0x2A, struct termios2) | 50 | #define TCGETS2 _IOR('T', 0x2A, struct termios2) |
51 | #define TCSETS2 _IOW('T',0x2B, struct termios2) | 51 | #define TCSETS2 _IOW('T', 0x2B, struct termios2) |
52 | #define TCSETSW2 _IOW('T',0x2C, struct termios2) | 52 | #define TCSETSW2 _IOW('T', 0x2C, struct termios2) |
53 | #define TCSETSF2 _IOW('T',0x2D, struct termios2) | 53 | #define TCSETSF2 _IOW('T', 0x2D, struct termios2) |
54 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | 54 | #define TIOCGPTN _IOR('T', 0x30, unsigned int) |
55 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 55 | /* Get Pty Number (of pty-mux device) */ |
56 | #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ | ||
56 | 57 | ||
57 | #define FIONCLEX 0x5450 | 58 | #define FIONCLEX 0x5450 |
58 | #define FIOCLEX 0x5451 | 59 | #define FIOCLEX 0x5451 |
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h index 2adf8b39a40b..ee678fd51594 100644 --- a/include/asm-x86/ipcbuf.h +++ b/include/asm-x86/ipcbuf.h | |||
@@ -11,8 +11,7 @@ | |||
11 | * - 2 miscellaneous 32-bit values | 11 | * - 2 miscellaneous 32-bit values |
12 | */ | 12 | */ |
13 | 13 | ||
14 | struct ipc64_perm | 14 | struct ipc64_perm { |
15 | { | ||
16 | __kernel_key_t key; | 15 | __kernel_key_t key; |
17 | __kernel_uid32_t uid; | 16 | __kernel_uid32_t uid; |
18 | __kernel_gid32_t gid; | 17 | __kernel_gid32_t gid; |
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h index 6d011bd6067d..ecc80f341f37 100644 --- a/include/asm-x86/ipi.h +++ b/include/asm-x86/ipi.h | |||
@@ -27,7 +27,8 @@ | |||
27 | * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. | 27 | * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest) | 30 | static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, |
31 | unsigned int dest) | ||
31 | { | 32 | { |
32 | unsigned int icr = shortcut | dest; | 33 | unsigned int icr = shortcut | dest; |
33 | 34 | ||
@@ -42,12 +43,13 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns | |||
42 | return icr; | 43 | return icr; |
43 | } | 44 | } |
44 | 45 | ||
45 | static inline int __prepare_ICR2 (unsigned int mask) | 46 | static inline int __prepare_ICR2(unsigned int mask) |
46 | { | 47 | { |
47 | return SET_APIC_DEST_FIELD(mask); | 48 | return SET_APIC_DEST_FIELD(mask); |
48 | } | 49 | } |
49 | 50 | ||
50 | static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) | 51 | static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, |
52 | unsigned int dest) | ||
51 | { | 53 | { |
52 | /* | 54 | /* |
53 | * Subtle. In the case of the 'never do double writes' workaround | 55 | * Subtle. In the case of the 'never do double writes' workaround |
@@ -78,7 +80,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign | |||
78 | * This is used to send an IPI with no shorthand notation (the destination is | 80 | * This is used to send an IPI with no shorthand notation (the destination is |
79 | * specified in bits 56 to 63 of the ICR). | 81 | * specified in bits 56 to 63 of the ICR). |
80 | */ | 82 | */ |
81 | static inline void __send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) | 83 | static inline void __send_IPI_dest_field(unsigned int mask, int vector, |
84 | unsigned int dest) | ||
82 | { | 85 | { |
83 | unsigned long cfg; | 86 | unsigned long cfg; |
84 | 87 | ||
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h index aca9c96e8e6b..0b79f3185243 100644 --- a/include/asm-x86/irq_32.h +++ b/include/asm-x86/irq_32.h | |||
@@ -15,7 +15,7 @@ | |||
15 | #include "irq_vectors.h" | 15 | #include "irq_vectors.h" |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | 17 | ||
18 | static __inline__ int irq_canonicalize(int irq) | 18 | static inline int irq_canonicalize(int irq) |
19 | { | 19 | { |
20 | return ((irq == 2) ? 9 : irq); | 20 | return ((irq == 2) ? 9 : irq); |
21 | } | 21 | } |
diff --git a/include/asm-x86/irq_64.h b/include/asm-x86/irq_64.h index 5006c6e75656..083d35a62c94 100644 --- a/include/asm-x86/irq_64.h +++ b/include/asm-x86/irq_64.h | |||
@@ -31,10 +31,10 @@ | |||
31 | 31 | ||
32 | #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ | 32 | #define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ |
33 | 33 | ||
34 | #define NR_IRQS (NR_VECTORS + (32 *NR_CPUS)) | 34 | #define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) |
35 | #define NR_IRQ_VECTORS NR_IRQS | 35 | #define NR_IRQ_VECTORS NR_IRQS |
36 | 36 | ||
37 | static __inline__ int irq_canonicalize(int irq) | 37 | static inline int irq_canonicalize(int irq) |
38 | { | 38 | { |
39 | return ((irq == 2) ? 9 : irq); | 39 | return ((irq == 2) ? 9 : irq); |
40 | } | 40 | } |
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 0e2292483b35..c242527f970e 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h | |||
@@ -12,25 +12,21 @@ static inline unsigned long native_save_fl(void) | |||
12 | { | 12 | { |
13 | unsigned long flags; | 13 | unsigned long flags; |
14 | 14 | ||
15 | __asm__ __volatile__( | 15 | asm volatile("# __raw_save_flags\n\t" |
16 | "# __raw_save_flags\n\t" | 16 | "pushf ; pop %0" |
17 | "pushf ; pop %0" | 17 | : "=g" (flags) |
18 | : "=g" (flags) | 18 | : /* no input */ |
19 | : /* no input */ | 19 | : "memory"); |
20 | : "memory" | ||
21 | ); | ||
22 | 20 | ||
23 | return flags; | 21 | return flags; |
24 | } | 22 | } |
25 | 23 | ||
26 | static inline void native_restore_fl(unsigned long flags) | 24 | static inline void native_restore_fl(unsigned long flags) |
27 | { | 25 | { |
28 | __asm__ __volatile__( | 26 | asm volatile("push %0 ; popf" |
29 | "push %0 ; popf" | 27 | : /* no output */ |
30 | : /* no output */ | 28 | :"g" (flags) |
31 | :"g" (flags) | 29 | :"memory", "cc"); |
32 | :"memory", "cc" | ||
33 | ); | ||
34 | } | 30 | } |
35 | 31 | ||
36 | static inline void native_irq_disable(void) | 32 | static inline void native_irq_disable(void) |
@@ -70,26 +66,6 @@ static inline void raw_local_irq_restore(unsigned long flags) | |||
70 | native_restore_fl(flags); | 66 | native_restore_fl(flags); |
71 | } | 67 | } |
72 | 68 | ||
73 | #ifdef CONFIG_X86_VSMP | ||
74 | |||
75 | /* | ||
76 | * Interrupt control for the VSMP architecture: | ||
77 | */ | ||
78 | |||
79 | static inline void raw_local_irq_disable(void) | ||
80 | { | ||
81 | unsigned long flags = __raw_local_save_flags(); | ||
82 | raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); | ||
83 | } | ||
84 | |||
85 | static inline void raw_local_irq_enable(void) | ||
86 | { | ||
87 | unsigned long flags = __raw_local_save_flags(); | ||
88 | raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); | ||
89 | } | ||
90 | |||
91 | #else | ||
92 | |||
93 | static inline void raw_local_irq_disable(void) | 69 | static inline void raw_local_irq_disable(void) |
94 | { | 70 | { |
95 | native_irq_disable(); | 71 | native_irq_disable(); |
@@ -100,8 +76,6 @@ static inline void raw_local_irq_enable(void) | |||
100 | native_irq_enable(); | 76 | native_irq_enable(); |
101 | } | 77 | } |
102 | 78 | ||
103 | #endif | ||
104 | |||
105 | /* | 79 | /* |
106 | * Used in the idle loop; sti takes one instruction cycle | 80 | * Used in the idle loop; sti takes one instruction cycle |
107 | * to complete: | 81 | * to complete: |
@@ -153,23 +127,16 @@ static inline unsigned long __raw_local_irq_save(void) | |||
153 | #endif /* CONFIG_PARAVIRT */ | 127 | #endif /* CONFIG_PARAVIRT */ |
154 | 128 | ||
155 | #ifndef __ASSEMBLY__ | 129 | #ifndef __ASSEMBLY__ |
156 | #define raw_local_save_flags(flags) \ | 130 | #define raw_local_save_flags(flags) \ |
157 | do { (flags) = __raw_local_save_flags(); } while (0) | 131 | do { (flags) = __raw_local_save_flags(); } while (0) |
158 | 132 | ||
159 | #define raw_local_irq_save(flags) \ | 133 | #define raw_local_irq_save(flags) \ |
160 | do { (flags) = __raw_local_irq_save(); } while (0) | 134 | do { (flags) = __raw_local_irq_save(); } while (0) |
161 | 135 | ||
162 | #ifdef CONFIG_X86_VSMP | ||
163 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
164 | { | ||
165 | return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC); | ||
166 | } | ||
167 | #else | ||
168 | static inline int raw_irqs_disabled_flags(unsigned long flags) | 136 | static inline int raw_irqs_disabled_flags(unsigned long flags) |
169 | { | 137 | { |
170 | return !(flags & X86_EFLAGS_IF); | 138 | return !(flags & X86_EFLAGS_IF); |
171 | } | 139 | } |
172 | #endif | ||
173 | 140 | ||
174 | static inline int raw_irqs_disabled(void) | 141 | static inline int raw_irqs_disabled(void) |
175 | { | 142 | { |
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h index 99dcbafa1511..96651bb59ba1 100644 --- a/include/asm-x86/kdebug.h +++ b/include/asm-x86/kdebug.h | |||
@@ -20,15 +20,16 @@ enum die_val { | |||
20 | DIE_CALL, | 20 | DIE_CALL, |
21 | DIE_NMI_IPI, | 21 | DIE_NMI_IPI, |
22 | DIE_PAGE_FAULT, | 22 | DIE_PAGE_FAULT, |
23 | DIE_NMIUNKNOWN, | ||
23 | }; | 24 | }; |
24 | 25 | ||
25 | extern void printk_address(unsigned long address, int reliable); | 26 | extern void printk_address(unsigned long address, int reliable); |
26 | extern void die(const char *,struct pt_regs *,long); | 27 | extern void die(const char *, struct pt_regs *,long); |
27 | extern int __must_check __die(const char *, struct pt_regs *, long); | 28 | extern int __must_check __die(const char *, struct pt_regs *, long); |
28 | extern void show_registers(struct pt_regs *regs); | 29 | extern void show_registers(struct pt_regs *regs); |
29 | extern void __show_registers(struct pt_regs *, int all); | 30 | extern void __show_registers(struct pt_regs *, int all); |
30 | extern void show_trace(struct task_struct *t, struct pt_regs *regs, | 31 | extern void show_trace(struct task_struct *t, struct pt_regs *regs, |
31 | unsigned long *sp, unsigned long bp); | 32 | unsigned long *sp, unsigned long bp); |
32 | extern void __show_regs(struct pt_regs *regs); | 33 | extern void __show_regs(struct pt_regs *regs); |
33 | extern void show_regs(struct pt_regs *regs); | 34 | extern void show_regs(struct pt_regs *regs); |
34 | extern unsigned long oops_begin(void); | 35 | extern unsigned long oops_begin(void); |
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index c90d3c77afc2..8f855a15f64d 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h | |||
@@ -94,10 +94,9 @@ static inline void crash_fixup_ss_esp(struct pt_regs *newregs, | |||
94 | { | 94 | { |
95 | #ifdef CONFIG_X86_32 | 95 | #ifdef CONFIG_X86_32 |
96 | newregs->sp = (unsigned long)&(oldregs->sp); | 96 | newregs->sp = (unsigned long)&(oldregs->sp); |
97 | __asm__ __volatile__( | 97 | asm volatile("xorl %%eax, %%eax\n\t" |
98 | "xorl %%eax, %%eax\n\t" | 98 | "movw %%ss, %%ax\n\t" |
99 | "movw %%ss, %%ax\n\t" | 99 | :"=a"(newregs->ss)); |
100 | :"=a"(newregs->ss)); | ||
101 | #endif | 100 | #endif |
102 | } | 101 | } |
103 | 102 | ||
@@ -114,39 +113,39 @@ static inline void crash_setup_regs(struct pt_regs *newregs, | |||
114 | crash_fixup_ss_esp(newregs, oldregs); | 113 | crash_fixup_ss_esp(newregs, oldregs); |
115 | } else { | 114 | } else { |
116 | #ifdef CONFIG_X86_32 | 115 | #ifdef CONFIG_X86_32 |
117 | __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx)); | 116 | asm volatile("movl %%ebx,%0" : "=m"(newregs->bx)); |
118 | __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx)); | 117 | asm volatile("movl %%ecx,%0" : "=m"(newregs->cx)); |
119 | __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx)); | 118 | asm volatile("movl %%edx,%0" : "=m"(newregs->dx)); |
120 | __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si)); | 119 | asm volatile("movl %%esi,%0" : "=m"(newregs->si)); |
121 | __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di)); | 120 | asm volatile("movl %%edi,%0" : "=m"(newregs->di)); |
122 | __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp)); | 121 | asm volatile("movl %%ebp,%0" : "=m"(newregs->bp)); |
123 | __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax)); | 122 | asm volatile("movl %%eax,%0" : "=m"(newregs->ax)); |
124 | __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp)); | 123 | asm volatile("movl %%esp,%0" : "=m"(newregs->sp)); |
125 | __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); | 124 | asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); |
126 | __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); | 125 | asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); |
127 | __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds)); | 126 | asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds)); |
128 | __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es)); | 127 | asm volatile("movl %%es, %%eax;" :"=a"(newregs->es)); |
129 | __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags)); | 128 | asm volatile("pushfl; popl %0" :"=m"(newregs->flags)); |
130 | #else | 129 | #else |
131 | __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx)); | 130 | asm volatile("movq %%rbx,%0" : "=m"(newregs->bx)); |
132 | __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx)); | 131 | asm volatile("movq %%rcx,%0" : "=m"(newregs->cx)); |
133 | __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx)); | 132 | asm volatile("movq %%rdx,%0" : "=m"(newregs->dx)); |
134 | __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si)); | 133 | asm volatile("movq %%rsi,%0" : "=m"(newregs->si)); |
135 | __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di)); | 134 | asm volatile("movq %%rdi,%0" : "=m"(newregs->di)); |
136 | __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp)); | 135 | asm volatile("movq %%rbp,%0" : "=m"(newregs->bp)); |
137 | __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax)); | 136 | asm volatile("movq %%rax,%0" : "=m"(newregs->ax)); |
138 | __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp)); | 137 | asm volatile("movq %%rsp,%0" : "=m"(newregs->sp)); |
139 | __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8)); | 138 | asm volatile("movq %%r8,%0" : "=m"(newregs->r8)); |
140 | __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9)); | 139 | asm volatile("movq %%r9,%0" : "=m"(newregs->r9)); |
141 | __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10)); | 140 | asm volatile("movq %%r10,%0" : "=m"(newregs->r10)); |
142 | __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11)); | 141 | asm volatile("movq %%r11,%0" : "=m"(newregs->r11)); |
143 | __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12)); | 142 | asm volatile("movq %%r12,%0" : "=m"(newregs->r12)); |
144 | __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13)); | 143 | asm volatile("movq %%r13,%0" : "=m"(newregs->r13)); |
145 | __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14)); | 144 | asm volatile("movq %%r14,%0" : "=m"(newregs->r14)); |
146 | __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15)); | 145 | asm volatile("movq %%r15,%0" : "=m"(newregs->r15)); |
147 | __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss)); | 146 | asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); |
148 | __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs)); | 147 | asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs)); |
149 | __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags)); | 148 | asm volatile("pushfq; popq %0" :"=m"(newregs->flags)); |
150 | #endif | 149 | #endif |
151 | newregs->ip = (unsigned long)current_text_addr(); | 150 | newregs->ip = (unsigned long)current_text_addr(); |
152 | } | 151 | } |
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h new file mode 100644 index 000000000000..484c47554f3b --- /dev/null +++ b/include/asm-x86/kgdb.h | |||
@@ -0,0 +1,81 @@ | |||
1 | #ifndef _ASM_KGDB_H_ | ||
2 | #define _ASM_KGDB_H_ | ||
3 | |||
4 | /* | ||
5 | * Copyright (C) 2001-2004 Amit S. Kale | ||
6 | * Copyright (C) 2008 Wind River Systems, Inc. | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * BUFMAX defines the maximum number of characters in inbound/outbound | ||
11 | * buffers at least NUMREGBYTES*2 are needed for register packets | ||
12 | * Longer buffer is needed to list all threads | ||
13 | */ | ||
14 | #define BUFMAX 1024 | ||
15 | |||
16 | /* | ||
17 | * Note that this register image is in a different order than | ||
18 | * the register image that Linux produces at interrupt time. | ||
19 | * | ||
20 | * Linux's register image is defined by struct pt_regs in ptrace.h. | ||
21 | * Just why GDB uses a different order is a historical mystery. | ||
22 | */ | ||
23 | #ifdef CONFIG_X86_32 | ||
24 | enum regnames { | ||
25 | GDB_AX, /* 0 */ | ||
26 | GDB_CX, /* 1 */ | ||
27 | GDB_DX, /* 2 */ | ||
28 | GDB_BX, /* 3 */ | ||
29 | GDB_SP, /* 4 */ | ||
30 | GDB_BP, /* 5 */ | ||
31 | GDB_SI, /* 6 */ | ||
32 | GDB_DI, /* 7 */ | ||
33 | GDB_PC, /* 8 also known as eip */ | ||
34 | GDB_PS, /* 9 also known as eflags */ | ||
35 | GDB_CS, /* 10 */ | ||
36 | GDB_SS, /* 11 */ | ||
37 | GDB_DS, /* 12 */ | ||
38 | GDB_ES, /* 13 */ | ||
39 | GDB_FS, /* 14 */ | ||
40 | GDB_GS, /* 15 */ | ||
41 | }; | ||
42 | #else /* ! CONFIG_X86_32 */ | ||
43 | enum regnames { | ||
44 | GDB_AX, /* 0 */ | ||
45 | GDB_DX, /* 1 */ | ||
46 | GDB_CX, /* 2 */ | ||
47 | GDB_BX, /* 3 */ | ||
48 | GDB_SI, /* 4 */ | ||
49 | GDB_DI, /* 5 */ | ||
50 | GDB_BP, /* 6 */ | ||
51 | GDB_SP, /* 7 */ | ||
52 | GDB_R8, /* 8 */ | ||
53 | GDB_R9, /* 9 */ | ||
54 | GDB_R10, /* 10 */ | ||
55 | GDB_R11, /* 11 */ | ||
56 | GDB_R12, /* 12 */ | ||
57 | GDB_R13, /* 13 */ | ||
58 | GDB_R14, /* 14 */ | ||
59 | GDB_R15, /* 15 */ | ||
60 | GDB_PC, /* 16 */ | ||
61 | GDB_PS, /* 17 */ | ||
62 | }; | ||
63 | #endif /* CONFIG_X86_32 */ | ||
64 | |||
65 | /* | ||
66 | * Number of bytes of registers: | ||
67 | */ | ||
68 | #ifdef CONFIG_X86_32 | ||
69 | # define NUMREGBYTES 64 | ||
70 | #else | ||
71 | # define NUMREGBYTES ((GDB_PS+1)*8) | ||
72 | #endif | ||
73 | |||
74 | static inline void arch_kgdb_breakpoint(void) | ||
75 | { | ||
76 | asm(" int $3"); | ||
77 | } | ||
78 | #define BREAK_INSTR_SIZE 1 | ||
79 | #define CACHE_FLUSH_IS_SAFE 1 | ||
80 | |||
81 | #endif /* _ASM_KGDB_H_ */ | ||
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h index 61ad7b5d142e..54980b0b3892 100644 --- a/include/asm-x86/kprobes.h +++ b/include/asm-x86/kprobes.h | |||
@@ -35,12 +35,12 @@ typedef u8 kprobe_opcode_t; | |||
35 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | 35 | #define RELATIVEJUMP_INSTRUCTION 0xe9 |
36 | #define MAX_INSN_SIZE 16 | 36 | #define MAX_INSN_SIZE 16 |
37 | #define MAX_STACK_SIZE 64 | 37 | #define MAX_STACK_SIZE 64 |
38 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | 38 | #define MIN_STACK_SIZE(ADDR) \ |
39 | (((unsigned long)current_thread_info()) + THREAD_SIZE \ | 39 | (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ |
40 | - (unsigned long)(ADDR))) \ | 40 | THREAD_SIZE - (unsigned long)(ADDR))) \ |
41 | ? (MAX_STACK_SIZE) \ | 41 | ? (MAX_STACK_SIZE) \ |
42 | : (((unsigned long)current_thread_info()) + THREAD_SIZE \ | 42 | : (((unsigned long)current_thread_info()) + \ |
43 | - (unsigned long)(ADDR))) | 43 | THREAD_SIZE - (unsigned long)(ADDR))) |
44 | 44 | ||
45 | #define flush_insn_slot(p) do { } while (0) | 45 | #define flush_insn_slot(p) do { } while (0) |
46 | 46 | ||
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index 4702b04b979a..68ee390b2844 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h | |||
@@ -22,15 +22,16 @@ | |||
22 | 22 | ||
23 | #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) | 23 | #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) |
24 | #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) | 24 | #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) |
25 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL) | 25 | #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ |
26 | 0xFFFFFF0000000000ULL) | ||
26 | 27 | ||
27 | #define KVM_GUEST_CR0_MASK \ | 28 | #define KVM_GUEST_CR0_MASK \ |
28 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \ | 29 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \ |
29 | | X86_CR0_NW | X86_CR0_CD) | 30 | | X86_CR0_NW | X86_CR0_CD) |
30 | #define KVM_VM_CR0_ALWAYS_ON \ | 31 | #define KVM_VM_CR0_ALWAYS_ON \ |
31 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \ | 32 | (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \ |
32 | | X86_CR0_MP) | 33 | | X86_CR0_MP) |
33 | #define KVM_GUEST_CR4_MASK \ | 34 | #define KVM_GUEST_CR4_MASK \ |
34 | (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) | 35 | (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE) |
35 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) | 36 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) |
36 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) | 37 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) |
@@ -133,12 +134,12 @@ struct kvm_pte_chain { | |||
133 | union kvm_mmu_page_role { | 134 | union kvm_mmu_page_role { |
134 | unsigned word; | 135 | unsigned word; |
135 | struct { | 136 | struct { |
136 | unsigned glevels : 4; | 137 | unsigned glevels:4; |
137 | unsigned level : 4; | 138 | unsigned level:4; |
138 | unsigned quadrant : 2; | 139 | unsigned quadrant:2; |
139 | unsigned pad_for_nice_hex_output : 6; | 140 | unsigned pad_for_nice_hex_output:6; |
140 | unsigned metaphysical : 1; | 141 | unsigned metaphysical:1; |
141 | unsigned access : 3; | 142 | unsigned access:3; |
142 | }; | 143 | }; |
143 | }; | 144 | }; |
144 | 145 | ||
@@ -606,6 +607,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) | |||
606 | #define TSS_BASE_SIZE 0x68 | 607 | #define TSS_BASE_SIZE 0x68 |
607 | #define TSS_IOPB_SIZE (65536 / 8) | 608 | #define TSS_IOPB_SIZE (65536 / 8) |
608 | #define TSS_REDIRECTION_SIZE (256 / 8) | 609 | #define TSS_REDIRECTION_SIZE (256 / 8) |
609 | #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) | 610 | #define RMODE_TSS_SIZE \ |
611 | (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) | ||
610 | 612 | ||
611 | #endif | 613 | #endif |
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h index 7db91b9bdcd4..d6337f941c98 100644 --- a/include/asm-x86/kvm_x86_emulate.h +++ b/include/asm-x86/kvm_x86_emulate.h | |||
@@ -68,10 +68,10 @@ struct x86_emulate_ops { | |||
68 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. | 68 | * @val: [OUT] Value read from memory, zero-extended to 'u_long'. |
69 | * @bytes: [IN ] Number of bytes to read from memory. | 69 | * @bytes: [IN ] Number of bytes to read from memory. |
70 | */ | 70 | */ |
71 | int (*read_emulated) (unsigned long addr, | 71 | int (*read_emulated)(unsigned long addr, |
72 | void *val, | 72 | void *val, |
73 | unsigned int bytes, | 73 | unsigned int bytes, |
74 | struct kvm_vcpu *vcpu); | 74 | struct kvm_vcpu *vcpu); |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * write_emulated: Read bytes from emulated/special memory area. | 77 | * write_emulated: Read bytes from emulated/special memory area. |
@@ -80,10 +80,10 @@ struct x86_emulate_ops { | |||
80 | * required). | 80 | * required). |
81 | * @bytes: [IN ] Number of bytes to write to memory. | 81 | * @bytes: [IN ] Number of bytes to write to memory. |
82 | */ | 82 | */ |
83 | int (*write_emulated) (unsigned long addr, | 83 | int (*write_emulated)(unsigned long addr, |
84 | const void *val, | 84 | const void *val, |
85 | unsigned int bytes, | 85 | unsigned int bytes, |
86 | struct kvm_vcpu *vcpu); | 86 | struct kvm_vcpu *vcpu); |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an | 89 | * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an |
@@ -93,11 +93,11 @@ struct x86_emulate_ops { | |||
93 | * @new: [IN ] Value to write to @addr. | 93 | * @new: [IN ] Value to write to @addr. |
94 | * @bytes: [IN ] Number of bytes to access using CMPXCHG. | 94 | * @bytes: [IN ] Number of bytes to access using CMPXCHG. |
95 | */ | 95 | */ |
96 | int (*cmpxchg_emulated) (unsigned long addr, | 96 | int (*cmpxchg_emulated)(unsigned long addr, |
97 | const void *old, | 97 | const void *old, |
98 | const void *new, | 98 | const void *new, |
99 | unsigned int bytes, | 99 | unsigned int bytes, |
100 | struct kvm_vcpu *vcpu); | 100 | struct kvm_vcpu *vcpu); |
101 | 101 | ||
102 | }; | 102 | }; |
103 | 103 | ||
@@ -143,7 +143,7 @@ struct x86_emulate_ctxt { | |||
143 | /* Register state before/after emulation. */ | 143 | /* Register state before/after emulation. */ |
144 | struct kvm_vcpu *vcpu; | 144 | struct kvm_vcpu *vcpu; |
145 | 145 | ||
146 | /* Linear faulting address (if emulating a page-faulting instruction). */ | 146 | /* Linear faulting address (if emulating a page-faulting instruction) */ |
147 | unsigned long eflags; | 147 | unsigned long eflags; |
148 | 148 | ||
149 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ | 149 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ |
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h index 9b17571e9bc3..be4a7247fa2b 100644 --- a/include/asm-x86/lguest.h +++ b/include/asm-x86/lguest.h | |||
@@ -34,8 +34,7 @@ extern const char lgstart_iret[], lgend_iret[]; | |||
34 | extern void lguest_iret(void); | 34 | extern void lguest_iret(void); |
35 | extern void lguest_init(void); | 35 | extern void lguest_init(void); |
36 | 36 | ||
37 | struct lguest_regs | 37 | struct lguest_regs { |
38 | { | ||
39 | /* Manually saved part. */ | 38 | /* Manually saved part. */ |
40 | unsigned long eax, ebx, ecx, edx; | 39 | unsigned long eax, ebx, ecx, edx; |
41 | unsigned long esi, edi, ebp; | 40 | unsigned long esi, edi, ebp; |
@@ -51,8 +50,7 @@ struct lguest_regs | |||
51 | }; | 50 | }; |
52 | 51 | ||
53 | /* This is a guest-specific page (mapped ro) into the guest. */ | 52 | /* This is a guest-specific page (mapped ro) into the guest. */ |
54 | struct lguest_ro_state | 53 | struct lguest_ro_state { |
55 | { | ||
56 | /* Host information we need to restore when we switch back. */ | 54 | /* Host information we need to restore when we switch back. */ |
57 | u32 host_cr3; | 55 | u32 host_cr3; |
58 | struct desc_ptr host_idt_desc; | 56 | struct desc_ptr host_idt_desc; |
@@ -67,8 +65,7 @@ struct lguest_ro_state | |||
67 | struct desc_struct guest_gdt[GDT_ENTRIES]; | 65 | struct desc_struct guest_gdt[GDT_ENTRIES]; |
68 | }; | 66 | }; |
69 | 67 | ||
70 | struct lg_cpu_arch | 68 | struct lg_cpu_arch { |
71 | { | ||
72 | /* The GDT entries copied into lguest_ro_state when running. */ | 69 | /* The GDT entries copied into lguest_ro_state when running. */ |
73 | struct desc_struct gdt[GDT_ENTRIES]; | 70 | struct desc_struct gdt[GDT_ENTRIES]; |
74 | 71 | ||
@@ -85,7 +82,7 @@ static inline void lguest_set_ts(void) | |||
85 | 82 | ||
86 | cr0 = read_cr0(); | 83 | cr0 = read_cr0(); |
87 | if (!(cr0 & 8)) | 84 | if (!(cr0 & 8)) |
88 | write_cr0(cr0|8); | 85 | write_cr0(cr0 | 8); |
89 | } | 86 | } |
90 | 87 | ||
91 | /* Full 4G segment descriptors, suitable for CS and DS. */ | 88 | /* Full 4G segment descriptors, suitable for CS and DS. */ |
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index f239e7069cab..a3241f28e34a 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h | |||
@@ -46,7 +46,7 @@ hcall(unsigned long call, | |||
46 | { | 46 | { |
47 | /* "int" is the Intel instruction to trigger a trap. */ | 47 | /* "int" is the Intel instruction to trigger a trap. */ |
48 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) | 48 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) |
49 | /* The call in %eax (aka "a") might be overwritten */ | 49 | /* The call in %eax (aka "a") might be overwritten */ |
50 | : "=a"(call) | 50 | : "=a"(call) |
51 | /* The arguments are in %eax, %edx, %ebx & %ecx */ | 51 | /* The arguments are in %eax, %edx, %ebx & %ecx */ |
52 | : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) | 52 | : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) |
@@ -62,8 +62,7 @@ hcall(unsigned long call, | |||
62 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) | 62 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) |
63 | 63 | ||
64 | #define LHCALL_RING_SIZE 64 | 64 | #define LHCALL_RING_SIZE 64 |
65 | struct hcall_args | 65 | struct hcall_args { |
66 | { | ||
67 | /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ | 66 | /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ |
68 | unsigned long arg0, arg2, arg3, arg1; | 67 | unsigned long arg0, arg2, arg3, arg1; |
69 | }; | 68 | }; |
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h index c048353f4b85..64e444f8e85b 100644 --- a/include/asm-x86/linkage.h +++ b/include/asm-x86/linkage.h | |||
@@ -1,6 +1,9 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | 1 | #ifndef __ASM_LINKAGE_H |
2 | #define __ASM_LINKAGE_H | 2 | #define __ASM_LINKAGE_H |
3 | 3 | ||
4 | #undef notrace | ||
5 | #define notrace __attribute__((no_instrument_function)) | ||
6 | |||
4 | #ifdef CONFIG_X86_64 | 7 | #ifdef CONFIG_X86_64 |
5 | #define __ALIGN .p2align 4,,15 | 8 | #define __ALIGN .p2align 4,,15 |
6 | #define __ALIGN_STR ".p2align 4,,15" | 9 | #define __ALIGN_STR ".p2align 4,,15" |
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h index f852c62b3319..330a72496abd 100644 --- a/include/asm-x86/local.h +++ b/include/asm-x86/local.h | |||
@@ -18,32 +18,28 @@ typedef struct { | |||
18 | 18 | ||
19 | static inline void local_inc(local_t *l) | 19 | static inline void local_inc(local_t *l) |
20 | { | 20 | { |
21 | __asm__ __volatile__( | 21 | asm volatile(_ASM_INC "%0" |
22 | _ASM_INC "%0" | 22 | : "+m" (l->a.counter)); |
23 | :"+m" (l->a.counter)); | ||
24 | } | 23 | } |
25 | 24 | ||
26 | static inline void local_dec(local_t *l) | 25 | static inline void local_dec(local_t *l) |
27 | { | 26 | { |
28 | __asm__ __volatile__( | 27 | asm volatile(_ASM_DEC "%0" |
29 | _ASM_DEC "%0" | 28 | : "+m" (l->a.counter)); |
30 | :"+m" (l->a.counter)); | ||
31 | } | 29 | } |
32 | 30 | ||
33 | static inline void local_add(long i, local_t *l) | 31 | static inline void local_add(long i, local_t *l) |
34 | { | 32 | { |
35 | __asm__ __volatile__( | 33 | asm volatile(_ASM_ADD "%1,%0" |
36 | _ASM_ADD "%1,%0" | 34 | : "+m" (l->a.counter) |
37 | :"+m" (l->a.counter) | 35 | : "ir" (i)); |
38 | :"ir" (i)); | ||
39 | } | 36 | } |
40 | 37 | ||
41 | static inline void local_sub(long i, local_t *l) | 38 | static inline void local_sub(long i, local_t *l) |
42 | { | 39 | { |
43 | __asm__ __volatile__( | 40 | asm volatile(_ASM_SUB "%1,%0" |
44 | _ASM_SUB "%1,%0" | 41 | : "+m" (l->a.counter) |
45 | :"+m" (l->a.counter) | 42 | : "ir" (i)); |
46 | :"ir" (i)); | ||
47 | } | 43 | } |
48 | 44 | ||
49 | /** | 45 | /** |
@@ -59,10 +55,9 @@ static inline int local_sub_and_test(long i, local_t *l) | |||
59 | { | 55 | { |
60 | unsigned char c; | 56 | unsigned char c; |
61 | 57 | ||
62 | __asm__ __volatile__( | 58 | asm volatile(_ASM_SUB "%2,%0; sete %1" |
63 | _ASM_SUB "%2,%0; sete %1" | 59 | : "+m" (l->a.counter), "=qm" (c) |
64 | :"+m" (l->a.counter), "=qm" (c) | 60 | : "ir" (i) : "memory"); |
65 | :"ir" (i) : "memory"); | ||
66 | return c; | 61 | return c; |
67 | } | 62 | } |
68 | 63 | ||
@@ -78,10 +73,9 @@ static inline int local_dec_and_test(local_t *l) | |||
78 | { | 73 | { |
79 | unsigned char c; | 74 | unsigned char c; |
80 | 75 | ||
81 | __asm__ __volatile__( | 76 | asm volatile(_ASM_DEC "%0; sete %1" |
82 | _ASM_DEC "%0; sete %1" | 77 | : "+m" (l->a.counter), "=qm" (c) |
83 | :"+m" (l->a.counter), "=qm" (c) | 78 | : : "memory"); |
84 | : : "memory"); | ||
85 | return c != 0; | 79 | return c != 0; |
86 | } | 80 | } |
87 | 81 | ||
@@ -97,10 +91,9 @@ static inline int local_inc_and_test(local_t *l) | |||
97 | { | 91 | { |
98 | unsigned char c; | 92 | unsigned char c; |
99 | 93 | ||
100 | __asm__ __volatile__( | 94 | asm volatile(_ASM_INC "%0; sete %1" |
101 | _ASM_INC "%0; sete %1" | 95 | : "+m" (l->a.counter), "=qm" (c) |
102 | :"+m" (l->a.counter), "=qm" (c) | 96 | : : "memory"); |
103 | : : "memory"); | ||
104 | return c != 0; | 97 | return c != 0; |
105 | } | 98 | } |
106 | 99 | ||
@@ -117,10 +110,9 @@ static inline int local_add_negative(long i, local_t *l) | |||
117 | { | 110 | { |
118 | unsigned char c; | 111 | unsigned char c; |
119 | 112 | ||
120 | __asm__ __volatile__( | 113 | asm volatile(_ASM_ADD "%2,%0; sets %1" |
121 | _ASM_ADD "%2,%0; sets %1" | 114 | : "+m" (l->a.counter), "=qm" (c) |
122 | :"+m" (l->a.counter), "=qm" (c) | 115 | : "ir" (i) : "memory"); |
123 | :"ir" (i) : "memory"); | ||
124 | return c; | 116 | return c; |
125 | } | 117 | } |
126 | 118 | ||
@@ -141,10 +133,9 @@ static inline long local_add_return(long i, local_t *l) | |||
141 | #endif | 133 | #endif |
142 | /* Modern 486+ processor */ | 134 | /* Modern 486+ processor */ |
143 | __i = i; | 135 | __i = i; |
144 | __asm__ __volatile__( | 136 | asm volatile(_ASM_XADD "%0, %1;" |
145 | _ASM_XADD "%0, %1;" | 137 | : "+r" (i), "+m" (l->a.counter) |
146 | :"+r" (i), "+m" (l->a.counter) | 138 | : : "memory"); |
147 | : : "memory"); | ||
148 | return i + __i; | 139 | return i + __i; |
149 | 140 | ||
150 | #ifdef CONFIG_M386 | 141 | #ifdef CONFIG_M386 |
@@ -182,11 +173,11 @@ static inline long local_sub_return(long i, local_t *l) | |||
182 | #define local_add_unless(l, a, u) \ | 173 | #define local_add_unless(l, a, u) \ |
183 | ({ \ | 174 | ({ \ |
184 | long c, old; \ | 175 | long c, old; \ |
185 | c = local_read(l); \ | 176 | c = local_read((l)); \ |
186 | for (;;) { \ | 177 | for (;;) { \ |
187 | if (unlikely(c == (u))) \ | 178 | if (unlikely(c == (u))) \ |
188 | break; \ | 179 | break; \ |
189 | old = local_cmpxchg((l), c, c + (a)); \ | 180 | old = local_cmpxchg((l), c, c + (a)); \ |
190 | if (likely(old == c)) \ | 181 | if (likely(old == c)) \ |
191 | break; \ | 182 | break; \ |
192 | c = old; \ | 183 | c = old; \ |
@@ -214,26 +205,30 @@ static inline long local_sub_return(long i, local_t *l) | |||
214 | 205 | ||
215 | /* Need to disable preemption for the cpu local counters otherwise we could | 206 | /* Need to disable preemption for the cpu local counters otherwise we could |
216 | still access a variable of a previous CPU in a non atomic way. */ | 207 | still access a variable of a previous CPU in a non atomic way. */ |
217 | #define cpu_local_wrap_v(l) \ | 208 | #define cpu_local_wrap_v(l) \ |
218 | ({ local_t res__; \ | 209 | ({ \ |
219 | preempt_disable(); \ | 210 | local_t res__; \ |
220 | res__ = (l); \ | 211 | preempt_disable(); \ |
221 | preempt_enable(); \ | 212 | res__ = (l); \ |
222 | res__; }) | 213 | preempt_enable(); \ |
214 | res__; \ | ||
215 | }) | ||
223 | #define cpu_local_wrap(l) \ | 216 | #define cpu_local_wrap(l) \ |
224 | ({ preempt_disable(); \ | 217 | ({ \ |
225 | l; \ | 218 | preempt_disable(); \ |
226 | preempt_enable(); }) \ | 219 | (l); \ |
227 | 220 | preempt_enable(); \ | |
228 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) | 221 | }) \ |
229 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) | 222 | |
230 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) | 223 | #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l)))) |
231 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) | 224 | #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i))) |
232 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) | 225 | #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l)))) |
233 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) | 226 | #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l)))) |
234 | 227 | #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l)))) | |
235 | #define __cpu_local_inc(l) cpu_local_inc(l) | 228 | #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l)))) |
236 | #define __cpu_local_dec(l) cpu_local_dec(l) | 229 | |
230 | #define __cpu_local_inc(l) cpu_local_inc((l)) | ||
231 | #define __cpu_local_dec(l) cpu_local_dec((l)) | ||
237 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) | 232 | #define __cpu_local_add(i, l) cpu_local_add((i), (l)) |
238 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) | 233 | #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) |
239 | 234 | ||
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h index 6df235e8ea91..8327907c79bf 100644 --- a/include/asm-x86/mach-bigsmp/mach_apic.h +++ b/include/asm-x86/mach-bigsmp/mach_apic.h | |||
@@ -1,10 +1,7 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_MACH_APIC_H |
3 | 3 | ||
4 | 4 | #define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) | |
5 | extern u8 bios_cpu_apicid[]; | ||
6 | |||
7 | #define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) | ||
8 | #define esr_disable (1) | 5 | #define esr_disable (1) |
9 | 6 | ||
10 | static inline int apic_id_registered(void) | 7 | static inline int apic_id_registered(void) |
@@ -90,7 +87,7 @@ static inline int apicid_to_node(int logical_apicid) | |||
90 | static inline int cpu_present_to_apicid(int mps_cpu) | 87 | static inline int cpu_present_to_apicid(int mps_cpu) |
91 | { | 88 | { |
92 | if (mps_cpu < NR_CPUS) | 89 | if (mps_cpu < NR_CPUS) |
93 | return (int) bios_cpu_apicid[mps_cpu]; | 90 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); |
94 | 91 | ||
95 | return BAD_APICID; | 92 | return BAD_APICID; |
96 | } | 93 | } |
@@ -109,17 +106,6 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
109 | return cpu_physical_id(cpu); | 106 | return cpu_physical_id(cpu); |
110 | } | 107 | } |
111 | 108 | ||
112 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
113 | struct mpc_config_translation *translation_record) | ||
114 | { | ||
115 | printk("Processor #%d %u:%u APIC version %d\n", | ||
116 | m->mpc_apicid, | ||
117 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
118 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
119 | m->mpc_apicver); | ||
120 | return m->mpc_apicid; | ||
121 | } | ||
122 | |||
123 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | 109 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) |
124 | { | 110 | { |
125 | /* For clustered we don't have a good way to do this yet - hack */ | 111 | /* For clustered we don't have a good way to do this yet - hack */ |
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index e3c2c1012c1c..0a6634f62abe 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_MACH_APIC_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_LOCAL_APIC | ||
5 | |||
4 | #include <mach_apicdef.h> | 6 | #include <mach_apicdef.h> |
5 | #include <asm/smp.h> | 7 | #include <asm/smp.h> |
6 | 8 | ||
@@ -14,24 +16,25 @@ static inline cpumask_t target_cpus(void) | |||
14 | return cpumask_of_cpu(0); | 16 | return cpumask_of_cpu(0); |
15 | #endif | 17 | #endif |
16 | } | 18 | } |
17 | #define TARGET_CPUS (target_cpus()) | ||
18 | 19 | ||
19 | #define NO_BALANCE_IRQ (0) | 20 | #define NO_BALANCE_IRQ (0) |
20 | #define esr_disable (0) | 21 | #define esr_disable (0) |
21 | 22 | ||
23 | #ifdef CONFIG_X86_64 | ||
24 | #include <asm/genapic.h> | ||
25 | #define INT_DELIVERY_MODE (genapic->int_delivery_mode) | ||
26 | #define INT_DEST_MODE (genapic->int_dest_mode) | ||
27 | #define TARGET_CPUS (genapic->target_cpus()) | ||
28 | #define apic_id_registered (genapic->apic_id_registered) | ||
29 | #define init_apic_ldr (genapic->init_apic_ldr) | ||
30 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | ||
31 | #define phys_pkg_id (genapic->phys_pkg_id) | ||
32 | #define vector_allocation_domain (genapic->vector_allocation_domain) | ||
33 | extern void setup_apic_routing(void); | ||
34 | #else | ||
22 | #define INT_DELIVERY_MODE dest_LowestPrio | 35 | #define INT_DELIVERY_MODE dest_LowestPrio |
23 | #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ | 36 | #define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ |
24 | 37 | #define TARGET_CPUS (target_cpus()) | |
25 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | ||
26 | { | ||
27 | return physid_isset(apicid, bitmap); | ||
28 | } | ||
29 | |||
30 | static inline unsigned long check_apicid_present(int bit) | ||
31 | { | ||
32 | return physid_isset(bit, phys_cpu_present_map); | ||
33 | } | ||
34 | |||
35 | /* | 38 | /* |
36 | * Set up the logical destination ID. | 39 | * Set up the logical destination ID. |
37 | * | 40 | * |
@@ -49,23 +52,51 @@ static inline void init_apic_ldr(void) | |||
49 | apic_write_around(APIC_LDR, val); | 52 | apic_write_around(APIC_LDR, val); |
50 | } | 53 | } |
51 | 54 | ||
52 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | 55 | static inline int apic_id_registered(void) |
53 | { | 56 | { |
54 | return phys_map; | 57 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); |
58 | } | ||
59 | |||
60 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
61 | { | ||
62 | return cpus_addr(cpumask)[0]; | ||
63 | } | ||
64 | |||
65 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | ||
66 | { | ||
67 | return cpuid_apic >> index_msb; | ||
55 | } | 68 | } |
56 | 69 | ||
57 | static inline void setup_apic_routing(void) | 70 | static inline void setup_apic_routing(void) |
58 | { | 71 | { |
72 | #ifdef CONFIG_X86_IO_APIC | ||
59 | printk("Enabling APIC mode: %s. Using %d I/O APICs\n", | 73 | printk("Enabling APIC mode: %s. Using %d I/O APICs\n", |
60 | "Flat", nr_ioapics); | 74 | "Flat", nr_ioapics); |
75 | #endif | ||
61 | } | 76 | } |
62 | 77 | ||
63 | static inline int multi_timer_check(int apic, int irq) | 78 | static inline int apicid_to_node(int logical_apicid) |
64 | { | 79 | { |
65 | return 0; | 80 | return 0; |
66 | } | 81 | } |
82 | #endif | ||
67 | 83 | ||
68 | static inline int apicid_to_node(int logical_apicid) | 84 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) |
85 | { | ||
86 | return physid_isset(apicid, bitmap); | ||
87 | } | ||
88 | |||
89 | static inline unsigned long check_apicid_present(int bit) | ||
90 | { | ||
91 | return physid_isset(bit, phys_cpu_present_map); | ||
92 | } | ||
93 | |||
94 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | ||
95 | { | ||
96 | return phys_map; | ||
97 | } | ||
98 | |||
99 | static inline int multi_timer_check(int apic, int irq) | ||
69 | { | 100 | { |
70 | return 0; | 101 | return 0; |
71 | } | 102 | } |
@@ -78,8 +109,13 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
78 | 109 | ||
79 | static inline int cpu_present_to_apicid(int mps_cpu) | 110 | static inline int cpu_present_to_apicid(int mps_cpu) |
80 | { | 111 | { |
112 | #ifdef CONFIG_X86_64 | ||
113 | if (cpu_present(mps_cpu)) | ||
114 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); | ||
115 | #else | ||
81 | if (mps_cpu < get_physical_broadcast()) | 116 | if (mps_cpu < get_physical_broadcast()) |
82 | return mps_cpu; | 117 | return mps_cpu; |
118 | #endif | ||
83 | else | 119 | else |
84 | return BAD_APICID; | 120 | return BAD_APICID; |
85 | } | 121 | } |
@@ -89,17 +125,6 @@ static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) | |||
89 | return physid_mask_of_physid(phys_apicid); | 125 | return physid_mask_of_physid(phys_apicid); |
90 | } | 126 | } |
91 | 127 | ||
92 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
93 | struct mpc_config_translation *translation_record) | ||
94 | { | ||
95 | printk("Processor #%d %u:%u APIC version %d\n", | ||
96 | m->mpc_apicid, | ||
97 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
98 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
99 | m->mpc_apicver); | ||
100 | return m->mpc_apicid; | ||
101 | } | ||
102 | |||
103 | static inline void setup_portio_remap(void) | 128 | static inline void setup_portio_remap(void) |
104 | { | 129 | { |
105 | } | 130 | } |
@@ -109,23 +134,9 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) | |||
109 | return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); | 134 | return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); |
110 | } | 135 | } |
111 | 136 | ||
112 | static inline int apic_id_registered(void) | ||
113 | { | ||
114 | return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); | ||
115 | } | ||
116 | |||
117 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
118 | { | ||
119 | return cpus_addr(cpumask)[0]; | ||
120 | } | ||
121 | |||
122 | static inline void enable_apic_mode(void) | 137 | static inline void enable_apic_mode(void) |
123 | { | 138 | { |
124 | } | 139 | } |
125 | 140 | ||
126 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | 141 | #endif /* CONFIG_X86_LOCAL_APIC */ |
127 | { | ||
128 | return cpuid_apic >> index_msb; | ||
129 | } | ||
130 | |||
131 | #endif /* __ASM_MACH_APIC_H */ | 142 | #endif /* __ASM_MACH_APIC_H */ |
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h index ae9841319094..e4b29ba37de6 100644 --- a/include/asm-x86/mach-default/mach_apicdef.h +++ b/include/asm-x86/mach-default/mach_apicdef.h | |||
@@ -3,10 +3,14 @@ | |||
3 | 3 | ||
4 | #include <asm/apic.h> | 4 | #include <asm/apic.h> |
5 | 5 | ||
6 | #ifdef CONFIG_X86_64 | ||
7 | #define APIC_ID_MASK (0xFFu<<24) | ||
8 | #define GET_APIC_ID(x) (((x)>>24)&0xFFu) | ||
9 | #define SET_APIC_ID(x) (((x)<<24)) | ||
10 | #else | ||
6 | #define APIC_ID_MASK (0xF<<24) | 11 | #define APIC_ID_MASK (0xF<<24) |
7 | |||
8 | static inline unsigned get_apic_id(unsigned long x) | 12 | static inline unsigned get_apic_id(unsigned long x) |
9 | { | 13 | { |
10 | unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); | 14 | unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR)); |
11 | if (APIC_XAPIC(ver)) | 15 | if (APIC_XAPIC(ver)) |
12 | return (((x)>>24)&0xFF); | 16 | return (((x)>>24)&0xFF); |
@@ -15,5 +19,6 @@ static inline unsigned get_apic_id(unsigned long x) | |||
15 | } | 19 | } |
16 | 20 | ||
17 | #define GET_APIC_ID(x) get_apic_id(x) | 21 | #define GET_APIC_ID(x) get_apic_id(x) |
22 | #endif | ||
18 | 23 | ||
19 | #endif | 24 | #endif |
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h index 0dba244c86db..be323364e68f 100644 --- a/include/asm-x86/mach-default/mach_ipi.h +++ b/include/asm-x86/mach-default/mach_ipi.h | |||
@@ -9,10 +9,15 @@ void __send_IPI_shortcut(unsigned int shortcut, int vector); | |||
9 | 9 | ||
10 | extern int no_broadcast; | 10 | extern int no_broadcast; |
11 | 11 | ||
12 | #ifdef CONFIG_X86_64 | ||
13 | #include <asm/genapic.h> | ||
14 | #define send_IPI_mask (genapic->send_IPI_mask) | ||
15 | #else | ||
12 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 16 | static inline void send_IPI_mask(cpumask_t mask, int vector) |
13 | { | 17 | { |
14 | send_IPI_mask_bitmask(mask, vector); | 18 | send_IPI_mask_bitmask(mask, vector); |
15 | } | 19 | } |
20 | #endif | ||
16 | 21 | ||
17 | static inline void __local_send_IPI_allbutself(int vector) | 22 | static inline void __local_send_IPI_allbutself(int vector) |
18 | { | 23 | { |
@@ -33,6 +38,10 @@ static inline void __local_send_IPI_all(int vector) | |||
33 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector); | 38 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector); |
34 | } | 39 | } |
35 | 40 | ||
41 | #ifdef CONFIG_X86_64 | ||
42 | #define send_IPI_allbutself (genapic->send_IPI_allbutself) | ||
43 | #define send_IPI_all (genapic->send_IPI_all) | ||
44 | #else | ||
36 | static inline void send_IPI_allbutself(int vector) | 45 | static inline void send_IPI_allbutself(int vector) |
37 | { | 46 | { |
38 | /* | 47 | /* |
@@ -50,5 +59,6 @@ static inline void send_IPI_all(int vector) | |||
50 | { | 59 | { |
51 | __local_send_IPI_all(vector); | 60 | __local_send_IPI_all(vector); |
52 | } | 61 | } |
62 | #endif | ||
53 | 63 | ||
54 | #endif /* __ASM_MACH_IPI_H */ | 64 | #endif /* __ASM_MACH_IPI_H */ |
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h index 1d3832482580..d14108505bb8 100644 --- a/include/asm-x86/mach-default/mach_mpparse.h +++ b/include/asm-x86/mach-default/mach_mpparse.h | |||
@@ -1,17 +1,6 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | 1 | #ifndef __ASM_MACH_MPPARSE_H |
2 | #define __ASM_MACH_MPPARSE_H | 2 | #define __ASM_MACH_MPPARSE_H |
3 | 3 | ||
4 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
5 | struct mpc_config_translation *translation) | ||
6 | { | ||
7 | // Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
8 | } | ||
9 | |||
10 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
11 | struct mpc_config_translation *translation) | ||
12 | { | ||
13 | } | ||
14 | |||
15 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | 4 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, |
16 | char *productid) | 5 | char *productid) |
17 | { | 6 | { |
diff --git a/include/asm-x86/mach-default/mach_reboot.h b/include/asm-x86/mach-default/mach_reboot.h deleted file mode 100644 index 6adee6a97dec..000000000000 --- a/include/asm-x86/mach-default/mach_reboot.h +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | /* | ||
2 | * arch/i386/mach-generic/mach_reboot.h | ||
3 | * | ||
4 | * Machine specific reboot functions for generic. | ||
5 | * Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp> | ||
6 | */ | ||
7 | #ifndef _MACH_REBOOT_H | ||
8 | #define _MACH_REBOOT_H | ||
9 | |||
10 | static inline void kb_wait(void) | ||
11 | { | ||
12 | int i; | ||
13 | |||
14 | for (i = 0; i < 0x10000; i++) | ||
15 | if ((inb_p(0x64) & 0x02) == 0) | ||
16 | break; | ||
17 | } | ||
18 | |||
19 | static inline void mach_reboot(void) | ||
20 | { | ||
21 | int i; | ||
22 | |||
23 | /* old method, works on most machines */ | ||
24 | for (i = 0; i < 10; i++) { | ||
25 | kb_wait(); | ||
26 | udelay(50); | ||
27 | outb(0xfe, 0x64); /* pulse reset low */ | ||
28 | udelay(50); | ||
29 | } | ||
30 | |||
31 | /* New method: sets the "System flag" which, when set, indicates | ||
32 | * successful completion of the keyboard controller self-test (Basic | ||
33 | * Assurance Test, BAT). This is needed for some machines with no | ||
34 | * keyboard plugged in. This read-modify-write sequence sets only the | ||
35 | * system flag | ||
36 | */ | ||
37 | for (i = 0; i < 10; i++) { | ||
38 | int cmd; | ||
39 | |||
40 | outb(0x20, 0x64); /* read Controller Command Byte */ | ||
41 | udelay(50); | ||
42 | kb_wait(); | ||
43 | udelay(50); | ||
44 | cmd = inb(0x60); | ||
45 | udelay(50); | ||
46 | kb_wait(); | ||
47 | udelay(50); | ||
48 | outb(0x60, 0x64); /* write Controller Command Byte */ | ||
49 | udelay(50); | ||
50 | kb_wait(); | ||
51 | udelay(50); | ||
52 | outb(cmd | 0x14, 0x60); /* set "System flag" and "Keyboard Disabled" */ | ||
53 | udelay(50); | ||
54 | kb_wait(); | ||
55 | udelay(50); | ||
56 | outb(0xfe, 0x64); /* pulse reset low */ | ||
57 | udelay(50); | ||
58 | } | ||
59 | } | ||
60 | |||
61 | #endif /* !_MACH_REBOOT_H */ | ||
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h index 7f45f6311059..3ff2c5bff93a 100644 --- a/include/asm-x86/mach-default/smpboot_hooks.h +++ b/include/asm-x86/mach-default/smpboot_hooks.h | |||
@@ -41,4 +41,11 @@ static inline void smpboot_setup_io_apic(void) | |||
41 | */ | 41 | */ |
42 | if (!skip_ioapic_setup && nr_ioapics) | 42 | if (!skip_ioapic_setup && nr_ioapics) |
43 | setup_IO_APIC(); | 43 | setup_IO_APIC(); |
44 | else | ||
45 | nr_ioapics = 0; | ||
46 | } | ||
47 | |||
48 | static inline void smpboot_clear_io_apic(void) | ||
49 | { | ||
50 | nr_ioapics = 0; | ||
44 | } | 51 | } |
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h index d23011fdf454..fbc8ad256f5a 100644 --- a/include/asm-x86/mach-es7000/mach_apic.h +++ b/include/asm-x86/mach-es7000/mach_apic.h | |||
@@ -1,9 +1,7 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | 1 | #ifndef __ASM_MACH_APIC_H |
2 | #define __ASM_MACH_APIC_H | 2 | #define __ASM_MACH_APIC_H |
3 | 3 | ||
4 | extern u8 bios_cpu_apicid[]; | 4 | #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) |
5 | |||
6 | #define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) | ||
7 | #define esr_disable (1) | 5 | #define esr_disable (1) |
8 | 6 | ||
9 | static inline int apic_id_registered(void) | 7 | static inline int apic_id_registered(void) |
@@ -80,7 +78,7 @@ extern void enable_apic_mode(void); | |||
80 | extern int apic_version [MAX_APICS]; | 78 | extern int apic_version [MAX_APICS]; |
81 | static inline void setup_apic_routing(void) | 79 | static inline void setup_apic_routing(void) |
82 | { | 80 | { |
83 | int apic = bios_cpu_apicid[smp_processor_id()]; | 81 | int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); |
84 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", | 82 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", |
85 | (apic_version[apic] == 0x14) ? | 83 | (apic_version[apic] == 0x14) ? |
86 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); | 84 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); |
@@ -102,7 +100,7 @@ static inline int cpu_present_to_apicid(int mps_cpu) | |||
102 | if (!mps_cpu) | 100 | if (!mps_cpu) |
103 | return boot_cpu_physical_apicid; | 101 | return boot_cpu_physical_apicid; |
104 | else if (mps_cpu < NR_CPUS) | 102 | else if (mps_cpu < NR_CPUS) |
105 | return (int) bios_cpu_apicid[mps_cpu]; | 103 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); |
106 | else | 104 | else |
107 | return BAD_APICID; | 105 | return BAD_APICID; |
108 | } | 106 | } |
@@ -129,16 +127,6 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
129 | #endif | 127 | #endif |
130 | } | 128 | } |
131 | 129 | ||
132 | static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) | ||
133 | { | ||
134 | printk("Processor #%d %u:%u APIC version %d\n", | ||
135 | m->mpc_apicid, | ||
136 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
137 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
138 | m->mpc_apicver); | ||
139 | return (m->mpc_apicid); | ||
140 | } | ||
141 | |||
142 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) | 130 | static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) |
143 | { | 131 | { |
144 | /* For clustered we don't have a good way to do this yet - hack */ | 132 | /* For clustered we don't have a good way to do this yet - hack */ |
@@ -153,7 +141,7 @@ static inline void setup_portio_remap(void) | |||
153 | extern unsigned int boot_cpu_physical_apicid; | 141 | extern unsigned int boot_cpu_physical_apicid; |
154 | static inline int check_phys_apicid_present(int cpu_physical_apicid) | 142 | static inline int check_phys_apicid_present(int cpu_physical_apicid) |
155 | { | 143 | { |
156 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | 144 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); |
157 | return (1); | 145 | return (1); |
158 | } | 146 | } |
159 | 147 | ||
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h index 52ee75cd0fe1..ef26d3523625 100644 --- a/include/asm-x86/mach-es7000/mach_mpparse.h +++ b/include/asm-x86/mach-es7000/mach_mpparse.h | |||
@@ -3,17 +3,6 @@ | |||
3 | 3 | ||
4 | #include <linux/acpi.h> | 4 | #include <linux/acpi.h> |
5 | 5 | ||
6 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
7 | struct mpc_config_translation *translation) | ||
8 | { | ||
9 | Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
10 | } | ||
11 | |||
12 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
13 | struct mpc_config_translation *translation) | ||
14 | { | ||
15 | } | ||
16 | |||
17 | extern int parse_unisys_oem (char *oemptr); | 6 | extern int parse_unisys_oem (char *oemptr); |
18 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); | 7 | extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); |
19 | extern void setup_unisys(void); | 8 | extern void setup_unisys(void); |
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h index a236e7021528..6eff343e1233 100644 --- a/include/asm-x86/mach-generic/mach_apic.h +++ b/include/asm-x86/mach-generic/mach_apic.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) | 19 | #define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) |
20 | #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) | 20 | #define cpu_present_to_apicid (genapic->cpu_present_to_apicid) |
21 | #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) | 21 | #define apicid_to_cpu_present (genapic->apicid_to_cpu_present) |
22 | #define mpc_apic_id (genapic->mpc_apic_id) | ||
23 | #define setup_portio_remap (genapic->setup_portio_remap) | 22 | #define setup_portio_remap (genapic->setup_portio_remap) |
24 | #define check_apicid_present (genapic->check_apicid_present) | 23 | #define check_apicid_present (genapic->check_apicid_present) |
25 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) | 24 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) |
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h index dbd9fce54f4d..0d0b5ba2e9d1 100644 --- a/include/asm-x86/mach-generic/mach_mpparse.h +++ b/include/asm-x86/mach-generic/mach_mpparse.h | |||
@@ -1,11 +1,6 @@ | |||
1 | #ifndef _MACH_MPPARSE_H | 1 | #ifndef _MACH_MPPARSE_H |
2 | #define _MACH_MPPARSE_H 1 | 2 | #define _MACH_MPPARSE_H 1 |
3 | 3 | ||
4 | #include <asm/genapic.h> | ||
5 | |||
6 | #define mpc_oem_bus_info (genapic->mpc_oem_bus_info) | ||
7 | #define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus) | ||
8 | |||
9 | int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); | 4 | int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); |
10 | int acpi_madt_oem_check(char *oem_id, char *oem_table_id); | 5 | int acpi_madt_oem_check(char *oem_id, char *oem_table_id); |
11 | 6 | ||
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h index 3b637fac890b..75a56e5afbe7 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/mach-numaq/mach_apic.h | |||
@@ -95,6 +95,16 @@ static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) | |||
95 | return physid_mask_of_physid(cpu + 4*node); | 95 | return physid_mask_of_physid(cpu + 4*node); |
96 | } | 96 | } |
97 | 97 | ||
98 | struct mpc_config_translation { | ||
99 | unsigned char mpc_type; | ||
100 | unsigned char trans_len; | ||
101 | unsigned char trans_type; | ||
102 | unsigned char trans_quad; | ||
103 | unsigned char trans_global; | ||
104 | unsigned char trans_local; | ||
105 | unsigned short trans_reserved; | ||
106 | }; | ||
107 | |||
98 | static inline int mpc_apic_id(struct mpc_config_processor *m, | 108 | static inline int mpc_apic_id(struct mpc_config_processor *m, |
99 | struct mpc_config_translation *translation_record) | 109 | struct mpc_config_translation *translation_record) |
100 | { | 110 | { |
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h index 51bbac8fc0c2..459b12401187 100644 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ b/include/asm-x86/mach-numaq/mach_mpparse.h | |||
@@ -1,25 +1,10 @@ | |||
1 | #ifndef __ASM_MACH_MPPARSE_H | 1 | #ifndef __ASM_MACH_MPPARSE_H |
2 | #define __ASM_MACH_MPPARSE_H | 2 | #define __ASM_MACH_MPPARSE_H |
3 | 3 | ||
4 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | 4 | extern void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, |
5 | struct mpc_config_translation *translation) | 5 | struct mpc_config_translation *translation); |
6 | { | 6 | extern void mpc_oem_pci_bus(struct mpc_config_bus *m, |
7 | int quad = translation->trans_quad; | 7 | struct mpc_config_translation *translation); |
8 | int local = translation->trans_local; | ||
9 | |||
10 | mp_bus_id_to_node[m->mpc_busid] = quad; | ||
11 | mp_bus_id_to_local[m->mpc_busid] = local; | ||
12 | printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); | ||
13 | } | ||
14 | |||
15 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
16 | struct mpc_config_translation *translation) | ||
17 | { | ||
18 | int quad = translation->trans_quad; | ||
19 | int local = translation->trans_local; | ||
20 | |||
21 | quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; | ||
22 | } | ||
23 | 8 | ||
24 | /* Hook from generic ACPI tables.c */ | 9 | /* Hook from generic ACPI tables.c */ |
25 | static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 10 | static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h index 062c97f6100b..1f76c2e70232 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/mach-summit/mach_apic.h | |||
@@ -40,7 +40,6 @@ static inline unsigned long check_apicid_present(int bit) | |||
40 | 40 | ||
41 | #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) | 41 | #define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) |
42 | 42 | ||
43 | extern u8 bios_cpu_apicid[]; | ||
44 | extern u8 cpu_2_logical_apicid[]; | 43 | extern u8 cpu_2_logical_apicid[]; |
45 | 44 | ||
46 | static inline void init_apic_ldr(void) | 45 | static inline void init_apic_ldr(void) |
@@ -110,7 +109,7 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
110 | static inline int cpu_present_to_apicid(int mps_cpu) | 109 | static inline int cpu_present_to_apicid(int mps_cpu) |
111 | { | 110 | { |
112 | if (mps_cpu < NR_CPUS) | 111 | if (mps_cpu < NR_CPUS) |
113 | return (int)bios_cpu_apicid[mps_cpu]; | 112 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); |
114 | else | 113 | else |
115 | return BAD_APICID; | 114 | return BAD_APICID; |
116 | } | 115 | } |
@@ -126,17 +125,6 @@ static inline physid_mask_t apicid_to_cpu_present(int apicid) | |||
126 | return physid_mask_of_physid(0); | 125 | return physid_mask_of_physid(0); |
127 | } | 126 | } |
128 | 127 | ||
129 | static inline int mpc_apic_id(struct mpc_config_processor *m, | ||
130 | struct mpc_config_translation *translation_record) | ||
131 | { | ||
132 | printk("Processor #%d %u:%u APIC version %d\n", | ||
133 | m->mpc_apicid, | ||
134 | (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, | ||
135 | (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, | ||
136 | m->mpc_apicver); | ||
137 | return m->mpc_apicid; | ||
138 | } | ||
139 | |||
140 | static inline void setup_portio_remap(void) | 128 | static inline void setup_portio_remap(void) |
141 | { | 129 | { |
142 | } | 130 | } |
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h index c2520539d934..fdf591701339 100644 --- a/include/asm-x86/mach-summit/mach_mpparse.h +++ b/include/asm-x86/mach-summit/mach_mpparse.h | |||
@@ -12,17 +12,6 @@ extern void setup_summit(void); | |||
12 | #define setup_summit() {} | 12 | #define setup_summit() {} |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, | ||
16 | struct mpc_config_translation *translation) | ||
17 | { | ||
18 | Dprintk("Bus #%d is %s\n", m->mpc_busid, name); | ||
19 | } | ||
20 | |||
21 | static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, | ||
22 | struct mpc_config_translation *translation) | ||
23 | { | ||
24 | } | ||
25 | |||
26 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | 15 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, |
27 | char *productid) | 16 | char *productid) |
28 | { | 17 | { |
diff --git a/include/asm-x86/mach-visws/mach_apic.h b/include/asm-x86/mach-visws/mach_apic.h index efac6f0d139f..a9ef33a8a995 100644 --- a/include/asm-x86/mach-visws/mach_apic.h +++ b/include/asm-x86/mach-visws/mach_apic.h | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | static inline int apic_id_registered(void) | 24 | static inline int apic_id_registered(void) |
25 | { | 25 | { |
26 | return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); | 26 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); |
27 | } | 27 | } |
28 | 28 | ||
29 | /* | 29 | /* |
diff --git a/include/asm-x86/mach-visws/smpboot_hooks.h b/include/asm-x86/mach-visws/smpboot_hooks.h index d926471fa359..c9b83e395a2e 100644 --- a/include/asm-x86/mach-visws/smpboot_hooks.h +++ b/include/asm-x86/mach-visws/smpboot_hooks.h | |||
@@ -22,3 +22,7 @@ static inline void smpboot_restore_warm_reset_vector(void) | |||
22 | static inline void smpboot_setup_io_apic(void) | 22 | static inline void smpboot_setup_io_apic(void) |
23 | { | 23 | { |
24 | } | 24 | } |
25 | |||
26 | static inline void smpboot_clear_io_apic(void) | ||
27 | { | ||
28 | } | ||
diff --git a/include/asm-x86/mach_apic.h b/include/asm-x86/mach_apic.h deleted file mode 100644 index 7b7115a0c1c9..000000000000 --- a/include/asm-x86/mach_apic.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | #ifndef __ASM_MACH_APIC_H | ||
2 | #define __ASM_MACH_APIC_H | ||
3 | |||
4 | /* | ||
5 | * Copyright 2004 James Cleverdon, IBM. | ||
6 | * Subject to the GNU Public License, v.2 | ||
7 | * | ||
8 | * Generic APIC sub-arch defines. | ||
9 | * | ||
10 | * Hacked for x86-64 by James Cleverdon from i386 architecture code by | ||
11 | * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and | ||
12 | * James Cleverdon. | ||
13 | */ | ||
14 | |||
15 | #include <asm/genapic.h> | ||
16 | |||
17 | #define INT_DELIVERY_MODE (genapic->int_delivery_mode) | ||
18 | #define INT_DEST_MODE (genapic->int_dest_mode) | ||
19 | #define TARGET_CPUS (genapic->target_cpus()) | ||
20 | #define vector_allocation_domain (genapic->vector_allocation_domain) | ||
21 | #define apic_id_registered (genapic->apic_id_registered) | ||
22 | #define init_apic_ldr (genapic->init_apic_ldr) | ||
23 | #define send_IPI_mask (genapic->send_IPI_mask) | ||
24 | #define send_IPI_allbutself (genapic->send_IPI_allbutself) | ||
25 | #define send_IPI_all (genapic->send_IPI_all) | ||
26 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | ||
27 | #define phys_pkg_id (genapic->phys_pkg_id) | ||
28 | |||
29 | #endif /* __ASM_MACH_APIC_H */ | ||
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h index cdd9f965835a..daf1ccde77af 100644 --- a/include/asm-x86/mc146818rtc.h +++ b/include/asm-x86/mc146818rtc.h | |||
@@ -42,7 +42,7 @@ extern volatile unsigned long cmos_lock; | |||
42 | static inline void lock_cmos(unsigned char reg) | 42 | static inline void lock_cmos(unsigned char reg) |
43 | { | 43 | { |
44 | unsigned long new; | 44 | unsigned long new; |
45 | new = ((smp_processor_id()+1) << 8) | reg; | 45 | new = ((smp_processor_id() + 1) << 8) | reg; |
46 | for (;;) { | 46 | for (;;) { |
47 | if (cmos_lock) { | 47 | if (cmos_lock) { |
48 | cpu_relax(); | 48 | cpu_relax(); |
@@ -57,22 +57,26 @@ static inline void unlock_cmos(void) | |||
57 | { | 57 | { |
58 | cmos_lock = 0; | 58 | cmos_lock = 0; |
59 | } | 59 | } |
60 | |||
60 | static inline int do_i_have_lock_cmos(void) | 61 | static inline int do_i_have_lock_cmos(void) |
61 | { | 62 | { |
62 | return (cmos_lock >> 8) == (smp_processor_id()+1); | 63 | return (cmos_lock >> 8) == (smp_processor_id() + 1); |
63 | } | 64 | } |
65 | |||
64 | static inline unsigned char current_lock_cmos_reg(void) | 66 | static inline unsigned char current_lock_cmos_reg(void) |
65 | { | 67 | { |
66 | return cmos_lock & 0xff; | 68 | return cmos_lock & 0xff; |
67 | } | 69 | } |
68 | #define lock_cmos_prefix(reg) \ | 70 | |
71 | #define lock_cmos_prefix(reg) \ | ||
69 | do { \ | 72 | do { \ |
70 | unsigned long cmos_flags; \ | 73 | unsigned long cmos_flags; \ |
71 | local_irq_save(cmos_flags); \ | 74 | local_irq_save(cmos_flags); \ |
72 | lock_cmos(reg) | 75 | lock_cmos(reg) |
73 | #define lock_cmos_suffix(reg) \ | 76 | |
74 | unlock_cmos(); \ | 77 | #define lock_cmos_suffix(reg) \ |
75 | local_irq_restore(cmos_flags); \ | 78 | unlock_cmos(); \ |
79 | local_irq_restore(cmos_flags); \ | ||
76 | } while (0) | 80 | } while (0) |
77 | #else | 81 | #else |
78 | #define lock_cmos_prefix(reg) do {} while (0) | 82 | #define lock_cmos_prefix(reg) do {} while (0) |
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h index fbb1f3b71279..c3dca6edc6b1 100644 --- a/include/asm-x86/mca_dma.h +++ b/include/asm-x86/mca_dma.h | |||
@@ -12,18 +12,18 @@ | |||
12 | * count by 2 when using 16-bit dma; that is not handled by these functions. | 12 | * count by 2 when using 16-bit dma; that is not handled by these functions. |
13 | * | 13 | * |
14 | * Ramen Noodles are yummy. | 14 | * Ramen Noodles are yummy. |
15 | * | 15 | * |
16 | * 1998 Tymm Twillman <tymm@computer.org> | 16 | * 1998 Tymm Twillman <tymm@computer.org> |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * Registers that are used by the DMA controller; FN is the function register | 20 | * Registers that are used by the DMA controller; FN is the function register |
21 | * (tell the controller what to do) and EXE is the execution register (how | 21 | * (tell the controller what to do) and EXE is the execution register (how |
22 | * to do it) | 22 | * to do it) |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #define MCA_DMA_REG_FN 0x18 | 25 | #define MCA_DMA_REG_FN 0x18 |
26 | #define MCA_DMA_REG_EXE 0x1A | 26 | #define MCA_DMA_REG_EXE 0x1A |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Functions that the DMA controller can do | 29 | * Functions that the DMA controller can do |
@@ -43,9 +43,9 @@ | |||
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Modes (used by setting MCA_DMA_FN_MODE in the function register) | 45 | * Modes (used by setting MCA_DMA_FN_MODE in the function register) |
46 | * | 46 | * |
47 | * Note that the MODE_READ is read from memory (write to device), and | 47 | * Note that the MODE_READ is read from memory (write to device), and |
48 | * MODE_WRITE is vice-versa. | 48 | * MODE_WRITE is vice-versa. |
49 | */ | 49 | */ |
50 | 50 | ||
51 | #define MCA_DMA_MODE_XFER 0x04 /* read by default */ | 51 | #define MCA_DMA_MODE_XFER 0x04 /* read by default */ |
@@ -63,7 +63,7 @@ | |||
63 | * IRQ context. | 63 | * IRQ context. |
64 | */ | 64 | */ |
65 | 65 | ||
66 | static __inline__ void mca_enable_dma(unsigned int dmanr) | 66 | static inline void mca_enable_dma(unsigned int dmanr) |
67 | { | 67 | { |
68 | outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); | 68 | outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); |
69 | } | 69 | } |
@@ -76,7 +76,7 @@ static __inline__ void mca_enable_dma(unsigned int dmanr) | |||
76 | * IRQ context. | 76 | * IRQ context. |
77 | */ | 77 | */ |
78 | 78 | ||
79 | static __inline__ void mca_disable_dma(unsigned int dmanr) | 79 | static inline void mca_disable_dma(unsigned int dmanr) |
80 | { | 80 | { |
81 | outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); | 81 | outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); |
82 | } | 82 | } |
@@ -87,10 +87,10 @@ static __inline__ void mca_disable_dma(unsigned int dmanr) | |||
87 | * @a: 24bit bus address | 87 | * @a: 24bit bus address |
88 | * | 88 | * |
89 | * Load the address register in the DMA controller. This has a 24bit | 89 | * Load the address register in the DMA controller. This has a 24bit |
90 | * limitation (16Mb). | 90 | * limitation (16Mb). |
91 | */ | 91 | */ |
92 | 92 | ||
93 | static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) | 93 | static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a) |
94 | { | 94 | { |
95 | outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); | 95 | outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); |
96 | outb(a & 0xff, MCA_DMA_REG_EXE); | 96 | outb(a & 0xff, MCA_DMA_REG_EXE); |
@@ -106,14 +106,14 @@ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) | |||
106 | * limitation (16Mb). The return is a bus address. | 106 | * limitation (16Mb). The return is a bus address. |
107 | */ | 107 | */ |
108 | 108 | ||
109 | static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) | 109 | static inline unsigned int mca_get_dma_addr(unsigned int dmanr) |
110 | { | 110 | { |
111 | unsigned int addr; | 111 | unsigned int addr; |
112 | 112 | ||
113 | outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); | 113 | outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); |
114 | addr = inb(MCA_DMA_REG_EXE); | 114 | addr = inb(MCA_DMA_REG_EXE); |
115 | addr |= inb(MCA_DMA_REG_EXE) << 8; | 115 | addr |= inb(MCA_DMA_REG_EXE) << 8; |
116 | addr |= inb(MCA_DMA_REG_EXE) << 16; | 116 | addr |= inb(MCA_DMA_REG_EXE) << 16; |
117 | 117 | ||
118 | return addr; | 118 | return addr; |
119 | } | 119 | } |
@@ -127,7 +127,7 @@ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) | |||
127 | * Setting a count of zero will not do what you expect. | 127 | * Setting a count of zero will not do what you expect. |
128 | */ | 128 | */ |
129 | 129 | ||
130 | static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) | 130 | static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count) |
131 | { | 131 | { |
132 | count--; /* transfers one more than count -- correct for this */ | 132 | count--; /* transfers one more than count -- correct for this */ |
133 | 133 | ||
@@ -144,7 +144,7 @@ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) | |||
144 | * on this DMA channel. | 144 | * on this DMA channel. |
145 | */ | 145 | */ |
146 | 146 | ||
147 | static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) | 147 | static inline unsigned int mca_get_dma_residue(unsigned int dmanr) |
148 | { | 148 | { |
149 | unsigned short count; | 149 | unsigned short count; |
150 | 150 | ||
@@ -164,12 +164,12 @@ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) | |||
164 | * with an I/O port target. | 164 | * with an I/O port target. |
165 | */ | 165 | */ |
166 | 166 | ||
167 | static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) | 167 | static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) |
168 | { | 168 | { |
169 | /* | 169 | /* |
170 | * DMA from a port address -- set the io address | 170 | * DMA from a port address -- set the io address |
171 | */ | 171 | */ |
172 | 172 | ||
173 | outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); | 173 | outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); |
174 | outb(io_addr & 0xff, MCA_DMA_REG_EXE); | 174 | outb(io_addr & 0xff, MCA_DMA_REG_EXE); |
175 | outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); | 175 | outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); |
@@ -192,7 +192,7 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) | |||
192 | * %MCA_DMA_MODE_16 to do 16bit transfers. | 192 | * %MCA_DMA_MODE_16 to do 16bit transfers. |
193 | */ | 193 | */ |
194 | 194 | ||
195 | static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) | 195 | static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) |
196 | { | 196 | { |
197 | outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); | 197 | outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); |
198 | outb(mode, MCA_DMA_REG_EXE); | 198 | outb(mode, MCA_DMA_REG_EXE); |
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h index efa962c38897..00e88679e11f 100644 --- a/include/asm-x86/mmu.h +++ b/include/asm-x86/mmu.h | |||
@@ -10,10 +10,10 @@ | |||
10 | * | 10 | * |
11 | * cpu_vm_mask is used to optimize ldt flushing. | 11 | * cpu_vm_mask is used to optimize ldt flushing. |
12 | */ | 12 | */ |
13 | typedef struct { | 13 | typedef struct { |
14 | void *ldt; | 14 | void *ldt; |
15 | #ifdef CONFIG_X86_64 | 15 | #ifdef CONFIG_X86_64 |
16 | rwlock_t ldtlock; | 16 | rwlock_t ldtlock; |
17 | #endif | 17 | #endif |
18 | int size; | 18 | int size; |
19 | struct mutex lock; | 19 | struct mutex lock; |
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h index 8198d1cca1f3..9756ae0f1dd3 100644 --- a/include/asm-x86/mmu_context_32.h +++ b/include/asm-x86/mmu_context_32.h | |||
@@ -62,7 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, | |||
62 | BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); | 62 | BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); |
63 | 63 | ||
64 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { | 64 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { |
65 | /* We were in lazy tlb mode and leave_mm disabled | 65 | /* We were in lazy tlb mode and leave_mm disabled |
66 | * tlb flush IPI delivery. We must reload %cr3. | 66 | * tlb flush IPI delivery. We must reload %cr3. |
67 | */ | 67 | */ |
68 | load_cr3(next->pgd); | 68 | load_cr3(next->pgd); |
@@ -75,10 +75,10 @@ static inline void switch_mm(struct mm_struct *prev, | |||
75 | #define deactivate_mm(tsk, mm) \ | 75 | #define deactivate_mm(tsk, mm) \ |
76 | asm("movl %0,%%gs": :"r" (0)); | 76 | asm("movl %0,%%gs": :"r" (0)); |
77 | 77 | ||
78 | #define activate_mm(prev, next) \ | 78 | #define activate_mm(prev, next) \ |
79 | do { \ | 79 | do { \ |
80 | paravirt_activate_mm(prev, next); \ | 80 | paravirt_activate_mm((prev), (next)); \ |
81 | switch_mm((prev),(next),NULL); \ | 81 | switch_mm((prev), (next), NULL); \ |
82 | } while(0); | 82 | } while (0); |
83 | 83 | ||
84 | #endif | 84 | #endif |
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h index ad6dc821ef9e..ca44c71e7fb3 100644 --- a/include/asm-x86/mmu_context_64.h +++ b/include/asm-x86/mmu_context_64.h | |||
@@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm); | |||
20 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 20 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
21 | { | 21 | { |
22 | #ifdef CONFIG_SMP | 22 | #ifdef CONFIG_SMP |
23 | if (read_pda(mmu_state) == TLBSTATE_OK) | 23 | if (read_pda(mmu_state) == TLBSTATE_OK) |
24 | write_pda(mmu_state, TLBSTATE_LAZY); | 24 | write_pda(mmu_state, TLBSTATE_LAZY); |
25 | #endif | 25 | #endif |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 28 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
29 | struct task_struct *tsk) | 29 | struct task_struct *tsk) |
30 | { | 30 | { |
31 | unsigned cpu = smp_processor_id(); | 31 | unsigned cpu = smp_processor_id(); |
@@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
39 | cpu_set(cpu, next->cpu_vm_mask); | 39 | cpu_set(cpu, next->cpu_vm_mask); |
40 | load_cr3(next->pgd); | 40 | load_cr3(next->pgd); |
41 | 41 | ||
42 | if (unlikely(next->context.ldt != prev->context.ldt)) | 42 | if (unlikely(next->context.ldt != prev->context.ldt)) |
43 | load_LDT_nolock(&next->context); | 43 | load_LDT_nolock(&next->context); |
44 | } | 44 | } |
45 | #ifdef CONFIG_SMP | 45 | #ifdef CONFIG_SMP |
@@ -48,7 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
48 | if (read_pda(active_mm) != next) | 48 | if (read_pda(active_mm) != next) |
49 | BUG(); | 49 | BUG(); |
50 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { | 50 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { |
51 | /* We were in lazy tlb mode and leave_mm disabled | 51 | /* We were in lazy tlb mode and leave_mm disabled |
52 | * tlb flush IPI delivery. We must reload CR3 | 52 | * tlb flush IPI delivery. We must reload CR3 |
53 | * to make sure to use no freed page tables. | 53 | * to make sure to use no freed page tables. |
54 | */ | 54 | */ |
@@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
59 | #endif | 59 | #endif |
60 | } | 60 | } |
61 | 61 | ||
62 | #define deactivate_mm(tsk,mm) do { \ | 62 | #define deactivate_mm(tsk, mm) \ |
63 | load_gs_index(0); \ | 63 | do { \ |
64 | asm volatile("movl %0,%%fs"::"r"(0)); \ | 64 | load_gs_index(0); \ |
65 | } while(0) | 65 | asm volatile("movl %0,%%fs"::"r"(0)); \ |
66 | } while (0) | ||
66 | 67 | ||
67 | #define activate_mm(prev, next) \ | 68 | #define activate_mm(prev, next) \ |
68 | switch_mm((prev),(next),NULL) | 69 | switch_mm((prev), (next), NULL) |
69 | 70 | ||
70 | 71 | ||
71 | #endif | 72 | #endif |
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h index 46b71da99869..940881218ff8 100644 --- a/include/asm-x86/mmx.h +++ b/include/asm-x86/mmx.h | |||
@@ -6,7 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | 9 | ||
10 | extern void *_mmx_memcpy(void *to, const void *from, size_t size); | 10 | extern void *_mmx_memcpy(void *to, const void *from, size_t size); |
11 | extern void mmx_clear_page(void *page); | 11 | extern void mmx_clear_page(void *page); |
12 | extern void mmx_copy_page(void *to, void *from); | 12 | extern void mmx_copy_page(void *to, void *from); |
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index 274a59566c45..cb2cad0b65a7 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h | |||
@@ -18,7 +18,7 @@ extern struct pglist_data *node_data[]; | |||
18 | #include <asm/srat.h> | 18 | #include <asm/srat.h> |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | extern int get_memcfg_numa_flat(void ); | 21 | extern int get_memcfg_numa_flat(void); |
22 | /* | 22 | /* |
23 | * This allows any one NUMA architecture to be compiled | 23 | * This allows any one NUMA architecture to be compiled |
24 | * for, and still fall back to the flat function if it | 24 | * for, and still fall back to the flat function if it |
@@ -129,7 +129,7 @@ static inline int pfn_valid(int pfn) | |||
129 | struct pglist_data __maybe_unused \ | 129 | struct pglist_data __maybe_unused \ |
130 | *__alloc_bootmem_node__pgdat = (pgdat); \ | 130 | *__alloc_bootmem_node__pgdat = (pgdat); \ |
131 | __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ | 131 | __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ |
132 | __pa(MAX_DMA_ADDRESS)) \ | 132 | __pa(MAX_DMA_ADDRESS)); \ |
133 | }) | 133 | }) |
134 | #define alloc_bootmem_low_pages_node(pgdat, x) \ | 134 | #define alloc_bootmem_low_pages_node(pgdat, x) \ |
135 | ({ \ | 135 | ({ \ |
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h index ebaf9663aa8a..594bd0dc1d08 100644 --- a/include/asm-x86/mmzone_64.h +++ b/include/asm-x86/mmzone_64.h | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | #ifdef CONFIG_NUMA | 8 | #ifdef CONFIG_NUMA |
9 | 9 | ||
10 | #define VIRTUAL_BUG_ON(x) | 10 | #define VIRTUAL_BUG_ON(x) |
11 | 11 | ||
12 | #include <asm/smp.h> | 12 | #include <asm/smp.h> |
13 | 13 | ||
@@ -16,7 +16,7 @@ struct memnode { | |||
16 | int shift; | 16 | int shift; |
17 | unsigned int mapsize; | 17 | unsigned int mapsize; |
18 | s16 *map; | 18 | s16 *map; |
19 | s16 embedded_map[64-8]; | 19 | s16 embedded_map[64 - 8]; |
20 | } ____cacheline_aligned; /* total size = 128 bytes */ | 20 | } ____cacheline_aligned; /* total size = 128 bytes */ |
21 | extern struct memnode memnode; | 21 | extern struct memnode memnode; |
22 | #define memnode_shift memnode.shift | 22 | #define memnode_shift memnode.shift |
@@ -25,27 +25,27 @@ extern struct memnode memnode; | |||
25 | 25 | ||
26 | extern struct pglist_data *node_data[]; | 26 | extern struct pglist_data *node_data[]; |
27 | 27 | ||
28 | static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | 28 | static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) |
29 | { | 29 | { |
30 | unsigned nid; | 30 | unsigned nid; |
31 | VIRTUAL_BUG_ON(!memnodemap); | 31 | VIRTUAL_BUG_ON(!memnodemap); |
32 | VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize); | 32 | VIRTUAL_BUG_ON((addr >> memnode_shift) >= memnodemapsize); |
33 | nid = memnodemap[addr >> memnode_shift]; | 33 | nid = memnodemap[addr >> memnode_shift]; |
34 | VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); | 34 | VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]); |
35 | return nid; | 35 | return nid; |
36 | } | 36 | } |
37 | 37 | ||
38 | #define NODE_DATA(nid) (node_data[nid]) | 38 | #define NODE_DATA(nid) (node_data[nid]) |
39 | 39 | ||
40 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 40 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
41 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ | 41 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ |
42 | NODE_DATA(nid)->node_spanned_pages) | 42 | NODE_DATA(nid)->node_spanned_pages) |
43 | 43 | ||
44 | extern int early_pfn_to_nid(unsigned long pfn); | 44 | extern int early_pfn_to_nid(unsigned long pfn); |
45 | 45 | ||
46 | #ifdef CONFIG_NUMA_EMU | 46 | #ifdef CONFIG_NUMA_EMU |
47 | #define FAKE_NODE_MIN_SIZE (64*1024*1024) | 47 | #define FAKE_NODE_MIN_SIZE (64 * 1024 * 1024) |
48 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1uL)) | 48 | #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #endif | 51 | #endif |
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 781ad74ab9e9..57a991b9c053 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h | |||
@@ -1,16 +1,13 @@ | |||
1 | #ifndef _AM_X86_MPSPEC_H | 1 | #ifndef _AM_X86_MPSPEC_H |
2 | #define _AM_X86_MPSPEC_H | 2 | #define _AM_X86_MPSPEC_H |
3 | 3 | ||
4 | #include <linux/init.h> | ||
5 | |||
4 | #include <asm/mpspec_def.h> | 6 | #include <asm/mpspec_def.h> |
5 | 7 | ||
6 | #ifdef CONFIG_X86_32 | 8 | #ifdef CONFIG_X86_32 |
7 | #include <mach_mpspec.h> | 9 | #include <mach_mpspec.h> |
8 | 10 | ||
9 | extern int mp_bus_id_to_type[MAX_MP_BUSSES]; | ||
10 | extern int mp_bus_id_to_node[MAX_MP_BUSSES]; | ||
11 | extern int mp_bus_id_to_local[MAX_MP_BUSSES]; | ||
12 | extern int quad_local_to_mp_bus_id[NR_CPUS/4][4]; | ||
13 | |||
14 | extern unsigned int def_to_bigsmp; | 11 | extern unsigned int def_to_bigsmp; |
15 | extern int apic_version[MAX_APICS]; | 12 | extern int apic_version[MAX_APICS]; |
16 | extern u8 apicid_2_node[]; | 13 | extern u8 apicid_2_node[]; |
@@ -24,27 +21,30 @@ extern int pic_mode; | |||
24 | /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ | 21 | /* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */ |
25 | #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) | 22 | #define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4) |
26 | 23 | ||
27 | extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | 24 | extern void early_find_smp_config(void); |
25 | extern void early_get_smp_config(void); | ||
28 | 26 | ||
29 | #endif | 27 | #endif |
30 | 28 | ||
29 | #if defined(CONFIG_MCA) || defined(CONFIG_EISA) | ||
30 | extern int mp_bus_id_to_type[MAX_MP_BUSSES]; | ||
31 | #endif | ||
32 | |||
33 | extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); | ||
34 | |||
31 | extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; | 35 | extern int mp_bus_id_to_pci_bus[MAX_MP_BUSSES]; |
32 | 36 | ||
33 | extern unsigned int boot_cpu_physical_apicid; | 37 | extern unsigned int boot_cpu_physical_apicid; |
34 | extern int smp_found_config; | 38 | extern int smp_found_config; |
35 | extern int nr_ioapics; | ||
36 | extern int mp_irq_entries; | ||
37 | extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; | ||
38 | extern int mpc_default_type; | 39 | extern int mpc_default_type; |
39 | extern unsigned long mp_lapic_addr; | 40 | extern unsigned long mp_lapic_addr; |
40 | 41 | ||
41 | extern void find_smp_config(void); | 42 | extern void find_smp_config(void); |
42 | extern void get_smp_config(void); | 43 | extern void get_smp_config(void); |
43 | 44 | ||
45 | void __cpuinit generic_processor_info(int apicid, int version); | ||
44 | #ifdef CONFIG_ACPI | 46 | #ifdef CONFIG_ACPI |
45 | extern void mp_register_lapic(u8 id, u8 enabled); | 47 | extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); |
46 | extern void mp_register_lapic_address(u64 address); | ||
47 | extern void mp_register_ioapic(u8 id, u32 address, u32 gsi_base); | ||
48 | extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, | 48 | extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, |
49 | u32 gsi); | 49 | u32 gsi); |
50 | extern void mp_config_acpi_legacy_irqs(void); | 50 | extern void mp_config_acpi_legacy_irqs(void); |
@@ -53,8 +53,7 @@ extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low); | |||
53 | 53 | ||
54 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) | 54 | #define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) |
55 | 55 | ||
56 | struct physid_mask | 56 | struct physid_mask { |
57 | { | ||
58 | unsigned long mask[PHYSID_ARRAY_SIZE]; | 57 | unsigned long mask[PHYSID_ARRAY_SIZE]; |
59 | }; | 58 | }; |
60 | 59 | ||
@@ -63,34 +62,34 @@ typedef struct physid_mask physid_mask_t; | |||
63 | #define physid_set(physid, map) set_bit(physid, (map).mask) | 62 | #define physid_set(physid, map) set_bit(physid, (map).mask) |
64 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) | 63 | #define physid_clear(physid, map) clear_bit(physid, (map).mask) |
65 | #define physid_isset(physid, map) test_bit(physid, (map).mask) | 64 | #define physid_isset(physid, map) test_bit(physid, (map).mask) |
66 | #define physid_test_and_set(physid, map) \ | 65 | #define physid_test_and_set(physid, map) \ |
67 | test_and_set_bit(physid, (map).mask) | 66 | test_and_set_bit(physid, (map).mask) |
68 | 67 | ||
69 | #define physids_and(dst, src1, src2) \ | 68 | #define physids_and(dst, src1, src2) \ |
70 | bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | 69 | bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) |
71 | 70 | ||
72 | #define physids_or(dst, src1, src2) \ | 71 | #define physids_or(dst, src1, src2) \ |
73 | bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) | 72 | bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) |
74 | 73 | ||
75 | #define physids_clear(map) \ | 74 | #define physids_clear(map) \ |
76 | bitmap_zero((map).mask, MAX_APICS) | 75 | bitmap_zero((map).mask, MAX_APICS) |
77 | 76 | ||
78 | #define physids_complement(dst, src) \ | 77 | #define physids_complement(dst, src) \ |
79 | bitmap_complement((dst).mask, (src).mask, MAX_APICS) | 78 | bitmap_complement((dst).mask, (src).mask, MAX_APICS) |
80 | 79 | ||
81 | #define physids_empty(map) \ | 80 | #define physids_empty(map) \ |
82 | bitmap_empty((map).mask, MAX_APICS) | 81 | bitmap_empty((map).mask, MAX_APICS) |
83 | 82 | ||
84 | #define physids_equal(map1, map2) \ | 83 | #define physids_equal(map1, map2) \ |
85 | bitmap_equal((map1).mask, (map2).mask, MAX_APICS) | 84 | bitmap_equal((map1).mask, (map2).mask, MAX_APICS) |
86 | 85 | ||
87 | #define physids_weight(map) \ | 86 | #define physids_weight(map) \ |
88 | bitmap_weight((map).mask, MAX_APICS) | 87 | bitmap_weight((map).mask, MAX_APICS) |
89 | 88 | ||
90 | #define physids_shift_right(d, s, n) \ | 89 | #define physids_shift_right(d, s, n) \ |
91 | bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) | 90 | bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) |
92 | 91 | ||
93 | #define physids_shift_left(d, s, n) \ | 92 | #define physids_shift_left(d, s, n) \ |
94 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) | 93 | bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) |
95 | 94 | ||
96 | #define physids_coerce(map) ((map).mask[0]) | 95 | #define physids_coerce(map) ((map).mask[0]) |
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h index 3504617fe648..dc6ef85e3624 100644 --- a/include/asm-x86/mpspec_def.h +++ b/include/asm-x86/mpspec_def.h | |||
@@ -11,7 +11,7 @@ | |||
11 | * information is. | 11 | * information is. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') | 14 | #define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_') |
15 | 15 | ||
16 | #ifdef CONFIG_X86_32 | 16 | #ifdef CONFIG_X86_32 |
17 | # define MAX_MPC_ENTRY 1024 | 17 | # define MAX_MPC_ENTRY 1024 |
@@ -23,8 +23,7 @@ | |||
23 | # define MAX_APICS 255 | 23 | # define MAX_APICS 255 |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | struct intel_mp_floating | 26 | struct intel_mp_floating { |
27 | { | ||
28 | char mpf_signature[4]; /* "_MP_" */ | 27 | char mpf_signature[4]; /* "_MP_" */ |
29 | unsigned int mpf_physptr; /* Configuration table address */ | 28 | unsigned int mpf_physptr; /* Configuration table address */ |
30 | unsigned char mpf_length; /* Our length (paragraphs) */ | 29 | unsigned char mpf_length; /* Our length (paragraphs) */ |
@@ -39,14 +38,13 @@ struct intel_mp_floating | |||
39 | 38 | ||
40 | #define MPC_SIGNATURE "PCMP" | 39 | #define MPC_SIGNATURE "PCMP" |
41 | 40 | ||
42 | struct mp_config_table | 41 | struct mp_config_table { |
43 | { | ||
44 | char mpc_signature[4]; | 42 | char mpc_signature[4]; |
45 | unsigned short mpc_length; /* Size of table */ | 43 | unsigned short mpc_length; /* Size of table */ |
46 | char mpc_spec; /* 0x01 */ | 44 | char mpc_spec; /* 0x01 */ |
47 | char mpc_checksum; | 45 | char mpc_checksum; |
48 | char mpc_oem[8]; | 46 | char mpc_oem[8]; |
49 | char mpc_productid[12]; | 47 | char mpc_productid[12]; |
50 | unsigned int mpc_oemptr; /* 0 if not present */ | 48 | unsigned int mpc_oemptr; /* 0 if not present */ |
51 | unsigned short mpc_oemsize; /* 0 if not present */ | 49 | unsigned short mpc_oemsize; /* 0 if not present */ |
52 | unsigned short mpc_oemcount; | 50 | unsigned short mpc_oemcount; |
@@ -71,8 +69,7 @@ struct mp_config_table | |||
71 | #define CPU_MODEL_MASK 0x00F0 | 69 | #define CPU_MODEL_MASK 0x00F0 |
72 | #define CPU_FAMILY_MASK 0x0F00 | 70 | #define CPU_FAMILY_MASK 0x0F00 |
73 | 71 | ||
74 | struct mpc_config_processor | 72 | struct mpc_config_processor { |
75 | { | ||
76 | unsigned char mpc_type; | 73 | unsigned char mpc_type; |
77 | unsigned char mpc_apicid; /* Local APIC number */ | 74 | unsigned char mpc_apicid; /* Local APIC number */ |
78 | unsigned char mpc_apicver; /* Its versions */ | 75 | unsigned char mpc_apicver; /* Its versions */ |
@@ -82,8 +79,7 @@ struct mpc_config_processor | |||
82 | unsigned int mpc_reserved[2]; | 79 | unsigned int mpc_reserved[2]; |
83 | }; | 80 | }; |
84 | 81 | ||
85 | struct mpc_config_bus | 82 | struct mpc_config_bus { |
86 | { | ||
87 | unsigned char mpc_type; | 83 | unsigned char mpc_type; |
88 | unsigned char mpc_busid; | 84 | unsigned char mpc_busid; |
89 | unsigned char mpc_bustype[6]; | 85 | unsigned char mpc_bustype[6]; |
@@ -111,8 +107,7 @@ struct mpc_config_bus | |||
111 | 107 | ||
112 | #define MPC_APIC_USABLE 0x01 | 108 | #define MPC_APIC_USABLE 0x01 |
113 | 109 | ||
114 | struct mpc_config_ioapic | 110 | struct mpc_config_ioapic { |
115 | { | ||
116 | unsigned char mpc_type; | 111 | unsigned char mpc_type; |
117 | unsigned char mpc_apicid; | 112 | unsigned char mpc_apicid; |
118 | unsigned char mpc_apicver; | 113 | unsigned char mpc_apicver; |
@@ -120,8 +115,7 @@ struct mpc_config_ioapic | |||
120 | unsigned int mpc_apicaddr; | 115 | unsigned int mpc_apicaddr; |
121 | }; | 116 | }; |
122 | 117 | ||
123 | struct mpc_config_intsrc | 118 | struct mpc_config_intsrc { |
124 | { | ||
125 | unsigned char mpc_type; | 119 | unsigned char mpc_type; |
126 | unsigned char mpc_irqtype; | 120 | unsigned char mpc_irqtype; |
127 | unsigned short mpc_irqflag; | 121 | unsigned short mpc_irqflag; |
@@ -144,8 +138,7 @@ enum mp_irq_source_types { | |||
144 | 138 | ||
145 | #define MP_APIC_ALL 0xFF | 139 | #define MP_APIC_ALL 0xFF |
146 | 140 | ||
147 | struct mpc_config_lintsrc | 141 | struct mpc_config_lintsrc { |
148 | { | ||
149 | unsigned char mpc_type; | 142 | unsigned char mpc_type; |
150 | unsigned char mpc_irqtype; | 143 | unsigned char mpc_irqtype; |
151 | unsigned short mpc_irqflag; | 144 | unsigned short mpc_irqflag; |
@@ -157,8 +150,7 @@ struct mpc_config_lintsrc | |||
157 | 150 | ||
158 | #define MPC_OEM_SIGNATURE "_OEM" | 151 | #define MPC_OEM_SIGNATURE "_OEM" |
159 | 152 | ||
160 | struct mp_config_oemtable | 153 | struct mp_config_oemtable { |
161 | { | ||
162 | char oem_signature[4]; | 154 | char oem_signature[4]; |
163 | unsigned short oem_length; /* Size of table */ | 155 | unsigned short oem_length; /* Size of table */ |
164 | char oem_rev; /* 0x01 */ | 156 | char oem_rev; /* 0x01 */ |
@@ -166,17 +158,6 @@ struct mp_config_oemtable | |||
166 | char mpc_oem[8]; | 158 | char mpc_oem[8]; |
167 | }; | 159 | }; |
168 | 160 | ||
169 | struct mpc_config_translation | ||
170 | { | ||
171 | unsigned char mpc_type; | ||
172 | unsigned char trans_len; | ||
173 | unsigned char trans_type; | ||
174 | unsigned char trans_quad; | ||
175 | unsigned char trans_global; | ||
176 | unsigned char trans_local; | ||
177 | unsigned short trans_reserved; | ||
178 | }; | ||
179 | |||
180 | /* | 161 | /* |
181 | * Default configurations | 162 | * Default configurations |
182 | * | 163 | * |
@@ -196,4 +177,3 @@ enum mp_bustype { | |||
196 | MP_BUS_MCA, | 177 | MP_BUS_MCA, |
197 | }; | 178 | }; |
198 | #endif | 179 | #endif |
199 | |||
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h index 5b8acddb70fb..296f29ce426d 100644 --- a/include/asm-x86/msidef.h +++ b/include/asm-x86/msidef.h | |||
@@ -11,7 +11,8 @@ | |||
11 | 11 | ||
12 | #define MSI_DATA_VECTOR_SHIFT 0 | 12 | #define MSI_DATA_VECTOR_SHIFT 0 |
13 | #define MSI_DATA_VECTOR_MASK 0x000000ff | 13 | #define MSI_DATA_VECTOR_MASK 0x000000ff |
14 | #define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) | 14 | #define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \ |
15 | MSI_DATA_VECTOR_MASK) | ||
15 | 16 | ||
16 | #define MSI_DATA_DELIVERY_MODE_SHIFT 8 | 17 | #define MSI_DATA_DELIVERY_MODE_SHIFT 8 |
17 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) | 18 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) |
@@ -37,11 +38,14 @@ | |||
37 | #define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) | 38 | #define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) |
38 | 39 | ||
39 | #define MSI_ADDR_REDIRECTION_SHIFT 3 | 40 | #define MSI_ADDR_REDIRECTION_SHIFT 3 |
40 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */ | 41 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) |
41 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */ | 42 | /* dedicated cpu */ |
43 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) | ||
44 | /* lowest priority */ | ||
42 | 45 | ||
43 | #define MSI_ADDR_DEST_ID_SHIFT 12 | 46 | #define MSI_ADDR_DEST_ID_SHIFT 12 |
44 | #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 | 47 | #define MSI_ADDR_DEST_ID_MASK 0x00ffff0 |
45 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK) | 48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ |
49 | MSI_ADDR_DEST_ID_MASK) | ||
46 | 50 | ||
47 | #endif /* ASM_MSIDEF_H */ | 51 | #endif /* ASM_MSIDEF_H */ |
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index fae118a25278..09413ad39d3c 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h | |||
@@ -57,6 +57,8 @@ | |||
57 | #define MSR_MTRRfix4K_F8000 0x0000026f | 57 | #define MSR_MTRRfix4K_F8000 0x0000026f |
58 | #define MSR_MTRRdefType 0x000002ff | 58 | #define MSR_MTRRdefType 0x000002ff |
59 | 59 | ||
60 | #define MSR_IA32_CR_PAT 0x00000277 | ||
61 | |||
60 | #define MSR_IA32_DEBUGCTLMSR 0x000001d9 | 62 | #define MSR_IA32_DEBUGCTLMSR 0x000001d9 |
61 | #define MSR_IA32_LASTBRANCHFROMIP 0x000001db | 63 | #define MSR_IA32_LASTBRANCHFROMIP 0x000001db |
62 | #define MSR_IA32_LASTBRANCHTOIP 0x000001dc | 64 | #define MSR_IA32_LASTBRANCHTOIP 0x000001dc |
@@ -83,6 +85,7 @@ | |||
83 | /* AMD64 MSRs. Not complete. See the architecture manual for a more | 85 | /* AMD64 MSRs. Not complete. See the architecture manual for a more |
84 | complete list. */ | 86 | complete list. */ |
85 | 87 | ||
88 | #define MSR_AMD64_NB_CFG 0xc001001f | ||
86 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 | 89 | #define MSR_AMD64_IBSFETCHCTL 0xc0011030 |
87 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 | 90 | #define MSR_AMD64_IBSFETCHLINAD 0xc0011031 |
88 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 | 91 | #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 |
@@ -109,6 +112,7 @@ | |||
109 | #define MSR_K8_SYSCFG 0xc0010010 | 112 | #define MSR_K8_SYSCFG 0xc0010010 |
110 | #define MSR_K8_HWCR 0xc0010015 | 113 | #define MSR_K8_HWCR 0xc0010015 |
111 | #define MSR_K8_ENABLE_C1E 0xc0010055 | 114 | #define MSR_K8_ENABLE_C1E 0xc0010055 |
115 | #define MSR_K8_TSEG_ADDR 0xc0010112 | ||
112 | #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ | 116 | #define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ |
113 | #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ | 117 | #define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ |
114 | #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ | 118 | #define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ |
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h index 3ca29ebebbb1..3707650a169b 100644 --- a/include/asm-x86/msr.h +++ b/include/asm-x86/msr.h | |||
@@ -16,8 +16,8 @@ | |||
16 | static inline unsigned long long native_read_tscp(unsigned int *aux) | 16 | static inline unsigned long long native_read_tscp(unsigned int *aux) |
17 | { | 17 | { |
18 | unsigned long low, high; | 18 | unsigned long low, high; |
19 | asm volatile (".byte 0x0f,0x01,0xf9" | 19 | asm volatile(".byte 0x0f,0x01,0xf9" |
20 | : "=a" (low), "=d" (high), "=c" (*aux)); | 20 | : "=a" (low), "=d" (high), "=c" (*aux)); |
21 | return low | ((u64)high >> 32); | 21 | return low | ((u64)high >> 32); |
22 | } | 22 | } |
23 | 23 | ||
@@ -29,7 +29,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux) | |||
29 | */ | 29 | */ |
30 | #ifdef CONFIG_X86_64 | 30 | #ifdef CONFIG_X86_64 |
31 | #define DECLARE_ARGS(val, low, high) unsigned low, high | 31 | #define DECLARE_ARGS(val, low, high) unsigned low, high |
32 | #define EAX_EDX_VAL(val, low, high) (low | ((u64)(high) << 32)) | 32 | #define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32)) |
33 | #define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) | 33 | #define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) |
34 | #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) | 34 | #define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) |
35 | #else | 35 | #else |
@@ -57,7 +57,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, | |||
57 | ".section .fixup,\"ax\"\n\t" | 57 | ".section .fixup,\"ax\"\n\t" |
58 | "3: mov %3,%0 ; jmp 1b\n\t" | 58 | "3: mov %3,%0 ; jmp 1b\n\t" |
59 | ".previous\n\t" | 59 | ".previous\n\t" |
60 | _ASM_EXTABLE(2b,3b) | 60 | _ASM_EXTABLE(2b, 3b) |
61 | : "=r" (*err), EAX_EDX_RET(val, low, high) | 61 | : "=r" (*err), EAX_EDX_RET(val, low, high) |
62 | : "c" (msr), "i" (-EFAULT)); | 62 | : "c" (msr), "i" (-EFAULT)); |
63 | return EAX_EDX_VAL(val, low, high); | 63 | return EAX_EDX_VAL(val, low, high); |
@@ -78,10 +78,10 @@ static inline int native_write_msr_safe(unsigned int msr, | |||
78 | ".section .fixup,\"ax\"\n\t" | 78 | ".section .fixup,\"ax\"\n\t" |
79 | "3: mov %4,%0 ; jmp 1b\n\t" | 79 | "3: mov %4,%0 ; jmp 1b\n\t" |
80 | ".previous\n\t" | 80 | ".previous\n\t" |
81 | _ASM_EXTABLE(2b,3b) | 81 | _ASM_EXTABLE(2b, 3b) |
82 | : "=a" (err) | 82 | : "=a" (err) |
83 | : "c" (msr), "0" (low), "d" (high), | 83 | : "c" (msr), "0" (low), "d" (high), |
84 | "i" (-EFAULT)); | 84 | "i" (-EFAULT)); |
85 | return err; | 85 | return err; |
86 | } | 86 | } |
87 | 87 | ||
@@ -116,23 +116,23 @@ static inline unsigned long long native_read_pmc(int counter) | |||
116 | * pointer indirection), this allows gcc to optimize better | 116 | * pointer indirection), this allows gcc to optimize better |
117 | */ | 117 | */ |
118 | 118 | ||
119 | #define rdmsr(msr,val1,val2) \ | 119 | #define rdmsr(msr, val1, val2) \ |
120 | do { \ | 120 | do { \ |
121 | u64 __val = native_read_msr(msr); \ | 121 | u64 __val = native_read_msr((msr)); \ |
122 | (val1) = (u32)__val; \ | 122 | (val1) = (u32)__val; \ |
123 | (val2) = (u32)(__val >> 32); \ | 123 | (val2) = (u32)(__val >> 32); \ |
124 | } while(0) | 124 | } while (0) |
125 | 125 | ||
126 | static inline void wrmsr(unsigned msr, unsigned low, unsigned high) | 126 | static inline void wrmsr(unsigned msr, unsigned low, unsigned high) |
127 | { | 127 | { |
128 | native_write_msr(msr, low, high); | 128 | native_write_msr(msr, low, high); |
129 | } | 129 | } |
130 | 130 | ||
131 | #define rdmsrl(msr,val) \ | 131 | #define rdmsrl(msr, val) \ |
132 | ((val) = native_read_msr(msr)) | 132 | ((val) = native_read_msr((msr))) |
133 | 133 | ||
134 | #define wrmsrl(msr, val) \ | 134 | #define wrmsrl(msr, val) \ |
135 | native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32)) | 135 | native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) |
136 | 136 | ||
137 | /* wrmsr with exception handling */ | 137 | /* wrmsr with exception handling */ |
138 | static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) | 138 | static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) |
@@ -141,14 +141,22 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | /* rdmsr with exception handling */ | 143 | /* rdmsr with exception handling */ |
144 | #define rdmsr_safe(msr,p1,p2) \ | 144 | #define rdmsr_safe(msr, p1, p2) \ |
145 | ({ \ | 145 | ({ \ |
146 | int __err; \ | 146 | int __err; \ |
147 | u64 __val = native_read_msr_safe(msr, &__err); \ | 147 | u64 __val = native_read_msr_safe((msr), &__err); \ |
148 | (*p1) = (u32)__val; \ | 148 | (*p1) = (u32)__val; \ |
149 | (*p2) = (u32)(__val >> 32); \ | 149 | (*p2) = (u32)(__val >> 32); \ |
150 | __err; \ | 150 | __err; \ |
151 | }) | 151 | }) |
152 | |||
153 | static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | ||
154 | { | ||
155 | int err; | ||
156 | |||
157 | *p = native_read_msr_safe(msr, &err); | ||
158 | return err; | ||
159 | } | ||
152 | 160 | ||
153 | #define rdtscl(low) \ | 161 | #define rdtscl(low) \ |
154 | ((low) = (u32)native_read_tsc()) | 162 | ((low) = (u32)native_read_tsc()) |
@@ -156,35 +164,37 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) | |||
156 | #define rdtscll(val) \ | 164 | #define rdtscll(val) \ |
157 | ((val) = native_read_tsc()) | 165 | ((val) = native_read_tsc()) |
158 | 166 | ||
159 | #define rdpmc(counter,low,high) \ | 167 | #define rdpmc(counter, low, high) \ |
160 | do { \ | 168 | do { \ |
161 | u64 _l = native_read_pmc(counter); \ | 169 | u64 _l = native_read_pmc((counter)); \ |
162 | (low) = (u32)_l; \ | 170 | (low) = (u32)_l; \ |
163 | (high) = (u32)(_l >> 32); \ | 171 | (high) = (u32)(_l >> 32); \ |
164 | } while(0) | 172 | } while (0) |
165 | 173 | ||
166 | #define rdtscp(low, high, aux) \ | 174 | #define rdtscp(low, high, aux) \ |
167 | do { \ | 175 | do { \ |
168 | unsigned long long _val = native_read_tscp(&(aux)); \ | 176 | unsigned long long _val = native_read_tscp(&(aux)); \ |
169 | (low) = (u32)_val; \ | 177 | (low) = (u32)_val; \ |
170 | (high) = (u32)(_val >> 32); \ | 178 | (high) = (u32)(_val >> 32); \ |
171 | } while (0) | 179 | } while (0) |
172 | 180 | ||
173 | #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) | 181 | #define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) |
174 | 182 | ||
175 | #endif /* !CONFIG_PARAVIRT */ | 183 | #endif /* !CONFIG_PARAVIRT */ |
176 | 184 | ||
177 | 185 | ||
178 | #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) | 186 | #define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \ |
187 | (u32)((val) >> 32)) | ||
179 | 188 | ||
180 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | 189 | #define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2)) |
181 | 190 | ||
182 | #define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0) | 191 | #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) |
183 | 192 | ||
184 | #ifdef CONFIG_SMP | 193 | #ifdef CONFIG_SMP |
185 | void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 194 | void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
186 | void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 195 | void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
187 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | 196 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
197 | |||
188 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | 198 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
189 | #else /* CONFIG_SMP */ | 199 | #else /* CONFIG_SMP */ |
190 | static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 200 | static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
@@ -195,7 +205,8 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | |||
195 | { | 205 | { |
196 | wrmsr(msr_no, l, h); | 206 | wrmsr(msr_no, l, h); |
197 | } | 207 | } |
198 | static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | 208 | static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, |
209 | u32 *l, u32 *h) | ||
199 | { | 210 | { |
200 | return rdmsr_safe(msr_no, l, h); | 211 | return rdmsr_safe(msr_no, l, h); |
201 | } | 212 | } |
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h index 319d065800be..a69a01a51729 100644 --- a/include/asm-x86/mtrr.h +++ b/include/asm-x86/mtrr.h | |||
@@ -28,8 +28,7 @@ | |||
28 | 28 | ||
29 | #define MTRR_IOCTL_BASE 'M' | 29 | #define MTRR_IOCTL_BASE 'M' |
30 | 30 | ||
31 | struct mtrr_sentry | 31 | struct mtrr_sentry { |
32 | { | ||
33 | unsigned long base; /* Base address */ | 32 | unsigned long base; /* Base address */ |
34 | unsigned int size; /* Size of region */ | 33 | unsigned int size; /* Size of region */ |
35 | unsigned int type; /* Type of region */ | 34 | unsigned int type; /* Type of region */ |
@@ -41,8 +40,7 @@ struct mtrr_sentry | |||
41 | will break. */ | 40 | will break. */ |
42 | 41 | ||
43 | #ifdef __i386__ | 42 | #ifdef __i386__ |
44 | struct mtrr_gentry | 43 | struct mtrr_gentry { |
45 | { | ||
46 | unsigned int regnum; /* Register number */ | 44 | unsigned int regnum; /* Register number */ |
47 | unsigned long base; /* Base address */ | 45 | unsigned long base; /* Base address */ |
48 | unsigned int size; /* Size of region */ | 46 | unsigned int size; /* Size of region */ |
@@ -51,8 +49,7 @@ struct mtrr_gentry | |||
51 | 49 | ||
52 | #else /* __i386__ */ | 50 | #else /* __i386__ */ |
53 | 51 | ||
54 | struct mtrr_gentry | 52 | struct mtrr_gentry { |
55 | { | ||
56 | unsigned long base; /* Base address */ | 53 | unsigned long base; /* Base address */ |
57 | unsigned int size; /* Size of region */ | 54 | unsigned int size; /* Size of region */ |
58 | unsigned int regnum; /* Register number */ | 55 | unsigned int regnum; /* Register number */ |
@@ -86,38 +83,45 @@ struct mtrr_gentry | |||
86 | 83 | ||
87 | /* The following functions are for use by other drivers */ | 84 | /* The following functions are for use by other drivers */ |
88 | # ifdef CONFIG_MTRR | 85 | # ifdef CONFIG_MTRR |
86 | extern u8 mtrr_type_lookup(u64 addr, u64 end); | ||
89 | extern void mtrr_save_fixed_ranges(void *); | 87 | extern void mtrr_save_fixed_ranges(void *); |
90 | extern void mtrr_save_state(void); | 88 | extern void mtrr_save_state(void); |
91 | extern int mtrr_add (unsigned long base, unsigned long size, | 89 | extern int mtrr_add(unsigned long base, unsigned long size, |
92 | unsigned int type, bool increment); | 90 | unsigned int type, bool increment); |
93 | extern int mtrr_add_page (unsigned long base, unsigned long size, | 91 | extern int mtrr_add_page(unsigned long base, unsigned long size, |
94 | unsigned int type, bool increment); | 92 | unsigned int type, bool increment); |
95 | extern int mtrr_del (int reg, unsigned long base, unsigned long size); | 93 | extern int mtrr_del(int reg, unsigned long base, unsigned long size); |
96 | extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); | 94 | extern int mtrr_del_page(int reg, unsigned long base, unsigned long size); |
97 | extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); | 95 | extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); |
98 | extern void mtrr_ap_init(void); | 96 | extern void mtrr_ap_init(void); |
99 | extern void mtrr_bp_init(void); | 97 | extern void mtrr_bp_init(void); |
100 | extern int mtrr_trim_uncached_memory(unsigned long end_pfn); | 98 | extern int mtrr_trim_uncached_memory(unsigned long end_pfn); |
99 | extern int amd_special_default_mtrr(void); | ||
101 | # else | 100 | # else |
101 | static inline u8 mtrr_type_lookup(u64 addr, u64 end) | ||
102 | { | ||
103 | /* | ||
104 | * Return no-MTRRs: | ||
105 | */ | ||
106 | return 0xff; | ||
107 | } | ||
102 | #define mtrr_save_fixed_ranges(arg) do {} while (0) | 108 | #define mtrr_save_fixed_ranges(arg) do {} while (0) |
103 | #define mtrr_save_state() do {} while (0) | 109 | #define mtrr_save_state() do {} while (0) |
104 | static __inline__ int mtrr_add (unsigned long base, unsigned long size, | 110 | static inline int mtrr_add(unsigned long base, unsigned long size, |
105 | unsigned int type, bool increment) | 111 | unsigned int type, bool increment) |
106 | { | 112 | { |
107 | return -ENODEV; | 113 | return -ENODEV; |
108 | } | 114 | } |
109 | static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, | 115 | static inline int mtrr_add_page(unsigned long base, unsigned long size, |
110 | unsigned int type, bool increment) | 116 | unsigned int type, bool increment) |
111 | { | 117 | { |
112 | return -ENODEV; | 118 | return -ENODEV; |
113 | } | 119 | } |
114 | static __inline__ int mtrr_del (int reg, unsigned long base, | 120 | static inline int mtrr_del(int reg, unsigned long base, unsigned long size) |
115 | unsigned long size) | ||
116 | { | 121 | { |
117 | return -ENODEV; | 122 | return -ENODEV; |
118 | } | 123 | } |
119 | static __inline__ int mtrr_del_page (int reg, unsigned long base, | 124 | static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size) |
120 | unsigned long size) | ||
121 | { | 125 | { |
122 | return -ENODEV; | 126 | return -ENODEV; |
123 | } | 127 | } |
@@ -125,7 +129,9 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
125 | { | 129 | { |
126 | return 0; | 130 | return 0; |
127 | } | 131 | } |
128 | static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} | 132 | static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) |
133 | { | ||
134 | } | ||
129 | 135 | ||
130 | #define mtrr_ap_init() do {} while (0) | 136 | #define mtrr_ap_init() do {} while (0) |
131 | #define mtrr_bp_init() do {} while (0) | 137 | #define mtrr_bp_init() do {} while (0) |
@@ -134,15 +140,13 @@ static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} | |||
134 | #ifdef CONFIG_COMPAT | 140 | #ifdef CONFIG_COMPAT |
135 | #include <linux/compat.h> | 141 | #include <linux/compat.h> |
136 | 142 | ||
137 | struct mtrr_sentry32 | 143 | struct mtrr_sentry32 { |
138 | { | ||
139 | compat_ulong_t base; /* Base address */ | 144 | compat_ulong_t base; /* Base address */ |
140 | compat_uint_t size; /* Size of region */ | 145 | compat_uint_t size; /* Size of region */ |
141 | compat_uint_t type; /* Type of region */ | 146 | compat_uint_t type; /* Type of region */ |
142 | }; | 147 | }; |
143 | 148 | ||
144 | struct mtrr_gentry32 | 149 | struct mtrr_gentry32 { |
145 | { | ||
146 | compat_ulong_t regnum; /* Register number */ | 150 | compat_ulong_t regnum; /* Register number */ |
147 | compat_uint_t base; /* Base address */ | 151 | compat_uint_t base; /* Base address */ |
148 | compat_uint_t size; /* Size of region */ | 152 | compat_uint_t size; /* Size of region */ |
@@ -151,16 +155,17 @@ struct mtrr_gentry32 | |||
151 | 155 | ||
152 | #define MTRR_IOCTL_BASE 'M' | 156 | #define MTRR_IOCTL_BASE 'M' |
153 | 157 | ||
154 | #define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) | 158 | #define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32) |
155 | #define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) | 159 | #define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32) |
156 | #define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) | 160 | #define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32) |
157 | #define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) | 161 | #define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32) |
158 | #define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) | 162 | #define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32) |
159 | #define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) | 163 | #define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32) |
160 | #define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) | 164 | #define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32) |
161 | #define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) | 165 | #define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32) |
162 | #define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) | 166 | #define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32) |
163 | #define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) | 167 | #define MTRRIOC32_KILL_PAGE_ENTRY \ |
168 | _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32) | ||
164 | #endif /* CONFIG_COMPAT */ | 169 | #endif /* CONFIG_COMPAT */ |
165 | 170 | ||
166 | #endif /* __KERNEL__ */ | 171 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h index bbeefb96ddfd..73e928ef5f03 100644 --- a/include/asm-x86/mutex_32.h +++ b/include/asm-x86/mutex_32.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #ifndef _ASM_MUTEX_H | 9 | #ifndef _ASM_MUTEX_H |
10 | #define _ASM_MUTEX_H | 10 | #define _ASM_MUTEX_H |
11 | 11 | ||
12 | #include "asm/alternative.h" | 12 | #include <asm/alternative.h> |
13 | 13 | ||
14 | /** | 14 | /** |
15 | * __mutex_fastpath_lock - try to take the lock by moving the count | 15 | * __mutex_fastpath_lock - try to take the lock by moving the count |
@@ -21,22 +21,20 @@ | |||
21 | * wasn't 1 originally. This function MUST leave the value lower than 1 | 21 | * wasn't 1 originally. This function MUST leave the value lower than 1 |
22 | * even when the "1" assertion wasn't true. | 22 | * even when the "1" assertion wasn't true. |
23 | */ | 23 | */ |
24 | #define __mutex_fastpath_lock(count, fail_fn) \ | 24 | #define __mutex_fastpath_lock(count, fail_fn) \ |
25 | do { \ | 25 | do { \ |
26 | unsigned int dummy; \ | 26 | unsigned int dummy; \ |
27 | \ | 27 | \ |
28 | typecheck(atomic_t *, count); \ | 28 | typecheck(atomic_t *, count); \ |
29 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ | 29 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
30 | \ | 30 | \ |
31 | __asm__ __volatile__( \ | 31 | asm volatile(LOCK_PREFIX " decl (%%eax)\n" \ |
32 | LOCK_PREFIX " decl (%%eax) \n" \ | 32 | " jns 1f \n" \ |
33 | " jns 1f \n" \ | 33 | " call " #fail_fn "\n" \ |
34 | " call "#fail_fn" \n" \ | 34 | "1:\n" \ |
35 | "1: \n" \ | 35 | : "=a" (dummy) \ |
36 | \ | 36 | : "a" (count) \ |
37 | :"=a" (dummy) \ | 37 | : "memory", "ecx", "edx"); \ |
38 | : "a" (count) \ | ||
39 | : "memory", "ecx", "edx"); \ | ||
40 | } while (0) | 38 | } while (0) |
41 | 39 | ||
42 | 40 | ||
@@ -50,8 +48,8 @@ do { \ | |||
50 | * wasn't 1 originally. This function returns 0 if the fastpath succeeds, | 48 | * wasn't 1 originally. This function returns 0 if the fastpath succeeds, |
51 | * or anything the slow path function returns | 49 | * or anything the slow path function returns |
52 | */ | 50 | */ |
53 | static inline int | 51 | static inline int __mutex_fastpath_lock_retval(atomic_t *count, |
54 | __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | 52 | int (*fail_fn)(atomic_t *)) |
55 | { | 53 | { |
56 | if (unlikely(atomic_dec_return(count) < 0)) | 54 | if (unlikely(atomic_dec_return(count) < 0)) |
57 | return fail_fn(count); | 55 | return fail_fn(count); |
@@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | |||
72 | * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs | 70 | * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs |
73 | * to return 0 otherwise. | 71 | * to return 0 otherwise. |
74 | */ | 72 | */ |
75 | #define __mutex_fastpath_unlock(count, fail_fn) \ | 73 | #define __mutex_fastpath_unlock(count, fail_fn) \ |
76 | do { \ | 74 | do { \ |
77 | unsigned int dummy; \ | 75 | unsigned int dummy; \ |
78 | \ | 76 | \ |
79 | typecheck(atomic_t *, count); \ | 77 | typecheck(atomic_t *, count); \ |
80 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ | 78 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
81 | \ | 79 | \ |
82 | __asm__ __volatile__( \ | 80 | asm volatile(LOCK_PREFIX " incl (%%eax)\n" \ |
83 | LOCK_PREFIX " incl (%%eax) \n" \ | 81 | " jg 1f\n" \ |
84 | " jg 1f \n" \ | 82 | " call " #fail_fn "\n" \ |
85 | " call "#fail_fn" \n" \ | 83 | "1:\n" \ |
86 | "1: \n" \ | 84 | : "=a" (dummy) \ |
87 | \ | 85 | : "a" (count) \ |
88 | :"=a" (dummy) \ | 86 | : "memory", "ecx", "edx"); \ |
89 | : "a" (count) \ | ||
90 | : "memory", "ecx", "edx"); \ | ||
91 | } while (0) | 87 | } while (0) |
92 | 88 | ||
93 | #define __mutex_slowpath_needs_to_unlock() 1 | 89 | #define __mutex_slowpath_needs_to_unlock() 1 |
@@ -104,8 +100,8 @@ do { \ | |||
104 | * Additionally, if the value was < 0 originally, this function must not leave | 100 | * Additionally, if the value was < 0 originally, this function must not leave |
105 | * it to 0 on failure. | 101 | * it to 0 on failure. |
106 | */ | 102 | */ |
107 | static inline int | 103 | static inline int __mutex_fastpath_trylock(atomic_t *count, |
108 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | 104 | int (*fail_fn)(atomic_t *)) |
109 | { | 105 | { |
110 | /* | 106 | /* |
111 | * We have two variants here. The cmpxchg based one is the best one | 107 | * We have two variants here. The cmpxchg based one is the best one |
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h index 6c2949a3c677..f3fae9becb38 100644 --- a/include/asm-x86/mutex_64.h +++ b/include/asm-x86/mutex_64.h | |||
@@ -16,23 +16,21 @@ | |||
16 | * | 16 | * |
17 | * Atomically decrements @v and calls <fail_fn> if the result is negative. | 17 | * Atomically decrements @v and calls <fail_fn> if the result is negative. |
18 | */ | 18 | */ |
19 | #define __mutex_fastpath_lock(v, fail_fn) \ | 19 | #define __mutex_fastpath_lock(v, fail_fn) \ |
20 | do { \ | 20 | do { \ |
21 | unsigned long dummy; \ | 21 | unsigned long dummy; \ |
22 | \ | 22 | \ |
23 | typecheck(atomic_t *, v); \ | 23 | typecheck(atomic_t *, v); \ |
24 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ | 24 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
25 | \ | 25 | \ |
26 | __asm__ __volatile__( \ | 26 | asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \ |
27 | LOCK_PREFIX " decl (%%rdi) \n" \ | 27 | " jns 1f \n" \ |
28 | " jns 1f \n" \ | 28 | " call " #fail_fn "\n" \ |
29 | " call "#fail_fn" \n" \ | 29 | "1:" \ |
30 | "1:" \ | 30 | : "=D" (dummy) \ |
31 | \ | 31 | : "D" (v) \ |
32 | :"=D" (dummy) \ | 32 | : "rax", "rsi", "rdx", "rcx", \ |
33 | : "D" (v) \ | 33 | "r8", "r9", "r10", "r11", "memory"); \ |
34 | : "rax", "rsi", "rdx", "rcx", \ | ||
35 | "r8", "r9", "r10", "r11", "memory"); \ | ||
36 | } while (0) | 34 | } while (0) |
37 | 35 | ||
38 | /** | 36 | /** |
@@ -45,9 +43,8 @@ do { \ | |||
45 | * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, | 43 | * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, |
46 | * or anything the slow path function returns | 44 | * or anything the slow path function returns |
47 | */ | 45 | */ |
48 | static inline int | 46 | static inline int __mutex_fastpath_lock_retval(atomic_t *count, |
49 | __mutex_fastpath_lock_retval(atomic_t *count, | 47 | int (*fail_fn)(atomic_t *)) |
50 | int (*fail_fn)(atomic_t *)) | ||
51 | { | 48 | { |
52 | if (unlikely(atomic_dec_return(count) < 0)) | 49 | if (unlikely(atomic_dec_return(count) < 0)) |
53 | return fail_fn(count); | 50 | return fail_fn(count); |
@@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count, | |||
62 | * | 59 | * |
63 | * Atomically increments @v and calls <fail_fn> if the result is nonpositive. | 60 | * Atomically increments @v and calls <fail_fn> if the result is nonpositive. |
64 | */ | 61 | */ |
65 | #define __mutex_fastpath_unlock(v, fail_fn) \ | 62 | #define __mutex_fastpath_unlock(v, fail_fn) \ |
66 | do { \ | 63 | do { \ |
67 | unsigned long dummy; \ | 64 | unsigned long dummy; \ |
68 | \ | 65 | \ |
69 | typecheck(atomic_t *, v); \ | 66 | typecheck(atomic_t *, v); \ |
70 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ | 67 | typecheck_fn(void (*)(atomic_t *), fail_fn); \ |
71 | \ | 68 | \ |
72 | __asm__ __volatile__( \ | 69 | asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \ |
73 | LOCK_PREFIX " incl (%%rdi) \n" \ | 70 | " jg 1f\n" \ |
74 | " jg 1f \n" \ | 71 | " call " #fail_fn "\n" \ |
75 | " call "#fail_fn" \n" \ | 72 | "1:" \ |
76 | "1: " \ | 73 | : "=D" (dummy) \ |
77 | \ | 74 | : "D" (v) \ |
78 | :"=D" (dummy) \ | 75 | : "rax", "rsi", "rdx", "rcx", \ |
79 | : "D" (v) \ | 76 | "r8", "r9", "r10", "r11", "memory"); \ |
80 | : "rax", "rsi", "rdx", "rcx", \ | ||
81 | "r8", "r9", "r10", "r11", "memory"); \ | ||
82 | } while (0) | 77 | } while (0) |
83 | 78 | ||
84 | #define __mutex_slowpath_needs_to_unlock() 1 | 79 | #define __mutex_slowpath_needs_to_unlock() 1 |
@@ -93,8 +88,8 @@ do { \ | |||
93 | * if it wasn't 1 originally. [the fallback function is never used on | 88 | * if it wasn't 1 originally. [the fallback function is never used on |
94 | * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.] | 89 | * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.] |
95 | */ | 90 | */ |
96 | static inline int | 91 | static inline int __mutex_fastpath_trylock(atomic_t *count, |
97 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | 92 | int (*fail_fn)(atomic_t *)) |
98 | { | 93 | { |
99 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) | 94 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) |
100 | return 1; | 95 | return 1; |
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h index 53ccac14cead..1e363021e72f 100644 --- a/include/asm-x86/nmi.h +++ b/include/asm-x86/nmi.h | |||
@@ -1,5 +1,93 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_NMI_H_ |
2 | # include "nmi_32.h" | 2 | #define _ASM_X86_NMI_H_ |
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <asm/irq.h> | ||
6 | #include <asm/io.h> | ||
7 | |||
8 | #ifdef ARCH_HAS_NMI_WATCHDOG | ||
9 | |||
10 | /** | ||
11 | * do_nmi_callback | ||
12 | * | ||
13 | * Check to see if a callback exists and execute it. Return 1 | ||
14 | * if the handler exists and was handled successfully. | ||
15 | */ | ||
16 | int do_nmi_callback(struct pt_regs *regs, int cpu); | ||
17 | |||
18 | #ifdef CONFIG_PM | ||
19 | |||
20 | /** Replace the PM callback routine for NMI. */ | ||
21 | struct pm_dev *set_nmi_pm_callback(pm_callback callback); | ||
22 | |||
23 | /** Unset the PM callback routine back to the default. */ | ||
24 | void unset_nmi_pm_callback(struct pm_dev *dev); | ||
25 | |||
3 | #else | 26 | #else |
4 | # include "nmi_64.h" | 27 | |
28 | static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | static inline void unset_nmi_pm_callback(struct pm_dev *dev) | ||
34 | { | ||
35 | } | ||
36 | |||
37 | #endif /* CONFIG_PM */ | ||
38 | |||
39 | #ifdef CONFIG_X86_64 | ||
40 | extern void default_do_nmi(struct pt_regs *); | ||
41 | extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); | ||
42 | extern void nmi_watchdog_default(void); | ||
43 | #else | ||
44 | #define nmi_watchdog_default() do {} while (0) | ||
45 | #endif | ||
46 | |||
47 | extern int check_nmi_watchdog(void); | ||
48 | extern int nmi_watchdog_enabled; | ||
49 | extern int unknown_nmi_panic; | ||
50 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | ||
51 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | ||
52 | extern int reserve_perfctr_nmi(unsigned int); | ||
53 | extern void release_perfctr_nmi(unsigned int); | ||
54 | extern int reserve_evntsel_nmi(unsigned int); | ||
55 | extern void release_evntsel_nmi(unsigned int); | ||
56 | |||
57 | extern void setup_apic_nmi_watchdog(void *); | ||
58 | extern void stop_apic_nmi_watchdog(void *); | ||
59 | extern void disable_timer_nmi_watchdog(void); | ||
60 | extern void enable_timer_nmi_watchdog(void); | ||
61 | extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason); | ||
62 | |||
63 | extern atomic_t nmi_active; | ||
64 | extern unsigned int nmi_watchdog; | ||
65 | #define NMI_DISABLED -1 | ||
66 | #define NMI_NONE 0 | ||
67 | #define NMI_IO_APIC 1 | ||
68 | #define NMI_LOCAL_APIC 2 | ||
69 | #define NMI_INVALID 3 | ||
70 | #define NMI_DEFAULT NMI_DISABLED | ||
71 | |||
72 | struct ctl_table; | ||
73 | struct file; | ||
74 | extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | ||
75 | void __user *, size_t *, loff_t *); | ||
76 | extern int unknown_nmi_panic; | ||
77 | |||
78 | void __trigger_all_cpu_backtrace(void); | ||
79 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
80 | |||
81 | #endif | ||
82 | |||
83 | void lapic_watchdog_stop(void); | ||
84 | int lapic_watchdog_init(unsigned nmi_hz); | ||
85 | int lapic_wd_event(unsigned nmi_hz); | ||
86 | unsigned lapic_adjust_nmi_hz(unsigned hz); | ||
87 | int lapic_watchdog_ok(void); | ||
88 | void disable_lapic_nmi_watchdog(void); | ||
89 | void enable_lapic_nmi_watchdog(void); | ||
90 | void stop_nmi(void); | ||
91 | void restart_nmi(void); | ||
92 | |||
5 | #endif | 93 | #endif |
diff --git a/include/asm-x86/nmi_32.h b/include/asm-x86/nmi_32.h deleted file mode 100644 index 7206c7e8a388..000000000000 --- a/include/asm-x86/nmi_32.h +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | #ifndef ASM_NMI_H | ||
2 | #define ASM_NMI_H | ||
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <asm/irq.h> | ||
6 | |||
7 | #ifdef ARCH_HAS_NMI_WATCHDOG | ||
8 | |||
9 | /** | ||
10 | * do_nmi_callback | ||
11 | * | ||
12 | * Check to see if a callback exists and execute it. Return 1 | ||
13 | * if the handler exists and was handled successfully. | ||
14 | */ | ||
15 | int do_nmi_callback(struct pt_regs *regs, int cpu); | ||
16 | |||
17 | extern int nmi_watchdog_enabled; | ||
18 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | ||
19 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | ||
20 | extern int reserve_perfctr_nmi(unsigned int); | ||
21 | extern void release_perfctr_nmi(unsigned int); | ||
22 | extern int reserve_evntsel_nmi(unsigned int); | ||
23 | extern void release_evntsel_nmi(unsigned int); | ||
24 | |||
25 | extern void setup_apic_nmi_watchdog (void *); | ||
26 | extern void stop_apic_nmi_watchdog (void *); | ||
27 | extern void disable_timer_nmi_watchdog(void); | ||
28 | extern void enable_timer_nmi_watchdog(void); | ||
29 | extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); | ||
30 | |||
31 | extern atomic_t nmi_active; | ||
32 | extern unsigned int nmi_watchdog; | ||
33 | #define NMI_DISABLED -1 | ||
34 | #define NMI_NONE 0 | ||
35 | #define NMI_IO_APIC 1 | ||
36 | #define NMI_LOCAL_APIC 2 | ||
37 | #define NMI_INVALID 3 | ||
38 | #define NMI_DEFAULT NMI_DISABLED | ||
39 | |||
40 | struct ctl_table; | ||
41 | struct file; | ||
42 | extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | ||
43 | void __user *, size_t *, loff_t *); | ||
44 | extern int unknown_nmi_panic; | ||
45 | |||
46 | void __trigger_all_cpu_backtrace(void); | ||
47 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
48 | |||
49 | #endif | ||
50 | |||
51 | void lapic_watchdog_stop(void); | ||
52 | int lapic_watchdog_init(unsigned nmi_hz); | ||
53 | int lapic_wd_event(unsigned nmi_hz); | ||
54 | unsigned lapic_adjust_nmi_hz(unsigned hz); | ||
55 | int lapic_watchdog_ok(void); | ||
56 | void disable_lapic_nmi_watchdog(void); | ||
57 | void enable_lapic_nmi_watchdog(void); | ||
58 | void stop_nmi(void); | ||
59 | void restart_nmi(void); | ||
60 | |||
61 | #endif /* ASM_NMI_H */ | ||
diff --git a/include/asm-x86/nmi_64.h b/include/asm-x86/nmi_64.h deleted file mode 100644 index 2eeb74e5f3ff..000000000000 --- a/include/asm-x86/nmi_64.h +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | #ifndef ASM_NMI_H | ||
2 | #define ASM_NMI_H | ||
3 | |||
4 | #include <linux/pm.h> | ||
5 | #include <asm/io.h> | ||
6 | |||
7 | /** | ||
8 | * do_nmi_callback | ||
9 | * | ||
10 | * Check to see if a callback exists and execute it. Return 1 | ||
11 | * if the handler exists and was handled successfully. | ||
12 | */ | ||
13 | int do_nmi_callback(struct pt_regs *regs, int cpu); | ||
14 | |||
15 | #ifdef CONFIG_PM | ||
16 | |||
17 | /** Replace the PM callback routine for NMI. */ | ||
18 | struct pm_dev * set_nmi_pm_callback(pm_callback callback); | ||
19 | |||
20 | /** Unset the PM callback routine back to the default. */ | ||
21 | void unset_nmi_pm_callback(struct pm_dev * dev); | ||
22 | |||
23 | #else | ||
24 | |||
25 | static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static inline void unset_nmi_pm_callback(struct pm_dev * dev) | ||
31 | { | ||
32 | } | ||
33 | |||
34 | #endif /* CONFIG_PM */ | ||
35 | |||
36 | extern void default_do_nmi(struct pt_regs *); | ||
37 | extern void die_nmi(char *str, struct pt_regs *regs, int do_panic); | ||
38 | |||
39 | #define get_nmi_reason() inb(0x61) | ||
40 | |||
41 | extern int unknown_nmi_panic; | ||
42 | extern int nmi_watchdog_enabled; | ||
43 | |||
44 | extern int check_nmi_watchdog(void); | ||
45 | extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); | ||
46 | extern int avail_to_resrv_perfctr_nmi(unsigned int); | ||
47 | extern int reserve_perfctr_nmi(unsigned int); | ||
48 | extern void release_perfctr_nmi(unsigned int); | ||
49 | extern int reserve_evntsel_nmi(unsigned int); | ||
50 | extern void release_evntsel_nmi(unsigned int); | ||
51 | |||
52 | extern void setup_apic_nmi_watchdog (void *); | ||
53 | extern void stop_apic_nmi_watchdog (void *); | ||
54 | extern void disable_timer_nmi_watchdog(void); | ||
55 | extern void enable_timer_nmi_watchdog(void); | ||
56 | extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); | ||
57 | |||
58 | extern void nmi_watchdog_default(void); | ||
59 | |||
60 | extern atomic_t nmi_active; | ||
61 | extern unsigned int nmi_watchdog; | ||
62 | #define NMI_DISABLED -1 | ||
63 | #define NMI_NONE 0 | ||
64 | #define NMI_IO_APIC 1 | ||
65 | #define NMI_LOCAL_APIC 2 | ||
66 | #define NMI_INVALID 3 | ||
67 | #define NMI_DEFAULT NMI_DISABLED | ||
68 | |||
69 | struct ctl_table; | ||
70 | struct file; | ||
71 | extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | ||
72 | void __user *, size_t *, loff_t *); | ||
73 | |||
74 | extern int unknown_nmi_panic; | ||
75 | |||
76 | void __trigger_all_cpu_backtrace(void); | ||
77 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
78 | |||
79 | |||
80 | void lapic_watchdog_stop(void); | ||
81 | int lapic_watchdog_init(unsigned nmi_hz); | ||
82 | int lapic_wd_event(unsigned nmi_hz); | ||
83 | unsigned lapic_adjust_nmi_hz(unsigned hz); | ||
84 | int lapic_watchdog_ok(void); | ||
85 | void disable_lapic_nmi_watchdog(void); | ||
86 | void enable_lapic_nmi_watchdog(void); | ||
87 | void stop_nmi(void); | ||
88 | void restart_nmi(void); | ||
89 | |||
90 | #endif /* ASM_NMI_H */ | ||
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h index b3930ae539b3..ad0bedd10b89 100644 --- a/include/asm-x86/nops.h +++ b/include/asm-x86/nops.h | |||
@@ -5,6 +5,8 @@ | |||
5 | 5 | ||
6 | /* generic versions from gas | 6 | /* generic versions from gas |
7 | 1: nop | 7 | 1: nop |
8 | the following instructions are NOT nops in 64-bit mode, | ||
9 | for 64-bit mode use K8 or P6 nops instead | ||
8 | 2: movl %esi,%esi | 10 | 2: movl %esi,%esi |
9 | 3: leal 0x00(%esi),%esi | 11 | 3: leal 0x00(%esi),%esi |
10 | 4: leal 0x00(,%esi,1),%esi | 12 | 4: leal 0x00(,%esi,1),%esi |
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h index 15fe07cde586..32c22ae0709f 100644 --- a/include/asm-x86/numa_64.h +++ b/include/asm-x86/numa_64.h | |||
@@ -1,11 +1,12 @@ | |||
1 | #ifndef _ASM_X8664_NUMA_H | 1 | #ifndef _ASM_X8664_NUMA_H |
2 | #define _ASM_X8664_NUMA_H 1 | 2 | #define _ASM_X8664_NUMA_H 1 |
3 | 3 | ||
4 | #include <linux/nodemask.h> | 4 | #include <linux/nodemask.h> |
5 | #include <asm/apicdef.h> | 5 | #include <asm/apicdef.h> |
6 | 6 | ||
7 | struct bootnode { | 7 | struct bootnode { |
8 | u64 start,end; | 8 | u64 start; |
9 | u64 end; | ||
9 | }; | 10 | }; |
10 | 11 | ||
11 | extern int compute_hash_shift(struct bootnode *nodes, int numnodes); | 12 | extern int compute_hash_shift(struct bootnode *nodes, int numnodes); |
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h index 38f710dc37f2..94b86c31239a 100644 --- a/include/asm-x86/numaq.h +++ b/include/asm-x86/numaq.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2002, IBM Corp. | 4 | * Copyright (C) 2002, IBM Corp. |
5 | * | 5 | * |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
@@ -33,7 +33,8 @@ extern int get_memcfg_numaq(void); | |||
33 | /* | 33 | /* |
34 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the | 34 | * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the |
35 | */ | 35 | */ |
36 | #define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */ | 36 | #define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private |
37 | quad space */ | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * Communication area for each processor on lynxer-processor tests. | 40 | * Communication area for each processor on lynxer-processor tests. |
@@ -139,7 +140,7 @@ struct sys_cfg_data { | |||
139 | unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ | 140 | unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ |
140 | unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ | 141 | unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ |
141 | /* may not be totally populated */ | 142 | /* may not be totally populated */ |
142 | unsigned int split_mem_enbl; /* 0 for no low shared memory */ | 143 | unsigned int split_mem_enbl; /* 0 for no low shared memory */ |
143 | unsigned int mmio_sz; /* Size of total system memory mapped I/O */ | 144 | unsigned int mmio_sz; /* Size of total system memory mapped I/O */ |
144 | /* (in MB). */ | 145 | /* (in MB). */ |
145 | unsigned int quad_spin_lock; /* Spare location used for quad */ | 146 | unsigned int quad_spin_lock; /* Spare location used for quad */ |
@@ -152,7 +153,7 @@ struct sys_cfg_data { | |||
152 | /* | 153 | /* |
153 | * memory configuration area for each quad | 154 | * memory configuration area for each quad |
154 | */ | 155 | */ |
155 | struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ | 156 | struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ |
156 | }; | 157 | }; |
157 | 158 | ||
158 | static inline unsigned long *get_zholes_size(int nid) | 159 | static inline unsigned long *get_zholes_size(int nid) |
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h index a05b2896492f..6724a4bc6b7a 100644 --- a/include/asm-x86/page.h +++ b/include/asm-x86/page.h | |||
@@ -33,10 +33,8 @@ | |||
33 | 33 | ||
34 | #ifdef CONFIG_X86_64 | 34 | #ifdef CONFIG_X86_64 |
35 | #include <asm/page_64.h> | 35 | #include <asm/page_64.h> |
36 | #define max_pfn_mapped end_pfn_map | ||
37 | #else | 36 | #else |
38 | #include <asm/page_32.h> | 37 | #include <asm/page_32.h> |
39 | #define max_pfn_mapped max_low_pfn | ||
40 | #endif /* CONFIG_X86_64 */ | 38 | #endif /* CONFIG_X86_64 */ |
41 | 39 | ||
42 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) | 40 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
@@ -50,6 +48,8 @@ | |||
50 | 48 | ||
51 | extern int page_is_ram(unsigned long pagenr); | 49 | extern int page_is_ram(unsigned long pagenr); |
52 | 50 | ||
51 | extern unsigned long max_pfn_mapped; | ||
52 | |||
53 | struct page; | 53 | struct page; |
54 | 54 | ||
55 | static inline void clear_user_page(void *page, unsigned long vaddr, | 55 | static inline void clear_user_page(void *page, unsigned long vaddr, |
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h index 5f7257fd589b..424e82f8ae27 100644 --- a/include/asm-x86/page_32.h +++ b/include/asm-x86/page_32.h | |||
@@ -47,7 +47,10 @@ typedef unsigned long pgdval_t; | |||
47 | typedef unsigned long pgprotval_t; | 47 | typedef unsigned long pgprotval_t; |
48 | typedef unsigned long phys_addr_t; | 48 | typedef unsigned long phys_addr_t; |
49 | 49 | ||
50 | typedef union { pteval_t pte, pte_low; } pte_t; | 50 | typedef union { |
51 | pteval_t pte; | ||
52 | pteval_t pte_low; | ||
53 | } pte_t; | ||
51 | 54 | ||
52 | #endif /* __ASSEMBLY__ */ | 55 | #endif /* __ASSEMBLY__ */ |
53 | #endif /* CONFIG_X86_PAE */ | 56 | #endif /* CONFIG_X86_PAE */ |
@@ -61,7 +64,7 @@ typedef struct page *pgtable_t; | |||
61 | #endif | 64 | #endif |
62 | 65 | ||
63 | #ifndef __ASSEMBLY__ | 66 | #ifndef __ASSEMBLY__ |
64 | #define __phys_addr(x) ((x)-PAGE_OFFSET) | 67 | #define __phys_addr(x) ((x) - PAGE_OFFSET) |
65 | #define __phys_reloc_hide(x) RELOC_HIDE((x), 0) | 68 | #define __phys_reloc_hide(x) RELOC_HIDE((x), 0) |
66 | 69 | ||
67 | #ifdef CONFIG_FLATMEM | 70 | #ifdef CONFIG_FLATMEM |
@@ -78,7 +81,7 @@ extern unsigned int __VMALLOC_RESERVE; | |||
78 | extern int sysctl_legacy_va_layout; | 81 | extern int sysctl_legacy_va_layout; |
79 | 82 | ||
80 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) | 83 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) |
81 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) | 84 | #define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE) |
82 | 85 | ||
83 | #ifdef CONFIG_X86_USE_3DNOW | 86 | #ifdef CONFIG_X86_USE_3DNOW |
84 | #include <asm/mmx.h> | 87 | #include <asm/mmx.h> |
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h index 143546073b95..6ea72859c491 100644 --- a/include/asm-x86/page_64.h +++ b/include/asm-x86/page_64.h | |||
@@ -5,7 +5,7 @@ | |||
5 | 5 | ||
6 | #define THREAD_ORDER 1 | 6 | #define THREAD_ORDER 1 |
7 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) | 7 | #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) |
8 | #define CURRENT_MASK (~(THREAD_SIZE-1)) | 8 | #define CURRENT_MASK (~(THREAD_SIZE - 1)) |
9 | 9 | ||
10 | #define EXCEPTION_STACK_ORDER 0 | 10 | #define EXCEPTION_STACK_ORDER 0 |
11 | #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) | 11 | #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) |
@@ -48,10 +48,10 @@ | |||
48 | #define __VIRTUAL_MASK_SHIFT 48 | 48 | #define __VIRTUAL_MASK_SHIFT 48 |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * Kernel image size is limited to 128 MB (see level2_kernel_pgt in | 51 | * Kernel image size is limited to 512 MB (see level2_kernel_pgt in |
52 | * arch/x86/kernel/head_64.S), and it is mapped here: | 52 | * arch/x86/kernel/head_64.S), and it is mapped here: |
53 | */ | 53 | */ |
54 | #define KERNEL_IMAGE_SIZE (128*1024*1024) | 54 | #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) |
55 | #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) | 55 | #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) |
56 | 56 | ||
57 | #ifndef __ASSEMBLY__ | 57 | #ifndef __ASSEMBLY__ |
@@ -59,7 +59,6 @@ void clear_page(void *page); | |||
59 | void copy_page(void *to, void *from); | 59 | void copy_page(void *to, void *from); |
60 | 60 | ||
61 | extern unsigned long end_pfn; | 61 | extern unsigned long end_pfn; |
62 | extern unsigned long end_pfn_map; | ||
63 | extern unsigned long phys_base; | 62 | extern unsigned long phys_base; |
64 | 63 | ||
65 | extern unsigned long __phys_addr(unsigned long); | 64 | extern unsigned long __phys_addr(unsigned long); |
@@ -81,6 +80,9 @@ typedef struct { pteval_t pte; } pte_t; | |||
81 | 80 | ||
82 | #define vmemmap ((struct page *)VMEMMAP_START) | 81 | #define vmemmap ((struct page *)VMEMMAP_START) |
83 | 82 | ||
83 | extern unsigned long init_memory_mapping(unsigned long start, | ||
84 | unsigned long end); | ||
85 | |||
84 | #endif /* !__ASSEMBLY__ */ | 86 | #endif /* !__ASSEMBLY__ */ |
85 | 87 | ||
86 | #ifdef CONFIG_FLATMEM | 88 | #ifdef CONFIG_FLATMEM |
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h index c996ec4da0c8..6f0d0422f4ca 100644 --- a/include/asm-x86/param.h +++ b/include/asm-x86/param.h | |||
@@ -3,8 +3,8 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ | 5 | # define HZ CONFIG_HZ /* Internal kernel timer frequency */ |
6 | # define USER_HZ 100 /* .. some user interfaces are in "ticks" */ | 6 | # define USER_HZ 100 /* some user interfaces are */ |
7 | # define CLOCKS_PER_SEC (USER_HZ) /* like times() */ | 7 | # define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ |
8 | #endif | 8 | #endif |
9 | 9 | ||
10 | #ifndef HZ | 10 | #ifndef HZ |
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index d6236eb46466..3d419398499b 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -231,7 +231,8 @@ struct pv_mmu_ops { | |||
231 | void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, | 231 | void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, |
232 | pte_t *ptep, pte_t pteval); | 232 | pte_t *ptep, pte_t pteval); |
233 | void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); | 233 | void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); |
234 | void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 234 | void (*pte_update)(struct mm_struct *mm, unsigned long addr, |
235 | pte_t *ptep); | ||
235 | void (*pte_update_defer)(struct mm_struct *mm, | 236 | void (*pte_update_defer)(struct mm_struct *mm, |
236 | unsigned long addr, pte_t *ptep); | 237 | unsigned long addr, pte_t *ptep); |
237 | 238 | ||
@@ -246,7 +247,8 @@ struct pv_mmu_ops { | |||
246 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); | 247 | void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); |
247 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, | 248 | void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, |
248 | pte_t *ptep, pte_t pte); | 249 | pte_t *ptep, pte_t pte); |
249 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 250 | void (*pte_clear)(struct mm_struct *mm, unsigned long addr, |
251 | pte_t *ptep); | ||
250 | void (*pmd_clear)(pmd_t *pmdp); | 252 | void (*pmd_clear)(pmd_t *pmdp); |
251 | 253 | ||
252 | #endif /* CONFIG_X86_PAE */ | 254 | #endif /* CONFIG_X86_PAE */ |
@@ -274,8 +276,7 @@ struct pv_mmu_ops { | |||
274 | /* This contains all the paravirt structures: we get a convenient | 276 | /* This contains all the paravirt structures: we get a convenient |
275 | * number for each function using the offset which we use to indicate | 277 | * number for each function using the offset which we use to indicate |
276 | * what to patch. */ | 278 | * what to patch. */ |
277 | struct paravirt_patch_template | 279 | struct paravirt_patch_template { |
278 | { | ||
279 | struct pv_init_ops pv_init_ops; | 280 | struct pv_init_ops pv_init_ops; |
280 | struct pv_time_ops pv_time_ops; | 281 | struct pv_time_ops pv_time_ops; |
281 | struct pv_cpu_ops pv_cpu_ops; | 282 | struct pv_cpu_ops pv_cpu_ops; |
@@ -660,43 +661,56 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) | |||
660 | } | 661 | } |
661 | 662 | ||
662 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ | 663 | /* These should all do BUG_ON(_err), but our headers are too tangled. */ |
663 | #define rdmsr(msr,val1,val2) do { \ | 664 | #define rdmsr(msr, val1, val2) \ |
665 | do { \ | ||
664 | int _err; \ | 666 | int _err; \ |
665 | u64 _l = paravirt_read_msr(msr, &_err); \ | 667 | u64 _l = paravirt_read_msr(msr, &_err); \ |
666 | val1 = (u32)_l; \ | 668 | val1 = (u32)_l; \ |
667 | val2 = _l >> 32; \ | 669 | val2 = _l >> 32; \ |
668 | } while(0) | 670 | } while (0) |
669 | 671 | ||
670 | #define wrmsr(msr,val1,val2) do { \ | 672 | #define wrmsr(msr, val1, val2) \ |
673 | do { \ | ||
671 | paravirt_write_msr(msr, val1, val2); \ | 674 | paravirt_write_msr(msr, val1, val2); \ |
672 | } while(0) | 675 | } while (0) |
673 | 676 | ||
674 | #define rdmsrl(msr,val) do { \ | 677 | #define rdmsrl(msr, val) \ |
678 | do { \ | ||
675 | int _err; \ | 679 | int _err; \ |
676 | val = paravirt_read_msr(msr, &_err); \ | 680 | val = paravirt_read_msr(msr, &_err); \ |
677 | } while(0) | 681 | } while (0) |
678 | 682 | ||
679 | #define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) | 683 | #define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) |
680 | #define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b) | 684 | #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b) |
681 | 685 | ||
682 | /* rdmsr with exception handling */ | 686 | /* rdmsr with exception handling */ |
683 | #define rdmsr_safe(msr,a,b) ({ \ | 687 | #define rdmsr_safe(msr, a, b) \ |
688 | ({ \ | ||
684 | int _err; \ | 689 | int _err; \ |
685 | u64 _l = paravirt_read_msr(msr, &_err); \ | 690 | u64 _l = paravirt_read_msr(msr, &_err); \ |
686 | (*a) = (u32)_l; \ | 691 | (*a) = (u32)_l; \ |
687 | (*b) = _l >> 32; \ | 692 | (*b) = _l >> 32; \ |
688 | _err; }) | 693 | _err; \ |
694 | }) | ||
695 | |||
696 | static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) | ||
697 | { | ||
698 | int err; | ||
689 | 699 | ||
700 | *p = paravirt_read_msr(msr, &err); | ||
701 | return err; | ||
702 | } | ||
690 | 703 | ||
691 | static inline u64 paravirt_read_tsc(void) | 704 | static inline u64 paravirt_read_tsc(void) |
692 | { | 705 | { |
693 | return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); | 706 | return PVOP_CALL0(u64, pv_cpu_ops.read_tsc); |
694 | } | 707 | } |
695 | 708 | ||
696 | #define rdtscl(low) do { \ | 709 | #define rdtscl(low) \ |
710 | do { \ | ||
697 | u64 _l = paravirt_read_tsc(); \ | 711 | u64 _l = paravirt_read_tsc(); \ |
698 | low = (int)_l; \ | 712 | low = (int)_l; \ |
699 | } while(0) | 713 | } while (0) |
700 | 714 | ||
701 | #define rdtscll(val) (val = paravirt_read_tsc()) | 715 | #define rdtscll(val) (val = paravirt_read_tsc()) |
702 | 716 | ||
@@ -711,11 +725,12 @@ static inline unsigned long long paravirt_read_pmc(int counter) | |||
711 | return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); | 725 | return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter); |
712 | } | 726 | } |
713 | 727 | ||
714 | #define rdpmc(counter,low,high) do { \ | 728 | #define rdpmc(counter, low, high) \ |
729 | do { \ | ||
715 | u64 _l = paravirt_read_pmc(counter); \ | 730 | u64 _l = paravirt_read_pmc(counter); \ |
716 | low = (u32)_l; \ | 731 | low = (u32)_l; \ |
717 | high = _l >> 32; \ | 732 | high = _l >> 32; \ |
718 | } while(0) | 733 | } while (0) |
719 | 734 | ||
720 | static inline unsigned long long paravirt_rdtscp(unsigned int *aux) | 735 | static inline unsigned long long paravirt_rdtscp(unsigned int *aux) |
721 | { | 736 | { |
@@ -794,7 +809,8 @@ static inline void set_iopl_mask(unsigned mask) | |||
794 | } | 809 | } |
795 | 810 | ||
796 | /* The paravirtualized I/O functions */ | 811 | /* The paravirtualized I/O functions */ |
797 | static inline void slow_down_io(void) { | 812 | static inline void slow_down_io(void) |
813 | { | ||
798 | pv_cpu_ops.io_delay(); | 814 | pv_cpu_ops.io_delay(); |
799 | #ifdef REALLY_SLOW_IO | 815 | #ifdef REALLY_SLOW_IO |
800 | pv_cpu_ops.io_delay(); | 816 | pv_cpu_ops.io_delay(); |
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h index 019cbca24a38..3c4ffeb467e9 100644 --- a/include/asm-x86/parport.h +++ b/include/asm-x86/parport.h | |||
@@ -1,10 +1,10 @@ | |||
1 | #ifndef _ASM_X86_PARPORT_H | 1 | #ifndef _ASM_X86_PARPORT_H |
2 | #define _ASM_X86_PARPORT_H | 2 | #define _ASM_X86_PARPORT_H |
3 | 3 | ||
4 | static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); | 4 | static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); |
5 | static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) | 5 | static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) |
6 | { | 6 | { |
7 | return parport_pc_find_isa_ports (autoirq, autodma); | 7 | return parport_pc_find_isa_ports(autoirq, autodma); |
8 | } | 8 | } |
9 | 9 | ||
10 | #endif /* _ASM_X86_PARPORT_H */ | 10 | #endif /* _ASM_X86_PARPORT_H */ |
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h new file mode 100644 index 000000000000..8b822b5a1786 --- /dev/null +++ b/include/asm-x86/pat.h | |||
@@ -0,0 +1,16 @@ | |||
1 | |||
2 | #ifndef _ASM_PAT_H | ||
3 | #define _ASM_PAT_H 1 | ||
4 | |||
5 | #include <linux/types.h> | ||
6 | |||
7 | extern int pat_wc_enabled; | ||
8 | |||
9 | extern void pat_init(void); | ||
10 | |||
11 | extern int reserve_memtype(u64 start, u64 end, | ||
12 | unsigned long req_type, unsigned long *ret_type); | ||
13 | extern int free_memtype(u64 start, u64 end); | ||
14 | |||
15 | #endif | ||
16 | |||
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h index 6823fa4f1afa..5b21485be573 100644 --- a/include/asm-x86/pci-direct.h +++ b/include/asm-x86/pci-direct.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | /* Direct PCI access. This is used for PCI accesses in early boot before | 6 | /* Direct PCI access. This is used for PCI accesses in early boot before |
7 | the PCI subsystem works. */ | 7 | the PCI subsystem works. */ |
8 | 8 | ||
9 | extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); | 9 | extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); |
10 | extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); | 10 | extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); |
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h index c61190cb9e12..ddd8e248fc0a 100644 --- a/include/asm-x86/pci.h +++ b/include/asm-x86/pci.h | |||
@@ -8,14 +8,13 @@ | |||
8 | #include <asm/scatterlist.h> | 8 | #include <asm/scatterlist.h> |
9 | #include <asm/io.h> | 9 | #include <asm/io.h> |
10 | 10 | ||
11 | |||
12 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
13 | 12 | ||
14 | struct pci_sysdata { | 13 | struct pci_sysdata { |
15 | int domain; /* PCI domain */ | 14 | int domain; /* PCI domain */ |
16 | int node; /* NUMA node */ | 15 | int node; /* NUMA node */ |
17 | #ifdef CONFIG_X86_64 | 16 | #ifdef CONFIG_X86_64 |
18 | void* iommu; /* IOMMU private data */ | 17 | void *iommu; /* IOMMU private data */ |
19 | #endif | 18 | #endif |
20 | }; | 19 | }; |
21 | 20 | ||
@@ -52,7 +51,7 @@ extern unsigned long pci_mem_start; | |||
52 | #define PCIBIOS_MIN_CARDBUS_IO 0x4000 | 51 | #define PCIBIOS_MIN_CARDBUS_IO 0x4000 |
53 | 52 | ||
54 | void pcibios_config_init(void); | 53 | void pcibios_config_init(void); |
55 | struct pci_bus * pcibios_scan_root(int bus); | 54 | struct pci_bus *pcibios_scan_root(int bus); |
56 | 55 | ||
57 | void pcibios_set_master(struct pci_dev *dev); | 56 | void pcibios_set_master(struct pci_dev *dev); |
58 | void pcibios_penalize_isa_irq(int irq, int active); | 57 | void pcibios_penalize_isa_irq(int irq, int active); |
@@ -62,7 +61,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); | |||
62 | 61 | ||
63 | #define HAVE_PCI_MMAP | 62 | #define HAVE_PCI_MMAP |
64 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 63 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, |
65 | enum pci_mmap_state mmap_state, int write_combine); | 64 | enum pci_mmap_state mmap_state, |
65 | int write_combine); | ||
66 | 66 | ||
67 | 67 | ||
68 | #ifdef CONFIG_PCI | 68 | #ifdef CONFIG_PCI |
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h index 374690314539..df867e5d80b1 100644 --- a/include/asm-x86/pci_64.h +++ b/include/asm-x86/pci_64.h | |||
@@ -1,12 +1,10 @@ | |||
1 | #ifndef __x8664_PCI_H | 1 | #ifndef __x8664_PCI_H |
2 | #define __x8664_PCI_H | 2 | #define __x8664_PCI_H |
3 | 3 | ||
4 | |||
5 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
6 | 5 | ||
7 | |||
8 | #ifdef CONFIG_CALGARY_IOMMU | 6 | #ifdef CONFIG_CALGARY_IOMMU |
9 | static inline void* pci_iommu(struct pci_bus *bus) | 7 | static inline void *pci_iommu(struct pci_bus *bus) |
10 | { | 8 | { |
11 | struct pci_sysdata *sd = bus->sysdata; | 9 | struct pci_sysdata *sd = bus->sysdata; |
12 | return sd->iommu; | 10 | return sd->iommu; |
@@ -19,11 +17,10 @@ static inline void set_pci_iommu(struct pci_bus *bus, void *val) | |||
19 | } | 17 | } |
20 | #endif /* CONFIG_CALGARY_IOMMU */ | 18 | #endif /* CONFIG_CALGARY_IOMMU */ |
21 | 19 | ||
22 | 20 | extern int (*pci_config_read)(int seg, int bus, int dev, int fn, | |
23 | extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value); | 21 | int reg, int len, u32 *value); |
24 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value); | 22 | extern int (*pci_config_write)(int seg, int bus, int dev, int fn, |
25 | 23 | int reg, int len, u32 value); | |
26 | |||
27 | 24 | ||
28 | extern void pci_iommu_alloc(void); | 25 | extern void pci_iommu_alloc(void); |
29 | 26 | ||
@@ -65,5 +62,4 @@ extern void pci_iommu_alloc(void); | |||
65 | 62 | ||
66 | #endif /* __KERNEL__ */ | 63 | #endif /* __KERNEL__ */ |
67 | 64 | ||
68 | |||
69 | #endif /* __x8664_PCI_H */ | 65 | #endif /* __x8664_PCI_H */ |
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h index c0305bff0f19..101fb9e11954 100644 --- a/include/asm-x86/pda.h +++ b/include/asm-x86/pda.h | |||
@@ -22,7 +22,6 @@ struct x8664_pda { | |||
22 | offset 40!!! */ | 22 | offset 40!!! */ |
23 | #endif | 23 | #endif |
24 | char *irqstackptr; | 24 | char *irqstackptr; |
25 | unsigned int nodenumber; /* number of current node */ | ||
26 | unsigned int __softirq_pending; | 25 | unsigned int __softirq_pending; |
27 | unsigned int __nmi_count; /* number of NMI on this CPUs */ | 26 | unsigned int __nmi_count; /* number of NMI on this CPUs */ |
28 | short mmu_state; | 27 | short mmu_state; |
@@ -58,34 +57,36 @@ extern struct x8664_pda _proxy_pda; | |||
58 | 57 | ||
59 | #define pda_offset(field) offsetof(struct x8664_pda, field) | 58 | #define pda_offset(field) offsetof(struct x8664_pda, field) |
60 | 59 | ||
61 | #define pda_to_op(op, field, val) do { \ | 60 | #define pda_to_op(op, field, val) \ |
62 | typedef typeof(_proxy_pda.field) T__; \ | 61 | do { \ |
63 | if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ | 62 | typedef typeof(_proxy_pda.field) T__; \ |
64 | switch (sizeof(_proxy_pda.field)) { \ | 63 | if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \ |
65 | case 2: \ | 64 | switch (sizeof(_proxy_pda.field)) { \ |
66 | asm(op "w %1,%%gs:%c2" : \ | 65 | case 2: \ |
67 | "+m" (_proxy_pda.field) : \ | 66 | asm(op "w %1,%%gs:%c2" : \ |
68 | "ri" ((T__)val), \ | 67 | "+m" (_proxy_pda.field) : \ |
69 | "i"(pda_offset(field))); \ | 68 | "ri" ((T__)val), \ |
70 | break; \ | 69 | "i"(pda_offset(field))); \ |
71 | case 4: \ | 70 | break; \ |
72 | asm(op "l %1,%%gs:%c2" : \ | 71 | case 4: \ |
73 | "+m" (_proxy_pda.field) : \ | 72 | asm(op "l %1,%%gs:%c2" : \ |
74 | "ri" ((T__)val), \ | 73 | "+m" (_proxy_pda.field) : \ |
75 | "i" (pda_offset(field))); \ | 74 | "ri" ((T__)val), \ |
76 | break; \ | 75 | "i" (pda_offset(field))); \ |
77 | case 8: \ | 76 | break; \ |
78 | asm(op "q %1,%%gs:%c2": \ | 77 | case 8: \ |
79 | "+m" (_proxy_pda.field) : \ | 78 | asm(op "q %1,%%gs:%c2": \ |
80 | "ri" ((T__)val), \ | 79 | "+m" (_proxy_pda.field) : \ |
81 | "i"(pda_offset(field))); \ | 80 | "ri" ((T__)val), \ |
82 | break; \ | 81 | "i"(pda_offset(field))); \ |
83 | default: \ | 82 | break; \ |
84 | __bad_pda_field(); \ | 83 | default: \ |
85 | } \ | 84 | __bad_pda_field(); \ |
86 | } while (0) | 85 | } \ |
86 | } while (0) | ||
87 | 87 | ||
88 | #define pda_from_op(op,field) ({ \ | 88 | #define pda_from_op(op, field) \ |
89 | ({ \ | ||
89 | typeof(_proxy_pda.field) ret__; \ | 90 | typeof(_proxy_pda.field) ret__; \ |
90 | switch (sizeof(_proxy_pda.field)) { \ | 91 | switch (sizeof(_proxy_pda.field)) { \ |
91 | case 2: \ | 92 | case 2: \ |
@@ -93,23 +94,24 @@ extern struct x8664_pda _proxy_pda; | |||
93 | "=r" (ret__) : \ | 94 | "=r" (ret__) : \ |
94 | "i" (pda_offset(field)), \ | 95 | "i" (pda_offset(field)), \ |
95 | "m" (_proxy_pda.field)); \ | 96 | "m" (_proxy_pda.field)); \ |
96 | break; \ | 97 | break; \ |
97 | case 4: \ | 98 | case 4: \ |
98 | asm(op "l %%gs:%c1,%0": \ | 99 | asm(op "l %%gs:%c1,%0": \ |
99 | "=r" (ret__): \ | 100 | "=r" (ret__): \ |
100 | "i" (pda_offset(field)), \ | 101 | "i" (pda_offset(field)), \ |
101 | "m" (_proxy_pda.field)); \ | 102 | "m" (_proxy_pda.field)); \ |
102 | break; \ | 103 | break; \ |
103 | case 8: \ | 104 | case 8: \ |
104 | asm(op "q %%gs:%c1,%0": \ | 105 | asm(op "q %%gs:%c1,%0": \ |
105 | "=r" (ret__) : \ | 106 | "=r" (ret__) : \ |
106 | "i" (pda_offset(field)), \ | 107 | "i" (pda_offset(field)), \ |
107 | "m" (_proxy_pda.field)); \ | 108 | "m" (_proxy_pda.field)); \ |
108 | break; \ | 109 | break; \ |
109 | default: \ | 110 | default: \ |
110 | __bad_pda_field(); \ | 111 | __bad_pda_field(); \ |
111 | } \ | 112 | } \ |
112 | ret__; }) | 113 | ret__; \ |
114 | }) | ||
113 | 115 | ||
114 | #define read_pda(field) pda_from_op("mov", field) | 116 | #define read_pda(field) pda_from_op("mov", field) |
115 | #define write_pda(field, val) pda_to_op("mov", field, val) | 117 | #define write_pda(field, val) pda_to_op("mov", field, val) |
@@ -118,12 +120,13 @@ extern struct x8664_pda _proxy_pda; | |||
118 | #define or_pda(field, val) pda_to_op("or", field, val) | 120 | #define or_pda(field, val) pda_to_op("or", field, val) |
119 | 121 | ||
120 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ | 122 | /* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
121 | #define test_and_clear_bit_pda(bit, field) ({ \ | 123 | #define test_and_clear_bit_pda(bit, field) \ |
122 | int old__; \ | 124 | ({ \ |
123 | asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ | 125 | int old__; \ |
124 | : "=r" (old__), "+m" (_proxy_pda.field) \ | 126 | asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \ |
125 | : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \ | 127 | : "=r" (old__), "+m" (_proxy_pda.field) \ |
126 | old__; \ | 128 | : "dIr" (bit), "i" (pda_offset(field)) : "memory");\ |
129 | old__; \ | ||
127 | }) | 130 | }) |
128 | 131 | ||
129 | #endif | 132 | #endif |
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h index 0dec00f27eb4..736fc3bb8e1e 100644 --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h | |||
@@ -85,58 +85,62 @@ DECLARE_PER_CPU(unsigned long, this_cpu_off); | |||
85 | * don't give an lvalue though). */ | 85 | * don't give an lvalue though). */ |
86 | extern void __bad_percpu_size(void); | 86 | extern void __bad_percpu_size(void); |
87 | 87 | ||
88 | #define percpu_to_op(op,var,val) \ | 88 | #define percpu_to_op(op, var, val) \ |
89 | do { \ | 89 | do { \ |
90 | typedef typeof(var) T__; \ | 90 | typedef typeof(var) T__; \ |
91 | if (0) { T__ tmp__; tmp__ = (val); } \ | 91 | if (0) { \ |
92 | switch (sizeof(var)) { \ | 92 | T__ tmp__; \ |
93 | case 1: \ | 93 | tmp__ = (val); \ |
94 | asm(op "b %1,"__percpu_seg"%0" \ | 94 | } \ |
95 | : "+m" (var) \ | 95 | switch (sizeof(var)) { \ |
96 | :"ri" ((T__)val)); \ | 96 | case 1: \ |
97 | break; \ | 97 | asm(op "b %1,"__percpu_seg"%0" \ |
98 | case 2: \ | 98 | : "+m" (var) \ |
99 | asm(op "w %1,"__percpu_seg"%0" \ | 99 | : "ri" ((T__)val)); \ |
100 | : "+m" (var) \ | 100 | break; \ |
101 | :"ri" ((T__)val)); \ | 101 | case 2: \ |
102 | break; \ | 102 | asm(op "w %1,"__percpu_seg"%0" \ |
103 | case 4: \ | 103 | : "+m" (var) \ |
104 | asm(op "l %1,"__percpu_seg"%0" \ | 104 | : "ri" ((T__)val)); \ |
105 | : "+m" (var) \ | 105 | break; \ |
106 | :"ri" ((T__)val)); \ | 106 | case 4: \ |
107 | break; \ | 107 | asm(op "l %1,"__percpu_seg"%0" \ |
108 | default: __bad_percpu_size(); \ | 108 | : "+m" (var) \ |
109 | } \ | 109 | : "ri" ((T__)val)); \ |
110 | } while (0) | 110 | break; \ |
111 | 111 | default: __bad_percpu_size(); \ | |
112 | #define percpu_from_op(op,var) \ | 112 | } \ |
113 | ({ \ | 113 | } while (0) |
114 | typeof(var) ret__; \ | 114 | |
115 | switch (sizeof(var)) { \ | 115 | #define percpu_from_op(op, var) \ |
116 | case 1: \ | 116 | ({ \ |
117 | asm(op "b "__percpu_seg"%1,%0" \ | 117 | typeof(var) ret__; \ |
118 | : "=r" (ret__) \ | 118 | switch (sizeof(var)) { \ |
119 | : "m" (var)); \ | 119 | case 1: \ |
120 | break; \ | 120 | asm(op "b "__percpu_seg"%1,%0" \ |
121 | case 2: \ | 121 | : "=r" (ret__) \ |
122 | asm(op "w "__percpu_seg"%1,%0" \ | 122 | : "m" (var)); \ |
123 | : "=r" (ret__) \ | 123 | break; \ |
124 | : "m" (var)); \ | 124 | case 2: \ |
125 | break; \ | 125 | asm(op "w "__percpu_seg"%1,%0" \ |
126 | case 4: \ | 126 | : "=r" (ret__) \ |
127 | asm(op "l "__percpu_seg"%1,%0" \ | 127 | : "m" (var)); \ |
128 | : "=r" (ret__) \ | 128 | break; \ |
129 | : "m" (var)); \ | 129 | case 4: \ |
130 | break; \ | 130 | asm(op "l "__percpu_seg"%1,%0" \ |
131 | default: __bad_percpu_size(); \ | 131 | : "=r" (ret__) \ |
132 | } \ | 132 | : "m" (var)); \ |
133 | ret__; }) | 133 | break; \ |
134 | default: __bad_percpu_size(); \ | ||
135 | } \ | ||
136 | ret__; \ | ||
137 | }) | ||
134 | 138 | ||
135 | #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) | 139 | #define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) |
136 | #define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val) | 140 | #define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val) |
137 | #define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val) | 141 | #define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val) |
138 | #define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val) | 142 | #define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val) |
139 | #define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val) | 143 | #define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val) |
140 | #endif /* !__ASSEMBLY__ */ | 144 | #endif /* !__ASSEMBLY__ */ |
141 | #endif /* !CONFIG_X86_64 */ | 145 | #endif /* !CONFIG_X86_64 */ |
142 | #endif /* _ASM_X86_PERCPU_H_ */ | 146 | #endif /* _ASM_X86_PERCPU_H_ */ |
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h index 701404fab308..46bc52c0eae1 100644 --- a/include/asm-x86/pgtable-2level.h +++ b/include/asm-x86/pgtable-2level.h | |||
@@ -26,7 +26,8 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | |||
26 | native_set_pte(ptep, pte); | 26 | native_set_pte(ptep, pte); |
27 | } | 27 | } |
28 | 28 | ||
29 | static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, | 29 | static inline void native_set_pte_present(struct mm_struct *mm, |
30 | unsigned long addr, | ||
30 | pte_t *ptep, pte_t pte) | 31 | pte_t *ptep, pte_t pte) |
31 | { | 32 | { |
32 | native_set_pte(ptep, pte); | 33 | native_set_pte(ptep, pte); |
@@ -37,7 +38,8 @@ static inline void native_pmd_clear(pmd_t *pmdp) | |||
37 | native_set_pmd(pmdp, __pmd(0)); | 38 | native_set_pmd(pmdp, __pmd(0)); |
38 | } | 39 | } |
39 | 40 | ||
40 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) | 41 | static inline void native_pte_clear(struct mm_struct *mm, |
42 | unsigned long addr, pte_t *xp) | ||
41 | { | 43 | { |
42 | *xp = native_make_pte(0); | 44 | *xp = native_make_pte(0); |
43 | } | 45 | } |
@@ -61,16 +63,18 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) | |||
61 | */ | 63 | */ |
62 | #define PTE_FILE_MAX_BITS 29 | 64 | #define PTE_FILE_MAX_BITS 29 |
63 | 65 | ||
64 | #define pte_to_pgoff(pte) \ | 66 | #define pte_to_pgoff(pte) \ |
65 | ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) | 67 | ((((pte).pte_low >> 1) & 0x1f) + (((pte).pte_low >> 8) << 5)) |
66 | 68 | ||
67 | #define pgoff_to_pte(off) \ | 69 | #define pgoff_to_pte(off) \ |
68 | ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) | 70 | ((pte_t) { .pte_low = (((off) & 0x1f) << 1) + \ |
71 | (((off) >> 5) << 8) + _PAGE_FILE }) | ||
69 | 72 | ||
70 | /* Encode and de-code a swap entry */ | 73 | /* Encode and de-code a swap entry */ |
71 | #define __swp_type(x) (((x).val >> 1) & 0x1f) | 74 | #define __swp_type(x) (((x).val >> 1) & 0x1f) |
72 | #define __swp_offset(x) ((x).val >> 8) | 75 | #define __swp_offset(x) ((x).val >> 8) |
73 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | 76 | #define __swp_entry(type, offset) \ |
77 | ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | ||
74 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) | 78 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) |
75 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) | 79 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
76 | 80 | ||
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h index 1d763eec740f..8b4a9d44b7f4 100644 --- a/include/asm-x86/pgtable-3level.h +++ b/include/asm-x86/pgtable-3level.h | |||
@@ -8,22 +8,26 @@ | |||
8 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | 8 | * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pte_ERROR(e) \ | 11 | #define pte_ERROR(e) \ |
12 | printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) | 12 | printk("%s:%d: bad pte %p(%08lx%08lx).\n", \ |
13 | #define pmd_ERROR(e) \ | 13 | __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) |
14 | printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) | 14 | #define pmd_ERROR(e) \ |
15 | #define pgd_ERROR(e) \ | 15 | printk("%s:%d: bad pmd %p(%016Lx).\n", \ |
16 | printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) | 16 | __FILE__, __LINE__, &(e), pmd_val(e)) |
17 | 17 | #define pgd_ERROR(e) \ | |
18 | printk("%s:%d: bad pgd %p(%016Lx).\n", \ | ||
19 | __FILE__, __LINE__, &(e), pgd_val(e)) | ||
18 | 20 | ||
19 | static inline int pud_none(pud_t pud) | 21 | static inline int pud_none(pud_t pud) |
20 | { | 22 | { |
21 | return pud_val(pud) == 0; | 23 | return pud_val(pud) == 0; |
22 | } | 24 | } |
25 | |||
23 | static inline int pud_bad(pud_t pud) | 26 | static inline int pud_bad(pud_t pud) |
24 | { | 27 | { |
25 | return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; | 28 | return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0; |
26 | } | 29 | } |
30 | |||
27 | static inline int pud_present(pud_t pud) | 31 | static inline int pud_present(pud_t pud) |
28 | { | 32 | { |
29 | return pud_val(pud) & _PAGE_PRESENT; | 33 | return pud_val(pud) & _PAGE_PRESENT; |
@@ -48,7 +52,8 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) | |||
48 | * we are justified in merely clearing the PTE present bit, followed | 52 | * we are justified in merely clearing the PTE present bit, followed |
49 | * by a set. The ordering here is important. | 53 | * by a set. The ordering here is important. |
50 | */ | 54 | */ |
51 | static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, | 55 | static inline void native_set_pte_present(struct mm_struct *mm, |
56 | unsigned long addr, | ||
52 | pte_t *ptep, pte_t pte) | 57 | pte_t *ptep, pte_t pte) |
53 | { | 58 | { |
54 | ptep->pte_low = 0; | 59 | ptep->pte_low = 0; |
@@ -60,15 +65,17 @@ static inline void native_set_pte_present(struct mm_struct *mm, unsigned long ad | |||
60 | 65 | ||
61 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) | 66 | static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
62 | { | 67 | { |
63 | set_64bit((unsigned long long *)(ptep),native_pte_val(pte)); | 68 | set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); |
64 | } | 69 | } |
70 | |||
65 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) | 71 | static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) |
66 | { | 72 | { |
67 | set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd)); | 73 | set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); |
68 | } | 74 | } |
75 | |||
69 | static inline void native_set_pud(pud_t *pudp, pud_t pud) | 76 | static inline void native_set_pud(pud_t *pudp, pud_t pud) |
70 | { | 77 | { |
71 | set_64bit((unsigned long long *)(pudp),native_pud_val(pud)); | 78 | set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); |
72 | } | 79 | } |
73 | 80 | ||
74 | /* | 81 | /* |
@@ -76,7 +83,8 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud) | |||
76 | * entry, so clear the bottom half first and enforce ordering with a compiler | 83 | * entry, so clear the bottom half first and enforce ordering with a compiler |
77 | * barrier. | 84 | * barrier. |
78 | */ | 85 | */ |
79 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 86 | static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, |
87 | pte_t *ptep) | ||
80 | { | 88 | { |
81 | ptep->pte_low = 0; | 89 | ptep->pte_low = 0; |
82 | smp_wmb(); | 90 | smp_wmb(); |
@@ -107,20 +115,19 @@ static inline void pud_clear(pud_t *pudp) | |||
107 | * current pgd to avoid unnecessary TLB flushes. | 115 | * current pgd to avoid unnecessary TLB flushes. |
108 | */ | 116 | */ |
109 | pgd = read_cr3(); | 117 | pgd = read_cr3(); |
110 | if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) | 118 | if (__pa(pudp) >= pgd && __pa(pudp) < |
119 | (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) | ||
111 | write_cr3(pgd); | 120 | write_cr3(pgd); |
112 | } | 121 | } |
113 | 122 | ||
114 | #define pud_page(pud) \ | 123 | #define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK)) |
115 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) | ||
116 | 124 | ||
117 | #define pud_page_vaddr(pud) \ | 125 | #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) |
118 | ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) | ||
119 | 126 | ||
120 | 127 | ||
121 | /* Find an entry in the second-level page table.. */ | 128 | /* Find an entry in the second-level page table.. */ |
122 | #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ | 129 | #define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \ |
123 | pmd_index(address)) | 130 | pmd_index(address)) |
124 | 131 | ||
125 | #ifdef CONFIG_SMP | 132 | #ifdef CONFIG_SMP |
126 | static inline pte_t native_ptep_get_and_clear(pte_t *ptep) | 133 | static inline pte_t native_ptep_get_and_clear(pte_t *ptep) |
@@ -161,7 +168,8 @@ static inline unsigned long pte_pfn(pte_t pte) | |||
161 | * put the 32 bits of offset into the high part. | 168 | * put the 32 bits of offset into the high part. |
162 | */ | 169 | */ |
163 | #define pte_to_pgoff(pte) ((pte).pte_high) | 170 | #define pte_to_pgoff(pte) ((pte).pte_high) |
164 | #define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } }) | 171 | #define pgoff_to_pte(off) \ |
172 | ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } }) | ||
165 | #define PTE_FILE_MAX_BITS 32 | 173 | #define PTE_FILE_MAX_BITS 32 |
166 | 174 | ||
167 | /* Encode and de-code a swap entry */ | 175 | /* Encode and de-code a swap entry */ |
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 9cf472aeb9ce..f1d9f4a03f6f 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h | |||
@@ -4,13 +4,13 @@ | |||
4 | #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) | 4 | #define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) |
5 | #define FIRST_USER_ADDRESS 0 | 5 | #define FIRST_USER_ADDRESS 0 |
6 | 6 | ||
7 | #define _PAGE_BIT_PRESENT 0 | 7 | #define _PAGE_BIT_PRESENT 0 /* is present */ |
8 | #define _PAGE_BIT_RW 1 | 8 | #define _PAGE_BIT_RW 1 /* writeable */ |
9 | #define _PAGE_BIT_USER 2 | 9 | #define _PAGE_BIT_USER 2 /* userspace addressable */ |
10 | #define _PAGE_BIT_PWT 3 | 10 | #define _PAGE_BIT_PWT 3 /* page write through */ |
11 | #define _PAGE_BIT_PCD 4 | 11 | #define _PAGE_BIT_PCD 4 /* page cache disabled */ |
12 | #define _PAGE_BIT_ACCESSED 5 | 12 | #define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */ |
13 | #define _PAGE_BIT_DIRTY 6 | 13 | #define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */ |
14 | #define _PAGE_BIT_FILE 6 | 14 | #define _PAGE_BIT_FILE 6 |
15 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ | 15 | #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ |
16 | #define _PAGE_BIT_PAT 7 /* on 4KB pages */ | 16 | #define _PAGE_BIT_PAT 7 /* on 4KB pages */ |
@@ -48,24 +48,39 @@ | |||
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | /* If _PAGE_PRESENT is clear, we use these: */ | 50 | /* If _PAGE_PRESENT is clear, we use these: */ |
51 | #define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */ | 51 | #define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, |
52 | * saved PTE; unset:swap */ | ||
52 | #define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE; | 53 | #define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE; |
53 | pte_present gives true */ | 54 | pte_present gives true */ |
54 | 55 | ||
55 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | 56 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
56 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 57 | _PAGE_ACCESSED | _PAGE_DIRTY) |
58 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ | ||
59 | _PAGE_DIRTY) | ||
57 | 60 | ||
58 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | 61 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) |
59 | 62 | ||
60 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) | 63 | #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) |
61 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | 64 | #define _PAGE_CACHE_WB (0) |
65 | #define _PAGE_CACHE_WC (_PAGE_PWT) | ||
66 | #define _PAGE_CACHE_UC_MINUS (_PAGE_PCD) | ||
67 | #define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT) | ||
62 | 68 | ||
63 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) | 69 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) |
64 | #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | 70 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
65 | #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | 71 | _PAGE_ACCESSED | _PAGE_NX) |
72 | |||
73 | #define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \ | ||
74 | _PAGE_USER | _PAGE_ACCESSED) | ||
75 | #define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ | ||
76 | _PAGE_ACCESSED | _PAGE_NX) | ||
77 | #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ | ||
78 | _PAGE_ACCESSED) | ||
66 | #define PAGE_COPY PAGE_COPY_NOEXEC | 79 | #define PAGE_COPY PAGE_COPY_NOEXEC |
67 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) | 80 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \ |
68 | #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | 81 | _PAGE_ACCESSED | _PAGE_NX) |
82 | #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ | ||
83 | _PAGE_ACCESSED) | ||
69 | 84 | ||
70 | #ifdef CONFIG_X86_32 | 85 | #ifdef CONFIG_X86_32 |
71 | #define _PAGE_KERNEL_EXEC \ | 86 | #define _PAGE_KERNEL_EXEC \ |
@@ -84,6 +99,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | |||
84 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) | 99 | #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) |
85 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) | 100 | #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) |
86 | #define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) | 101 | #define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT) |
102 | #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) | ||
87 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) | 103 | #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) |
88 | #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) | 104 | #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) |
89 | #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) | 105 | #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) |
@@ -101,6 +117,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | |||
101 | #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) | 117 | #define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO) |
102 | #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) | 118 | #define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC) |
103 | #define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) | 119 | #define PAGE_KERNEL_RX MAKE_GLOBAL(__PAGE_KERNEL_RX) |
120 | #define PAGE_KERNEL_WC MAKE_GLOBAL(__PAGE_KERNEL_WC) | ||
104 | #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) | 121 | #define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE) |
105 | #define PAGE_KERNEL_UC_MINUS MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS) | 122 | #define PAGE_KERNEL_UC_MINUS MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS) |
106 | #define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE) | 123 | #define PAGE_KERNEL_EXEC_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE) |
@@ -134,7 +151,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC; | |||
134 | * ZERO_PAGE is a global shared page that is always zero: used | 151 | * ZERO_PAGE is a global shared page that is always zero: used |
135 | * for zero-mapped memory areas etc.. | 152 | * for zero-mapped memory areas etc.. |
136 | */ | 153 | */ |
137 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | 154 | extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; |
138 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | 155 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
139 | 156 | ||
140 | extern spinlock_t pgd_lock; | 157 | extern spinlock_t pgd_lock; |
@@ -144,30 +161,101 @@ extern struct list_head pgd_list; | |||
144 | * The following only work if pte_present() is true. | 161 | * The following only work if pte_present() is true. |
145 | * Undefined behaviour if not.. | 162 | * Undefined behaviour if not.. |
146 | */ | 163 | */ |
147 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | 164 | static inline int pte_dirty(pte_t pte) |
148 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | 165 | { |
149 | static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } | 166 | return pte_val(pte) & _PAGE_DIRTY; |
150 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | 167 | } |
151 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; } | 168 | |
152 | static inline int pte_global(pte_t pte) { return pte_val(pte) & _PAGE_GLOBAL; } | 169 | static inline int pte_young(pte_t pte) |
153 | static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); } | 170 | { |
154 | 171 | return pte_val(pte) & _PAGE_ACCESSED; | |
155 | static inline int pmd_large(pmd_t pte) { | 172 | } |
156 | return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == | 173 | |
157 | (_PAGE_PSE|_PAGE_PRESENT); | 174 | static inline int pte_write(pte_t pte) |
175 | { | ||
176 | return pte_val(pte) & _PAGE_RW; | ||
177 | } | ||
178 | |||
179 | static inline int pte_file(pte_t pte) | ||
180 | { | ||
181 | return pte_val(pte) & _PAGE_FILE; | ||
182 | } | ||
183 | |||
184 | static inline int pte_huge(pte_t pte) | ||
185 | { | ||
186 | return pte_val(pte) & _PAGE_PSE; | ||
187 | } | ||
188 | |||
189 | static inline int pte_global(pte_t pte) | ||
190 | { | ||
191 | return pte_val(pte) & _PAGE_GLOBAL; | ||
192 | } | ||
193 | |||
194 | static inline int pte_exec(pte_t pte) | ||
195 | { | ||
196 | return !(pte_val(pte) & _PAGE_NX); | ||
197 | } | ||
198 | |||
199 | static inline int pmd_large(pmd_t pte) | ||
200 | { | ||
201 | return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == | ||
202 | (_PAGE_PSE | _PAGE_PRESENT); | ||
203 | } | ||
204 | |||
205 | static inline pte_t pte_mkclean(pte_t pte) | ||
206 | { | ||
207 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); | ||
208 | } | ||
209 | |||
210 | static inline pte_t pte_mkold(pte_t pte) | ||
211 | { | ||
212 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); | ||
213 | } | ||
214 | |||
215 | static inline pte_t pte_wrprotect(pte_t pte) | ||
216 | { | ||
217 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); | ||
218 | } | ||
219 | |||
220 | static inline pte_t pte_mkexec(pte_t pte) | ||
221 | { | ||
222 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); | ||
223 | } | ||
224 | |||
225 | static inline pte_t pte_mkdirty(pte_t pte) | ||
226 | { | ||
227 | return __pte(pte_val(pte) | _PAGE_DIRTY); | ||
228 | } | ||
229 | |||
230 | static inline pte_t pte_mkyoung(pte_t pte) | ||
231 | { | ||
232 | return __pte(pte_val(pte) | _PAGE_ACCESSED); | ||
158 | } | 233 | } |
159 | 234 | ||
160 | static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); } | 235 | static inline pte_t pte_mkwrite(pte_t pte) |
161 | static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); } | 236 | { |
162 | static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); } | 237 | return __pte(pte_val(pte) | _PAGE_RW); |
163 | static inline pte_t pte_mkexec(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); } | 238 | } |
164 | static inline pte_t pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | _PAGE_DIRTY); } | 239 | |
165 | static inline pte_t pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | _PAGE_ACCESSED); } | 240 | static inline pte_t pte_mkhuge(pte_t pte) |
166 | static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); } | 241 | { |
167 | static inline pte_t pte_mkhuge(pte_t pte) { return __pte(pte_val(pte) | _PAGE_PSE); } | 242 | return __pte(pte_val(pte) | _PAGE_PSE); |
168 | static inline pte_t pte_clrhuge(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); } | 243 | } |
169 | static inline pte_t pte_mkglobal(pte_t pte) { return __pte(pte_val(pte) | _PAGE_GLOBAL); } | 244 | |
170 | static inline pte_t pte_clrglobal(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); } | 245 | static inline pte_t pte_clrhuge(pte_t pte) |
246 | { | ||
247 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); | ||
248 | } | ||
249 | |||
250 | static inline pte_t pte_mkglobal(pte_t pte) | ||
251 | { | ||
252 | return __pte(pte_val(pte) | _PAGE_GLOBAL); | ||
253 | } | ||
254 | |||
255 | static inline pte_t pte_clrglobal(pte_t pte) | ||
256 | { | ||
257 | return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); | ||
258 | } | ||
171 | 259 | ||
172 | extern pteval_t __supported_pte_mask; | 260 | extern pteval_t __supported_pte_mask; |
173 | 261 | ||
@@ -334,7 +422,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
334 | }) | 422 | }) |
335 | 423 | ||
336 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 424 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
337 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 425 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, |
426 | pte_t *ptep) | ||
338 | { | 427 | { |
339 | pte_t pte = native_ptep_get_and_clear(ptep); | 428 | pte_t pte = native_ptep_get_and_clear(ptep); |
340 | pte_update(mm, addr, ptep); | 429 | pte_update(mm, addr, ptep); |
@@ -342,7 +431,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | |||
342 | } | 431 | } |
343 | 432 | ||
344 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | 433 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
345 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) | 434 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
435 | unsigned long addr, pte_t *ptep, | ||
436 | int full) | ||
346 | { | 437 | { |
347 | pte_t pte; | 438 | pte_t pte; |
348 | if (full) { | 439 | if (full) { |
@@ -358,7 +449,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long | |||
358 | } | 449 | } |
359 | 450 | ||
360 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 451 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
361 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 452 | static inline void ptep_set_wrprotect(struct mm_struct *mm, |
453 | unsigned long addr, pte_t *ptep) | ||
362 | { | 454 | { |
363 | clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); | 455 | clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); |
364 | pte_update(mm, addr, ptep); | 456 | pte_update(mm, addr, ptep); |
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 4e6a0fca0b47..c4a643674458 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h | |||
@@ -40,13 +40,13 @@ void paging_init(void); | |||
40 | #ifdef CONFIG_X86_PAE | 40 | #ifdef CONFIG_X86_PAE |
41 | # include <asm/pgtable-3level-defs.h> | 41 | # include <asm/pgtable-3level-defs.h> |
42 | # define PMD_SIZE (1UL << PMD_SHIFT) | 42 | # define PMD_SIZE (1UL << PMD_SHIFT) |
43 | # define PMD_MASK (~(PMD_SIZE-1)) | 43 | # define PMD_MASK (~(PMD_SIZE - 1)) |
44 | #else | 44 | #else |
45 | # include <asm/pgtable-2level-defs.h> | 45 | # include <asm/pgtable-2level-defs.h> |
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 48 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
49 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 49 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) |
50 | 50 | ||
51 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) | 51 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) |
52 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | 52 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) |
@@ -58,21 +58,22 @@ void paging_init(void); | |||
58 | * The vmalloc() routines leaves a hole of 4kB between each vmalloced | 58 | * The vmalloc() routines leaves a hole of 4kB between each vmalloced |
59 | * area for the same reason. ;) | 59 | * area for the same reason. ;) |
60 | */ | 60 | */ |
61 | #define VMALLOC_OFFSET (8*1024*1024) | 61 | #define VMALLOC_OFFSET (8 * 1024 * 1024) |
62 | #define VMALLOC_START (((unsigned long) high_memory + \ | 62 | #define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \ |
63 | 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) | 63 | & ~(VMALLOC_OFFSET - 1)) |
64 | #ifdef CONFIG_X86_PAE | 64 | #ifdef CONFIG_X86_PAE |
65 | #define LAST_PKMAP 512 | 65 | #define LAST_PKMAP 512 |
66 | #else | 66 | #else |
67 | #define LAST_PKMAP 1024 | 67 | #define LAST_PKMAP 1024 |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK) | 70 | #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ |
71 | & PMD_MASK) | ||
71 | 72 | ||
72 | #ifdef CONFIG_HIGHMEM | 73 | #ifdef CONFIG_HIGHMEM |
73 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) | 74 | # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) |
74 | #else | 75 | #else |
75 | # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) | 76 | # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) |
76 | #endif | 77 | #endif |
77 | 78 | ||
78 | /* | 79 | /* |
@@ -88,10 +89,16 @@ extern unsigned long pg0[]; | |||
88 | #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 89 | #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
89 | 90 | ||
90 | /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ | 91 | /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ |
91 | #define pmd_none(x) (!(unsigned long)pmd_val(x)) | 92 | #define pmd_none(x) (!(unsigned long)pmd_val((x))) |
92 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 93 | #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) |
93 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) | ||
94 | 94 | ||
95 | extern int pmd_bad(pmd_t pmd); | ||
96 | |||
97 | #define pmd_bad_v1(x) \ | ||
98 | (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER))) | ||
99 | #define pmd_bad_v2(x) \ | ||
100 | (_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER | \ | ||
101 | _PAGE_PSE | _PAGE_NX))) | ||
95 | 102 | ||
96 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | 103 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) |
97 | 104 | ||
@@ -117,17 +124,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |||
117 | } | 124 | } |
118 | 125 | ||
119 | /* | 126 | /* |
120 | * Macro to mark a page protection value as "uncacheable". On processors which do not support | 127 | * Macro to mark a page protection value as "uncacheable". |
121 | * it, this is a no-op. | 128 | * On processors which do not support it, this is a no-op. |
122 | */ | 129 | */ |
123 | #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ | 130 | #define pgprot_noncached(prot) \ |
124 | ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) | 131 | ((boot_cpu_data.x86 > 3) \ |
132 | ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \ | ||
133 | : (prot)) | ||
125 | 134 | ||
126 | /* | 135 | /* |
127 | * Conversion functions: convert a page and protection to a page entry, | 136 | * Conversion functions: convert a page and protection to a page entry, |
128 | * and a page entry and page directory to the page they refer to. | 137 | * and a page entry and page directory to the page they refer to. |
129 | */ | 138 | */ |
130 | |||
131 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 139 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
132 | 140 | ||
133 | /* | 141 | /* |
@@ -136,20 +144,20 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |||
136 | * this macro returns the index of the entry in the pgd page which would | 144 | * this macro returns the index of the entry in the pgd page which would |
137 | * control the given virtual address | 145 | * control the given virtual address |
138 | */ | 146 | */ |
139 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 147 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
140 | #define pgd_index_k(addr) pgd_index(addr) | 148 | #define pgd_index_k(addr) pgd_index((addr)) |
141 | 149 | ||
142 | /* | 150 | /* |
143 | * pgd_offset() returns a (pgd_t *) | 151 | * pgd_offset() returns a (pgd_t *) |
144 | * pgd_index() is used get the offset into the pgd page's array of pgd_t's; | 152 | * pgd_index() is used get the offset into the pgd page's array of pgd_t's; |
145 | */ | 153 | */ |
146 | #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) | 154 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) |
147 | 155 | ||
148 | /* | 156 | /* |
149 | * a shortcut which implies the use of the kernel's pgd, instead | 157 | * a shortcut which implies the use of the kernel's pgd, instead |
150 | * of a process's | 158 | * of a process's |
151 | */ | 159 | */ |
152 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 160 | #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) |
153 | 161 | ||
154 | static inline int pud_large(pud_t pud) { return 0; } | 162 | static inline int pud_large(pud_t pud) { return 0; } |
155 | 163 | ||
@@ -159,8 +167,8 @@ static inline int pud_large(pud_t pud) { return 0; } | |||
159 | * this macro returns the index of the entry in the pmd page which would | 167 | * this macro returns the index of the entry in the pmd page which would |
160 | * control the given virtual address | 168 | * control the given virtual address |
161 | */ | 169 | */ |
162 | #define pmd_index(address) \ | 170 | #define pmd_index(address) \ |
163 | (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | 171 | (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) |
164 | 172 | ||
165 | /* | 173 | /* |
166 | * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] | 174 | * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] |
@@ -168,43 +176,45 @@ static inline int pud_large(pud_t pud) { return 0; } | |||
168 | * this macro returns the index of the entry in the pte page which would | 176 | * this macro returns the index of the entry in the pte page which would |
169 | * control the given virtual address | 177 | * control the given virtual address |
170 | */ | 178 | */ |
171 | #define pte_index(address) \ | 179 | #define pte_index(address) \ |
172 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 180 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
173 | #define pte_offset_kernel(dir, address) \ | 181 | #define pte_offset_kernel(dir, address) \ |
174 | ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) | 182 | ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address))) |
175 | 183 | ||
176 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | 184 | #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) |
177 | 185 | ||
178 | #define pmd_page_vaddr(pmd) \ | 186 | #define pmd_page_vaddr(pmd) \ |
179 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | 187 | ((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK)) |
180 | 188 | ||
181 | #if defined(CONFIG_HIGHPTE) | 189 | #if defined(CONFIG_HIGHPTE) |
182 | #define pte_offset_map(dir, address) \ | 190 | #define pte_offset_map(dir, address) \ |
183 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) | 191 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ |
184 | #define pte_offset_map_nested(dir, address) \ | 192 | pte_index((address))) |
185 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) | 193 | #define pte_offset_map_nested(dir, address) \ |
186 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | 194 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ |
187 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | 195 | pte_index((address))) |
196 | #define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0) | ||
197 | #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) | ||
188 | #else | 198 | #else |
189 | #define pte_offset_map(dir, address) \ | 199 | #define pte_offset_map(dir, address) \ |
190 | ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) | 200 | ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address))) |
191 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | 201 | #define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address)) |
192 | #define pte_unmap(pte) do { } while (0) | 202 | #define pte_unmap(pte) do { } while (0) |
193 | #define pte_unmap_nested(pte) do { } while (0) | 203 | #define pte_unmap_nested(pte) do { } while (0) |
194 | #endif | 204 | #endif |
195 | 205 | ||
196 | /* Clear a kernel PTE and flush it from the TLB */ | 206 | /* Clear a kernel PTE and flush it from the TLB */ |
197 | #define kpte_clear_flush(ptep, vaddr) \ | 207 | #define kpte_clear_flush(ptep, vaddr) \ |
198 | do { \ | 208 | do { \ |
199 | pte_clear(&init_mm, vaddr, ptep); \ | 209 | pte_clear(&init_mm, (vaddr), (ptep)); \ |
200 | __flush_tlb_one(vaddr); \ | 210 | __flush_tlb_one((vaddr)); \ |
201 | } while (0) | 211 | } while (0) |
202 | 212 | ||
203 | /* | 213 | /* |
204 | * The i386 doesn't have any external MMU info: the kernel page | 214 | * The i386 doesn't have any external MMU info: the kernel page |
205 | * tables contain all the necessary information. | 215 | * tables contain all the necessary information. |
206 | */ | 216 | */ |
207 | #define update_mmu_cache(vma,address,pte) do { } while (0) | 217 | #define update_mmu_cache(vma, address, pte) do { } while (0) |
208 | 218 | ||
209 | void native_pagetable_setup_start(pgd_t *base); | 219 | void native_pagetable_setup_start(pgd_t *base); |
210 | void native_pagetable_setup_done(pgd_t *base); | 220 | void native_pagetable_setup_done(pgd_t *base); |
@@ -233,7 +243,7 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base) | |||
233 | #define kern_addr_valid(kaddr) (0) | 243 | #define kern_addr_valid(kaddr) (0) |
234 | #endif | 244 | #endif |
235 | 245 | ||
236 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 246 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
237 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 247 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
238 | 248 | ||
239 | #endif /* _I386_PGTABLE_H */ | 249 | #endif /* _I386_PGTABLE_H */ |
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 0a0b77bc736a..9fd87d0b6477 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h | |||
@@ -52,14 +52,18 @@ extern void paging_init(void); | |||
52 | 52 | ||
53 | #ifndef __ASSEMBLY__ | 53 | #ifndef __ASSEMBLY__ |
54 | 54 | ||
55 | #define pte_ERROR(e) \ | 55 | #define pte_ERROR(e) \ |
56 | printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) | 56 | printk("%s:%d: bad pte %p(%016lx).\n", \ |
57 | #define pmd_ERROR(e) \ | 57 | __FILE__, __LINE__, &(e), pte_val(e)) |
58 | printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) | 58 | #define pmd_ERROR(e) \ |
59 | #define pud_ERROR(e) \ | 59 | printk("%s:%d: bad pmd %p(%016lx).\n", \ |
60 | printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e)) | 60 | __FILE__, __LINE__, &(e), pmd_val(e)) |
61 | #define pgd_ERROR(e) \ | 61 | #define pud_ERROR(e) \ |
62 | printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) | 62 | printk("%s:%d: bad pud %p(%016lx).\n", \ |
63 | __FILE__, __LINE__, &(e), pud_val(e)) | ||
64 | #define pgd_ERROR(e) \ | ||
65 | printk("%s:%d: bad pgd %p(%016lx).\n", \ | ||
66 | __FILE__, __LINE__, &(e), pgd_val(e)) | ||
63 | 67 | ||
64 | #define pgd_none(x) (!pgd_val(x)) | 68 | #define pgd_none(x) (!pgd_val(x)) |
65 | #define pud_none(x) (!pud_val(x)) | 69 | #define pud_none(x) (!pud_val(x)) |
@@ -87,7 +91,8 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) | |||
87 | #ifdef CONFIG_SMP | 91 | #ifdef CONFIG_SMP |
88 | return native_make_pte(xchg(&xp->pte, 0)); | 92 | return native_make_pte(xchg(&xp->pte, 0)); |
89 | #else | 93 | #else |
90 | /* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */ | 94 | /* native_local_ptep_get_and_clear, |
95 | but duplicated because of cyclic dependency */ | ||
91 | pte_t ret = *xp; | 96 | pte_t ret = *xp; |
92 | native_pte_clear(NULL, 0, xp); | 97 | native_pte_clear(NULL, 0, xp); |
93 | return ret; | 98 | return ret; |
@@ -119,7 +124,7 @@ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) | |||
119 | *pgdp = pgd; | 124 | *pgdp = pgd; |
120 | } | 125 | } |
121 | 126 | ||
122 | static inline void native_pgd_clear(pgd_t * pgd) | 127 | static inline void native_pgd_clear(pgd_t *pgd) |
123 | { | 128 | { |
124 | native_set_pgd(pgd, native_make_pgd(0)); | 129 | native_set_pgd(pgd, native_make_pgd(0)); |
125 | } | 130 | } |
@@ -128,19 +133,19 @@ static inline void native_pgd_clear(pgd_t * pgd) | |||
128 | 133 | ||
129 | #endif /* !__ASSEMBLY__ */ | 134 | #endif /* !__ASSEMBLY__ */ |
130 | 135 | ||
131 | #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) | 136 | #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) |
132 | #define PMD_MASK (~(PMD_SIZE-1)) | 137 | #define PMD_MASK (~(PMD_SIZE - 1)) |
133 | #define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) | 138 | #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) |
134 | #define PUD_MASK (~(PUD_SIZE-1)) | 139 | #define PUD_MASK (~(PUD_SIZE - 1)) |
135 | #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) | 140 | #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) |
136 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 141 | #define PGDIR_MASK (~(PGDIR_SIZE - 1)) |
137 | 142 | ||
138 | 143 | ||
139 | #define MAXMEM _AC(0x3fffffffffff, UL) | 144 | #define MAXMEM _AC(0x00003fffffffffff, UL) |
140 | #define VMALLOC_START _AC(0xffffc20000000000, UL) | 145 | #define VMALLOC_START _AC(0xffffc20000000000, UL) |
141 | #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) | 146 | #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) |
142 | #define VMEMMAP_START _AC(0xffffe20000000000, UL) | 147 | #define VMEMMAP_START _AC(0xffffe20000000000, UL) |
143 | #define MODULES_VADDR _AC(0xffffffff88000000, UL) | 148 | #define MODULES_VADDR _AC(0xffffffffa0000000, UL) |
144 | #define MODULES_END _AC(0xfffffffffff00000, UL) | 149 | #define MODULES_END _AC(0xfffffffffff00000, UL) |
145 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) | 150 | #define MODULES_LEN (MODULES_END - MODULES_VADDR) |
146 | 151 | ||
@@ -153,26 +158,28 @@ static inline unsigned long pgd_bad(pgd_t pgd) | |||
153 | 158 | ||
154 | static inline unsigned long pud_bad(pud_t pud) | 159 | static inline unsigned long pud_bad(pud_t pud) |
155 | { | 160 | { |
156 | return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); | 161 | return pud_val(pud) & |
162 | ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); | ||
157 | } | 163 | } |
158 | 164 | ||
159 | static inline unsigned long pmd_bad(pmd_t pmd) | 165 | static inline unsigned long pmd_bad(pmd_t pmd) |
160 | { | 166 | { |
161 | return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); | 167 | return pmd_val(pmd) & |
168 | ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX); | ||
162 | } | 169 | } |
163 | 170 | ||
164 | #define pte_none(x) (!pte_val(x)) | 171 | #define pte_none(x) (!pte_val((x))) |
165 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 172 | #define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
166 | 173 | ||
167 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ | 174 | #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */ |
168 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 175 | #define pte_page(x) pfn_to_page(pte_pfn((x))) |
169 | #define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) | 176 | #define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) |
170 | 177 | ||
171 | /* | 178 | /* |
172 | * Macro to mark a page protection value as "uncacheable". | 179 | * Macro to mark a page protection value as "uncacheable". |
173 | */ | 180 | */ |
174 | #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) | 181 | #define pgprot_noncached(prot) \ |
175 | 182 | (__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT)) | |
176 | 183 | ||
177 | /* | 184 | /* |
178 | * Conversion functions: convert a page and protection to a page entry, | 185 | * Conversion functions: convert a page and protection to a page entry, |
@@ -182,75 +189,81 @@ static inline unsigned long pmd_bad(pmd_t pmd) | |||
182 | /* | 189 | /* |
183 | * Level 4 access. | 190 | * Level 4 access. |
184 | */ | 191 | */ |
185 | #define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK)) | 192 | #define pgd_page_vaddr(pgd) \ |
186 | #define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)) | 193 | ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK)) |
187 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 194 | #define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT)) |
188 | #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) | 195 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
189 | #define pgd_offset_k(address) (init_level4_pgt + pgd_index(address)) | 196 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) |
197 | #define pgd_offset_k(address) (init_level4_pgt + pgd_index((address))) | ||
190 | #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) | 198 | #define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT) |
191 | static inline int pgd_large(pgd_t pgd) { return 0; } | 199 | static inline int pgd_large(pgd_t pgd) { return 0; } |
192 | #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) | 200 | #define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE }) |
193 | 201 | ||
194 | /* PUD - Level3 access */ | 202 | /* PUD - Level3 access */ |
195 | /* to find an entry in a page-table-directory. */ | 203 | /* to find an entry in a page-table-directory. */ |
196 | #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK)) | 204 | #define pud_page_vaddr(pud) \ |
197 | #define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT)) | 205 | ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK)) |
198 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) | 206 | #define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT)) |
199 | #define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address)) | 207 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) |
200 | #define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT) | 208 | #define pud_offset(pgd, address) \ |
209 | ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address))) | ||
210 | #define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT) | ||
201 | 211 | ||
202 | static inline int pud_large(pud_t pte) | 212 | static inline int pud_large(pud_t pte) |
203 | { | 213 | { |
204 | return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) == | 214 | return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == |
205 | (_PAGE_PSE|_PAGE_PRESENT); | 215 | (_PAGE_PSE | _PAGE_PRESENT); |
206 | } | 216 | } |
207 | 217 | ||
208 | /* PMD - Level 2 access */ | 218 | /* PMD - Level 2 access */ |
209 | #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK)) | 219 | #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_MASK)) |
210 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | 220 | #define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT)) |
211 | 221 | ||
212 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | 222 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) |
213 | #define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \ | 223 | #define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \ |
214 | pmd_index(address)) | 224 | pmd_index(address)) |
215 | #define pmd_none(x) (!pmd_val(x)) | 225 | #define pmd_none(x) (!pmd_val((x))) |
216 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 226 | #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT) |
217 | #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) | 227 | #define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot)))) |
218 | #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) | 228 | #define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) |
219 | 229 | ||
220 | #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) | 230 | #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) |
221 | #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE }) | 231 | #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \ |
232 | _PAGE_FILE }) | ||
222 | #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT | 233 | #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT |
223 | 234 | ||
224 | /* PTE - Level 1 access. */ | 235 | /* PTE - Level 1 access. */ |
225 | 236 | ||
226 | /* page, protection -> pte */ | 237 | /* page, protection -> pte */ |
227 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 238 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot)) |
228 | 239 | ||
229 | #define pte_index(address) \ | 240 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
230 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
231 | #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ | 241 | #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \ |
232 | pte_index(address)) | 242 | pte_index((address))) |
233 | 243 | ||
234 | /* x86-64 always has all page tables mapped. */ | 244 | /* x86-64 always has all page tables mapped. */ |
235 | #define pte_offset_map(dir,address) pte_offset_kernel(dir,address) | 245 | #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) |
236 | #define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address) | 246 | #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address)) |
237 | #define pte_unmap(pte) /* NOP */ | 247 | #define pte_unmap(pte) /* NOP */ |
238 | #define pte_unmap_nested(pte) /* NOP */ | 248 | #define pte_unmap_nested(pte) /* NOP */ |
249 | |||
250 | #define update_mmu_cache(vma, address, pte) do { } while (0) | ||
239 | 251 | ||
240 | #define update_mmu_cache(vma,address,pte) do { } while (0) | 252 | extern int direct_gbpages; |
241 | 253 | ||
242 | /* Encode and de-code a swap entry */ | 254 | /* Encode and de-code a swap entry */ |
243 | #define __swp_type(x) (((x).val >> 1) & 0x3f) | 255 | #define __swp_type(x) (((x).val >> 1) & 0x3f) |
244 | #define __swp_offset(x) ((x).val >> 8) | 256 | #define __swp_offset(x) ((x).val >> 8) |
245 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) | 257 | #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | \ |
246 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | 258 | ((offset) << 8) }) |
259 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) | ||
247 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) | 260 | #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
248 | 261 | ||
249 | extern int kern_addr_valid(unsigned long addr); | 262 | extern int kern_addr_valid(unsigned long addr); |
250 | extern void cleanup_highmap(void); | 263 | extern void cleanup_highmap(void); |
251 | 264 | ||
252 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | 265 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
253 | remap_pfn_range(vma, vaddr, pfn, size, prot) | 266 | remap_pfn_range(vma, vaddr, pfn, size, prot) |
254 | 267 | ||
255 | #define HAVE_ARCH_UNMAPPED_AREA | 268 | #define HAVE_ARCH_UNMAPPED_AREA |
256 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | 269 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN |
@@ -263,8 +276,10 @@ extern void cleanup_highmap(void); | |||
263 | 276 | ||
264 | /* fs/proc/kcore.c */ | 277 | /* fs/proc/kcore.c */ |
265 | #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) | 278 | #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) |
266 | #define kc_offset_to_vaddr(o) \ | 279 | #define kc_offset_to_vaddr(o) \ |
267 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) | 280 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \ |
281 | ? ((o) | ~__VIRTUAL_MASK) \ | ||
282 | : (o)) | ||
268 | 283 | ||
269 | #define __HAVE_ARCH_PTE_SAME | 284 | #define __HAVE_ARCH_PTE_SAME |
270 | #endif /* !__ASSEMBLY__ */ | 285 | #endif /* !__ASSEMBLY__ */ |
diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h index bb7133dc155d..fe312a5ba204 100644 --- a/include/asm-x86/posix_types.h +++ b/include/asm-x86/posix_types.h | |||
@@ -1,11 +1,5 @@ | |||
1 | #ifdef __KERNEL__ | 1 | #ifdef __KERNEL__ |
2 | # ifdef CONFIG_X86_32 | 2 | # if defined(CONFIG_X86_32) || defined(__i386__) |
3 | # include "posix_types_32.h" | ||
4 | # else | ||
5 | # include "posix_types_64.h" | ||
6 | # endif | ||
7 | #else | ||
8 | # ifdef __i386__ | ||
9 | # include "posix_types_32.h" | 3 | # include "posix_types_32.h" |
10 | # else | 4 | # else |
11 | # include "posix_types_64.h" | 5 | # include "posix_types_64.h" |
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h index 015e539cdef5..b031efda37ec 100644 --- a/include/asm-x86/posix_types_32.h +++ b/include/asm-x86/posix_types_32.h | |||
@@ -45,32 +45,39 @@ typedef struct { | |||
45 | #if defined(__KERNEL__) | 45 | #if defined(__KERNEL__) |
46 | 46 | ||
47 | #undef __FD_SET | 47 | #undef __FD_SET |
48 | #define __FD_SET(fd,fdsetp) \ | 48 | #define __FD_SET(fd,fdsetp) \ |
49 | __asm__ __volatile__("btsl %1,%0": \ | 49 | asm volatile("btsl %1,%0": \ |
50 | "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) | 50 | "+m" (*(__kernel_fd_set *)(fdsetp)) \ |
51 | : "r" ((int)(fd))) | ||
51 | 52 | ||
52 | #undef __FD_CLR | 53 | #undef __FD_CLR |
53 | #define __FD_CLR(fd,fdsetp) \ | 54 | #define __FD_CLR(fd,fdsetp) \ |
54 | __asm__ __volatile__("btrl %1,%0": \ | 55 | asm volatile("btrl %1,%0": \ |
55 | "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) | 56 | "+m" (*(__kernel_fd_set *)(fdsetp)) \ |
57 | : "r" ((int) (fd))) | ||
56 | 58 | ||
57 | #undef __FD_ISSET | 59 | #undef __FD_ISSET |
58 | #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \ | 60 | #define __FD_ISSET(fd,fdsetp) \ |
59 | unsigned char __result; \ | 61 | (__extension__ \ |
60 | __asm__ __volatile__("btl %1,%2 ; setb %0" \ | 62 | ({ \ |
61 | :"=q" (__result) :"r" ((int) (fd)), \ | 63 | unsigned char __result; \ |
62 | "m" (*(__kernel_fd_set *) (fdsetp))); \ | 64 | asm volatile("btl %1,%2 ; setb %0" \ |
63 | __result; })) | 65 | : "=q" (__result) \ |
66 | : "r" ((int)(fd)), \ | ||
67 | "m" (*(__kernel_fd_set *)(fdsetp))); \ | ||
68 | __result; \ | ||
69 | })) | ||
64 | 70 | ||
65 | #undef __FD_ZERO | 71 | #undef __FD_ZERO |
66 | #define __FD_ZERO(fdsetp) \ | 72 | #define __FD_ZERO(fdsetp) \ |
67 | do { \ | 73 | do { \ |
68 | int __d0, __d1; \ | 74 | int __d0, __d1; \ |
69 | __asm__ __volatile__("cld ; rep ; stosl" \ | 75 | asm volatile("cld ; rep ; stosl" \ |
70 | :"=m" (*(__kernel_fd_set *) (fdsetp)), \ | 76 | : "=m" (*(__kernel_fd_set *)(fdsetp)), \ |
71 | "=&c" (__d0), "=&D" (__d1) \ | 77 | "=&c" (__d0), "=&D" (__d1) \ |
72 | :"a" (0), "1" (__FDSET_LONGS), \ | 78 | : "a" (0), "1" (__FDSET_LONGS), \ |
73 | "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \ | 79 | "2" ((__kernel_fd_set *)(fdsetp)) \ |
80 | : "memory"); \ | ||
74 | } while (0) | 81 | } while (0) |
75 | 82 | ||
76 | #endif /* defined(__KERNEL__) */ | 83 | #endif /* defined(__KERNEL__) */ |
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h index 9926aa43775b..d6624c95854a 100644 --- a/include/asm-x86/posix_types_64.h +++ b/include/asm-x86/posix_types_64.h | |||
@@ -46,7 +46,7 @@ typedef unsigned long __kernel_old_dev_t; | |||
46 | #ifdef __KERNEL__ | 46 | #ifdef __KERNEL__ |
47 | 47 | ||
48 | #undef __FD_SET | 48 | #undef __FD_SET |
49 | static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | 49 | static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) |
50 | { | 50 | { |
51 | unsigned long _tmp = fd / __NFDBITS; | 51 | unsigned long _tmp = fd / __NFDBITS; |
52 | unsigned long _rem = fd % __NFDBITS; | 52 | unsigned long _rem = fd % __NFDBITS; |
@@ -54,7 +54,7 @@ static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | |||
54 | } | 54 | } |
55 | 55 | ||
56 | #undef __FD_CLR | 56 | #undef __FD_CLR |
57 | static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | 57 | static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) |
58 | { | 58 | { |
59 | unsigned long _tmp = fd / __NFDBITS; | 59 | unsigned long _tmp = fd / __NFDBITS; |
60 | unsigned long _rem = fd % __NFDBITS; | 60 | unsigned long _rem = fd % __NFDBITS; |
@@ -62,7 +62,7 @@ static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | #undef __FD_ISSET | 64 | #undef __FD_ISSET |
65 | static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) | 65 | static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) |
66 | { | 66 | { |
67 | unsigned long _tmp = fd / __NFDBITS; | 67 | unsigned long _tmp = fd / __NFDBITS; |
68 | unsigned long _rem = fd % __NFDBITS; | 68 | unsigned long _rem = fd % __NFDBITS; |
@@ -74,36 +74,36 @@ static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) | |||
74 | * for 256 and 1024-bit fd_sets respectively) | 74 | * for 256 and 1024-bit fd_sets respectively) |
75 | */ | 75 | */ |
76 | #undef __FD_ZERO | 76 | #undef __FD_ZERO |
77 | static __inline__ void __FD_ZERO(__kernel_fd_set *p) | 77 | static inline void __FD_ZERO(__kernel_fd_set *p) |
78 | { | 78 | { |
79 | unsigned long *tmp = p->fds_bits; | 79 | unsigned long *tmp = p->fds_bits; |
80 | int i; | 80 | int i; |
81 | 81 | ||
82 | if (__builtin_constant_p(__FDSET_LONGS)) { | 82 | if (__builtin_constant_p(__FDSET_LONGS)) { |
83 | switch (__FDSET_LONGS) { | 83 | switch (__FDSET_LONGS) { |
84 | case 32: | 84 | case 32: |
85 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | 85 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; |
86 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; | 86 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; |
87 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; | 87 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; |
88 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; | 88 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; |
89 | tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0; | 89 | tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0; |
90 | tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0; | 90 | tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0; |
91 | tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0; | 91 | tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0; |
92 | tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0; | 92 | tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0; |
93 | return; | 93 | return; |
94 | case 16: | 94 | case 16: |
95 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | 95 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; |
96 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; | 96 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; |
97 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; | 97 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; |
98 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; | 98 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; |
99 | return; | 99 | return; |
100 | case 8: | 100 | case 8: |
101 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | 101 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; |
102 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; | 102 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; |
103 | return; | 103 | return; |
104 | case 4: | 104 | case 4: |
105 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | 105 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; |
106 | return; | 106 | return; |
107 | } | 107 | } |
108 | } | 108 | } |
109 | i = __FDSET_LONGS; | 109 | i = __FDSET_LONGS; |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 45a2f0ab33d0..6e26c7c717a2 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -3,8 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm/processor-flags.h> | 4 | #include <asm/processor-flags.h> |
5 | 5 | ||
6 | /* migration helpers, for KVM - will be removed in 2.6.25: */ | 6 | /* migration helper, for KVM - will be removed in 2.6.25: */ |
7 | #include <asm/vm86.h> | ||
8 | #define Xgt_desc_struct desc_ptr | 7 | #define Xgt_desc_struct desc_ptr |
9 | 8 | ||
10 | /* Forward declaration, a strange C thing */ | 9 | /* Forward declaration, a strange C thing */ |
@@ -24,6 +23,7 @@ struct mm_struct; | |||
24 | #include <asm/msr.h> | 23 | #include <asm/msr.h> |
25 | #include <asm/desc_defs.h> | 24 | #include <asm/desc_defs.h> |
26 | #include <asm/nops.h> | 25 | #include <asm/nops.h> |
26 | |||
27 | #include <linux/personality.h> | 27 | #include <linux/personality.h> |
28 | #include <linux/cpumask.h> | 28 | #include <linux/cpumask.h> |
29 | #include <linux/cache.h> | 29 | #include <linux/cache.h> |
@@ -37,16 +37,18 @@ struct mm_struct; | |||
37 | static inline void *current_text_addr(void) | 37 | static inline void *current_text_addr(void) |
38 | { | 38 | { |
39 | void *pc; | 39 | void *pc; |
40 | asm volatile("mov $1f,%0\n1:":"=r" (pc)); | 40 | |
41 | asm volatile("mov $1f, %0; 1:":"=r" (pc)); | ||
42 | |||
41 | return pc; | 43 | return pc; |
42 | } | 44 | } |
43 | 45 | ||
44 | #ifdef CONFIG_X86_VSMP | 46 | #ifdef CONFIG_X86_VSMP |
45 | #define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) | 47 | # define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) |
46 | #define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) | 48 | # define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) |
47 | #else | 49 | #else |
48 | #define ARCH_MIN_TASKALIGN 16 | 50 | # define ARCH_MIN_TASKALIGN 16 |
49 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 | 51 | # define ARCH_MIN_MMSTRUCT_ALIGN 0 |
50 | #endif | 52 | #endif |
51 | 53 | ||
52 | /* | 54 | /* |
@@ -56,69 +58,82 @@ static inline void *current_text_addr(void) | |||
56 | */ | 58 | */ |
57 | 59 | ||
58 | struct cpuinfo_x86 { | 60 | struct cpuinfo_x86 { |
59 | __u8 x86; /* CPU family */ | 61 | __u8 x86; /* CPU family */ |
60 | __u8 x86_vendor; /* CPU vendor */ | 62 | __u8 x86_vendor; /* CPU vendor */ |
61 | __u8 x86_model; | 63 | __u8 x86_model; |
62 | __u8 x86_mask; | 64 | __u8 x86_mask; |
63 | #ifdef CONFIG_X86_32 | 65 | #ifdef CONFIG_X86_32 |
64 | char wp_works_ok; /* It doesn't on 386's */ | 66 | char wp_works_ok; /* It doesn't on 386's */ |
65 | char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ | 67 | |
66 | char hard_math; | 68 | /* Problems on some 486Dx4's and old 386's: */ |
67 | char rfu; | 69 | char hlt_works_ok; |
68 | char fdiv_bug; | 70 | char hard_math; |
69 | char f00f_bug; | 71 | char rfu; |
70 | char coma_bug; | 72 | char fdiv_bug; |
71 | char pad0; | 73 | char f00f_bug; |
74 | char coma_bug; | ||
75 | char pad0; | ||
72 | #else | 76 | #else |
73 | /* number of 4K pages in DTLB/ITLB combined(in pages)*/ | 77 | /* Number of 4K pages in DTLB/ITLB combined(in pages): */ |
74 | int x86_tlbsize; | 78 | int x86_tlbsize; |
75 | __u8 x86_virt_bits, x86_phys_bits; | 79 | __u8 x86_virt_bits; |
76 | /* cpuid returned core id bits */ | 80 | __u8 x86_phys_bits; |
77 | __u8 x86_coreid_bits; | 81 | /* CPUID returned core id bits: */ |
78 | /* Max extended CPUID function supported */ | 82 | __u8 x86_coreid_bits; |
79 | __u32 extended_cpuid_level; | 83 | /* Max extended CPUID function supported: */ |
84 | __u32 extended_cpuid_level; | ||
80 | #endif | 85 | #endif |
81 | int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ | 86 | /* Maximum supported CPUID level, -1=no CPUID: */ |
82 | __u32 x86_capability[NCAPINTS]; | 87 | int cpuid_level; |
83 | char x86_vendor_id[16]; | 88 | __u32 x86_capability[NCAPINTS]; |
84 | char x86_model_id[64]; | 89 | char x86_vendor_id[16]; |
85 | int x86_cache_size; /* in KB - valid for CPUS which support this | 90 | char x86_model_id[64]; |
86 | call */ | 91 | /* in KB - valid for CPUS which support this call: */ |
87 | int x86_cache_alignment; /* In bytes */ | 92 | int x86_cache_size; |
88 | int x86_power; | 93 | int x86_cache_alignment; /* In bytes */ |
89 | unsigned long loops_per_jiffy; | 94 | int x86_power; |
95 | unsigned long loops_per_jiffy; | ||
90 | #ifdef CONFIG_SMP | 96 | #ifdef CONFIG_SMP |
91 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | 97 | /* cpus sharing the last level cache: */ |
98 | cpumask_t llc_shared_map; | ||
92 | #endif | 99 | #endif |
93 | u16 x86_max_cores; /* cpuid returned max cores value */ | 100 | /* cpuid returned max cores value: */ |
94 | u16 apicid; | 101 | u16 x86_max_cores; |
95 | u16 x86_clflush_size; | 102 | u16 apicid; |
103 | u16 initial_apicid; | ||
104 | u16 x86_clflush_size; | ||
96 | #ifdef CONFIG_SMP | 105 | #ifdef CONFIG_SMP |
97 | u16 booted_cores; /* number of cores as seen by OS */ | 106 | /* number of cores as seen by the OS: */ |
98 | u16 phys_proc_id; /* Physical processor id. */ | 107 | u16 booted_cores; |
99 | u16 cpu_core_id; /* Core id */ | 108 | /* Physical processor id: */ |
100 | u16 cpu_index; /* index into per_cpu list */ | 109 | u16 phys_proc_id; |
110 | /* Core id: */ | ||
111 | u16 cpu_core_id; | ||
112 | /* Index into per_cpu list: */ | ||
113 | u16 cpu_index; | ||
101 | #endif | 114 | #endif |
102 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | 115 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); |
103 | 116 | ||
104 | #define X86_VENDOR_INTEL 0 | 117 | #define X86_VENDOR_INTEL 0 |
105 | #define X86_VENDOR_CYRIX 1 | 118 | #define X86_VENDOR_CYRIX 1 |
106 | #define X86_VENDOR_AMD 2 | 119 | #define X86_VENDOR_AMD 2 |
107 | #define X86_VENDOR_UMC 3 | 120 | #define X86_VENDOR_UMC 3 |
108 | #define X86_VENDOR_NEXGEN 4 | 121 | #define X86_VENDOR_NEXGEN 4 |
109 | #define X86_VENDOR_CENTAUR 5 | 122 | #define X86_VENDOR_CENTAUR 5 |
110 | #define X86_VENDOR_TRANSMETA 7 | 123 | #define X86_VENDOR_TRANSMETA 7 |
111 | #define X86_VENDOR_NSC 8 | 124 | #define X86_VENDOR_NSC 8 |
112 | #define X86_VENDOR_NUM 9 | 125 | #define X86_VENDOR_NUM 9 |
113 | #define X86_VENDOR_UNKNOWN 0xff | 126 | |
127 | #define X86_VENDOR_UNKNOWN 0xff | ||
114 | 128 | ||
115 | /* | 129 | /* |
116 | * capabilities of CPUs | 130 | * capabilities of CPUs |
117 | */ | 131 | */ |
118 | extern struct cpuinfo_x86 boot_cpu_data; | 132 | extern struct cpuinfo_x86 boot_cpu_data; |
119 | extern struct cpuinfo_x86 new_cpu_data; | 133 | extern struct cpuinfo_x86 new_cpu_data; |
120 | extern struct tss_struct doublefault_tss; | 134 | |
121 | extern __u32 cleared_cpu_caps[NCAPINTS]; | 135 | extern struct tss_struct doublefault_tss; |
136 | extern __u32 cleared_cpu_caps[NCAPINTS]; | ||
122 | 137 | ||
123 | #ifdef CONFIG_SMP | 138 | #ifdef CONFIG_SMP |
124 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | 139 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); |
@@ -129,7 +144,18 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | |||
129 | #define current_cpu_data boot_cpu_data | 144 | #define current_cpu_data boot_cpu_data |
130 | #endif | 145 | #endif |
131 | 146 | ||
132 | void cpu_detect(struct cpuinfo_x86 *c); | 147 | static inline int hlt_works(int cpu) |
148 | { | ||
149 | #ifdef CONFIG_X86_32 | ||
150 | return cpu_data(cpu).hlt_works_ok; | ||
151 | #else | ||
152 | return 1; | ||
153 | #endif | ||
154 | } | ||
155 | |||
156 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
157 | |||
158 | extern void cpu_detect(struct cpuinfo_x86 *c); | ||
133 | 159 | ||
134 | extern void identify_cpu(struct cpuinfo_x86 *); | 160 | extern void identify_cpu(struct cpuinfo_x86 *); |
135 | extern void identify_boot_cpu(void); | 161 | extern void identify_boot_cpu(void); |
@@ -146,15 +172,15 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {} | |||
146 | #endif | 172 | #endif |
147 | 173 | ||
148 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | 174 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, |
149 | unsigned int *ecx, unsigned int *edx) | 175 | unsigned int *ecx, unsigned int *edx) |
150 | { | 176 | { |
151 | /* ecx is often an input as well as an output. */ | 177 | /* ecx is often an input as well as an output. */ |
152 | __asm__("cpuid" | 178 | asm("cpuid" |
153 | : "=a" (*eax), | 179 | : "=a" (*eax), |
154 | "=b" (*ebx), | 180 | "=b" (*ebx), |
155 | "=c" (*ecx), | 181 | "=c" (*ecx), |
156 | "=d" (*edx) | 182 | "=d" (*edx) |
157 | : "0" (*eax), "2" (*ecx)); | 183 | : "0" (*eax), "2" (*ecx)); |
158 | } | 184 | } |
159 | 185 | ||
160 | static inline void load_cr3(pgd_t *pgdir) | 186 | static inline void load_cr3(pgd_t *pgdir) |
@@ -165,54 +191,67 @@ static inline void load_cr3(pgd_t *pgdir) | |||
165 | #ifdef CONFIG_X86_32 | 191 | #ifdef CONFIG_X86_32 |
166 | /* This is the TSS defined by the hardware. */ | 192 | /* This is the TSS defined by the hardware. */ |
167 | struct x86_hw_tss { | 193 | struct x86_hw_tss { |
168 | unsigned short back_link, __blh; | 194 | unsigned short back_link, __blh; |
169 | unsigned long sp0; | 195 | unsigned long sp0; |
170 | unsigned short ss0, __ss0h; | 196 | unsigned short ss0, __ss0h; |
171 | unsigned long sp1; | 197 | unsigned long sp1; |
172 | unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */ | 198 | /* ss1 caches MSR_IA32_SYSENTER_CS: */ |
173 | unsigned long sp2; | 199 | unsigned short ss1, __ss1h; |
174 | unsigned short ss2, __ss2h; | 200 | unsigned long sp2; |
175 | unsigned long __cr3; | 201 | unsigned short ss2, __ss2h; |
176 | unsigned long ip; | 202 | unsigned long __cr3; |
177 | unsigned long flags; | 203 | unsigned long ip; |
178 | unsigned long ax, cx, dx, bx; | 204 | unsigned long flags; |
179 | unsigned long sp, bp, si, di; | 205 | unsigned long ax; |
180 | unsigned short es, __esh; | 206 | unsigned long cx; |
181 | unsigned short cs, __csh; | 207 | unsigned long dx; |
182 | unsigned short ss, __ssh; | 208 | unsigned long bx; |
183 | unsigned short ds, __dsh; | 209 | unsigned long sp; |
184 | unsigned short fs, __fsh; | 210 | unsigned long bp; |
185 | unsigned short gs, __gsh; | 211 | unsigned long si; |
186 | unsigned short ldt, __ldth; | 212 | unsigned long di; |
187 | unsigned short trace, io_bitmap_base; | 213 | unsigned short es, __esh; |
214 | unsigned short cs, __csh; | ||
215 | unsigned short ss, __ssh; | ||
216 | unsigned short ds, __dsh; | ||
217 | unsigned short fs, __fsh; | ||
218 | unsigned short gs, __gsh; | ||
219 | unsigned short ldt, __ldth; | ||
220 | unsigned short trace; | ||
221 | unsigned short io_bitmap_base; | ||
222 | |||
188 | } __attribute__((packed)); | 223 | } __attribute__((packed)); |
189 | #else | 224 | #else |
190 | struct x86_hw_tss { | 225 | struct x86_hw_tss { |
191 | u32 reserved1; | 226 | u32 reserved1; |
192 | u64 sp0; | 227 | u64 sp0; |
193 | u64 sp1; | 228 | u64 sp1; |
194 | u64 sp2; | 229 | u64 sp2; |
195 | u64 reserved2; | 230 | u64 reserved2; |
196 | u64 ist[7]; | 231 | u64 ist[7]; |
197 | u32 reserved3; | 232 | u32 reserved3; |
198 | u32 reserved4; | 233 | u32 reserved4; |
199 | u16 reserved5; | 234 | u16 reserved5; |
200 | u16 io_bitmap_base; | 235 | u16 io_bitmap_base; |
236 | |||
201 | } __attribute__((packed)) ____cacheline_aligned; | 237 | } __attribute__((packed)) ____cacheline_aligned; |
202 | #endif | 238 | #endif |
203 | 239 | ||
204 | /* | 240 | /* |
205 | * Size of io_bitmap. | 241 | * IO-bitmap sizes: |
206 | */ | 242 | */ |
207 | #define IO_BITMAP_BITS 65536 | 243 | #define IO_BITMAP_BITS 65536 |
208 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) | 244 | #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) |
209 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) | 245 | #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) |
210 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) | 246 | #define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) |
211 | #define INVALID_IO_BITMAP_OFFSET 0x8000 | 247 | #define INVALID_IO_BITMAP_OFFSET 0x8000 |
212 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 | 248 | #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 |
213 | 249 | ||
214 | struct tss_struct { | 250 | struct tss_struct { |
215 | struct x86_hw_tss x86_tss; | 251 | /* |
252 | * The hardware state: | ||
253 | */ | ||
254 | struct x86_hw_tss x86_tss; | ||
216 | 255 | ||
217 | /* | 256 | /* |
218 | * The extra 1 is there because the CPU will access an | 257 | * The extra 1 is there because the CPU will access an |
@@ -220,90 +259,108 @@ struct tss_struct { | |||
220 | * bitmap. The extra byte must be all 1 bits, and must | 259 | * bitmap. The extra byte must be all 1 bits, and must |
221 | * be within the limit. | 260 | * be within the limit. |
222 | */ | 261 | */ |
223 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; | 262 | unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; |
224 | /* | 263 | /* |
225 | * Cache the current maximum and the last task that used the bitmap: | 264 | * Cache the current maximum and the last task that used the bitmap: |
226 | */ | 265 | */ |
227 | unsigned long io_bitmap_max; | 266 | unsigned long io_bitmap_max; |
228 | struct thread_struct *io_bitmap_owner; | 267 | struct thread_struct *io_bitmap_owner; |
268 | |||
229 | /* | 269 | /* |
230 | * pads the TSS to be cacheline-aligned (size is 0x100) | 270 | * Pad the TSS to be cacheline-aligned (size is 0x100): |
231 | */ | 271 | */ |
232 | unsigned long __cacheline_filler[35]; | 272 | unsigned long __cacheline_filler[35]; |
233 | /* | 273 | /* |
234 | * .. and then another 0x100 bytes for emergency kernel stack | 274 | * .. and then another 0x100 bytes for the emergency kernel stack: |
235 | */ | 275 | */ |
236 | unsigned long stack[64]; | 276 | unsigned long stack[64]; |
277 | |||
237 | } __attribute__((packed)); | 278 | } __attribute__((packed)); |
238 | 279 | ||
239 | DECLARE_PER_CPU(struct tss_struct, init_tss); | 280 | DECLARE_PER_CPU(struct tss_struct, init_tss); |
240 | 281 | ||
241 | /* Save the original ist values for checking stack pointers during debugging */ | 282 | /* |
283 | * Save the original ist values for checking stack pointers during debugging | ||
284 | */ | ||
242 | struct orig_ist { | 285 | struct orig_ist { |
243 | unsigned long ist[7]; | 286 | unsigned long ist[7]; |
244 | }; | 287 | }; |
245 | 288 | ||
246 | #define MXCSR_DEFAULT 0x1f80 | 289 | #define MXCSR_DEFAULT 0x1f80 |
247 | 290 | ||
248 | struct i387_fsave_struct { | 291 | struct i387_fsave_struct { |
249 | u32 cwd; | 292 | u32 cwd; /* FPU Control Word */ |
250 | u32 swd; | 293 | u32 swd; /* FPU Status Word */ |
251 | u32 twd; | 294 | u32 twd; /* FPU Tag Word */ |
252 | u32 fip; | 295 | u32 fip; /* FPU IP Offset */ |
253 | u32 fcs; | 296 | u32 fcs; /* FPU IP Selector */ |
254 | u32 foo; | 297 | u32 foo; /* FPU Operand Pointer Offset */ |
255 | u32 fos; | 298 | u32 fos; /* FPU Operand Pointer Selector */ |
256 | u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | 299 | |
257 | u32 status; /* software status information */ | 300 | /* 8*10 bytes for each FP-reg = 80 bytes: */ |
301 | u32 st_space[20]; | ||
302 | |||
303 | /* Software status information [not touched by FSAVE ]: */ | ||
304 | u32 status; | ||
258 | }; | 305 | }; |
259 | 306 | ||
260 | struct i387_fxsave_struct { | 307 | struct i387_fxsave_struct { |
261 | u16 cwd; | 308 | u16 cwd; /* Control Word */ |
262 | u16 swd; | 309 | u16 swd; /* Status Word */ |
263 | u16 twd; | 310 | u16 twd; /* Tag Word */ |
264 | u16 fop; | 311 | u16 fop; /* Last Instruction Opcode */ |
265 | union { | 312 | union { |
266 | struct { | 313 | struct { |
267 | u64 rip; | 314 | u64 rip; /* Instruction Pointer */ |
268 | u64 rdp; | 315 | u64 rdp; /* Data Pointer */ |
269 | }; | 316 | }; |
270 | struct { | 317 | struct { |
271 | u32 fip; | 318 | u32 fip; /* FPU IP Offset */ |
272 | u32 fcs; | 319 | u32 fcs; /* FPU IP Selector */ |
273 | u32 foo; | 320 | u32 foo; /* FPU Operand Offset */ |
274 | u32 fos; | 321 | u32 fos; /* FPU Operand Selector */ |
275 | }; | 322 | }; |
276 | }; | 323 | }; |
277 | u32 mxcsr; | 324 | u32 mxcsr; /* MXCSR Register State */ |
278 | u32 mxcsr_mask; | 325 | u32 mxcsr_mask; /* MXCSR Mask */ |
279 | u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | 326 | |
280 | u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */ | 327 | /* 8*16 bytes for each FP-reg = 128 bytes: */ |
281 | u32 padding[24]; | 328 | u32 st_space[32]; |
329 | |||
330 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ | ||
331 | u32 xmm_space[64]; | ||
332 | |||
333 | u32 padding[24]; | ||
334 | |||
282 | } __attribute__((aligned(16))); | 335 | } __attribute__((aligned(16))); |
283 | 336 | ||
284 | struct i387_soft_struct { | 337 | struct i387_soft_struct { |
285 | u32 cwd; | 338 | u32 cwd; |
286 | u32 swd; | 339 | u32 swd; |
287 | u32 twd; | 340 | u32 twd; |
288 | u32 fip; | 341 | u32 fip; |
289 | u32 fcs; | 342 | u32 fcs; |
290 | u32 foo; | 343 | u32 foo; |
291 | u32 fos; | 344 | u32 fos; |
292 | u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ | 345 | /* 8*10 bytes for each FP-reg = 80 bytes: */ |
293 | u8 ftop, changed, lookahead, no_update, rm, alimit; | 346 | u32 st_space[20]; |
294 | struct info *info; | 347 | u8 ftop; |
295 | u32 entry_eip; | 348 | u8 changed; |
349 | u8 lookahead; | ||
350 | u8 no_update; | ||
351 | u8 rm; | ||
352 | u8 alimit; | ||
353 | struct info *info; | ||
354 | u32 entry_eip; | ||
296 | }; | 355 | }; |
297 | 356 | ||
298 | union i387_union { | 357 | union i387_union { |
299 | struct i387_fsave_struct fsave; | 358 | struct i387_fsave_struct fsave; |
300 | struct i387_fxsave_struct fxsave; | 359 | struct i387_fxsave_struct fxsave; |
301 | struct i387_soft_struct soft; | 360 | struct i387_soft_struct soft; |
302 | }; | 361 | }; |
303 | 362 | ||
304 | #ifdef CONFIG_X86_32 | 363 | #ifdef CONFIG_X86_64 |
305 | DECLARE_PER_CPU(u8, cpu_llc_id); | ||
306 | #else | ||
307 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | 364 | DECLARE_PER_CPU(struct orig_ist, orig_ist); |
308 | #endif | 365 | #endif |
309 | 366 | ||
@@ -313,42 +370,50 @@ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | |||
313 | extern unsigned short num_cache_leaves; | 370 | extern unsigned short num_cache_leaves; |
314 | 371 | ||
315 | struct thread_struct { | 372 | struct thread_struct { |
316 | /* cached TLS descriptors. */ | 373 | /* Cached TLS descriptors: */ |
317 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | 374 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; |
318 | unsigned long sp0; | 375 | unsigned long sp0; |
319 | unsigned long sp; | 376 | unsigned long sp; |
320 | #ifdef CONFIG_X86_32 | 377 | #ifdef CONFIG_X86_32 |
321 | unsigned long sysenter_cs; | 378 | unsigned long sysenter_cs; |
322 | #else | 379 | #else |
323 | unsigned long usersp; /* Copy from PDA */ | 380 | unsigned long usersp; /* Copy from PDA */ |
324 | unsigned short es, ds, fsindex, gsindex; | 381 | unsigned short es; |
382 | unsigned short ds; | ||
383 | unsigned short fsindex; | ||
384 | unsigned short gsindex; | ||
325 | #endif | 385 | #endif |
326 | unsigned long ip; | 386 | unsigned long ip; |
327 | unsigned long fs; | 387 | unsigned long fs; |
328 | unsigned long gs; | 388 | unsigned long gs; |
329 | /* Hardware debugging registers */ | 389 | /* Hardware debugging registers: */ |
330 | unsigned long debugreg0; | 390 | unsigned long debugreg0; |
331 | unsigned long debugreg1; | 391 | unsigned long debugreg1; |
332 | unsigned long debugreg2; | 392 | unsigned long debugreg2; |
333 | unsigned long debugreg3; | 393 | unsigned long debugreg3; |
334 | unsigned long debugreg6; | 394 | unsigned long debugreg6; |
335 | unsigned long debugreg7; | 395 | unsigned long debugreg7; |
336 | /* fault info */ | 396 | /* Fault info: */ |
337 | unsigned long cr2, trap_no, error_code; | 397 | unsigned long cr2; |
338 | /* floating point info */ | 398 | unsigned long trap_no; |
399 | unsigned long error_code; | ||
400 | /* Floating point info: */ | ||
339 | union i387_union i387 __attribute__((aligned(16)));; | 401 | union i387_union i387 __attribute__((aligned(16)));; |
340 | #ifdef CONFIG_X86_32 | 402 | #ifdef CONFIG_X86_32 |
341 | /* virtual 86 mode info */ | 403 | /* Virtual 86 mode info */ |
342 | struct vm86_struct __user *vm86_info; | 404 | struct vm86_struct __user *vm86_info; |
343 | unsigned long screen_bitmap; | 405 | unsigned long screen_bitmap; |
344 | unsigned long v86flags, v86mask, saved_sp0; | 406 | unsigned long v86flags; |
345 | unsigned int saved_fs, saved_gs; | 407 | unsigned long v86mask; |
408 | unsigned long saved_sp0; | ||
409 | unsigned int saved_fs; | ||
410 | unsigned int saved_gs; | ||
346 | #endif | 411 | #endif |
347 | /* IO permissions */ | 412 | /* IO permissions: */ |
348 | unsigned long *io_bitmap_ptr; | 413 | unsigned long *io_bitmap_ptr; |
349 | unsigned long iopl; | 414 | unsigned long iopl; |
350 | /* max allowed port in the bitmap, in bytes: */ | 415 | /* Max allowed port in the bitmap, in bytes: */ |
351 | unsigned io_bitmap_max; | 416 | unsigned io_bitmap_max; |
352 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ | 417 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */ |
353 | unsigned long debugctlmsr; | 418 | unsigned long debugctlmsr; |
354 | /* Debug Store - if not 0 points to a DS Save Area configuration; | 419 | /* Debug Store - if not 0 points to a DS Save Area configuration; |
@@ -358,21 +423,27 @@ struct thread_struct { | |||
358 | 423 | ||
359 | static inline unsigned long native_get_debugreg(int regno) | 424 | static inline unsigned long native_get_debugreg(int regno) |
360 | { | 425 | { |
361 | unsigned long val = 0; /* Damn you, gcc! */ | 426 | unsigned long val = 0; /* Damn you, gcc! */ |
362 | 427 | ||
363 | switch (regno) { | 428 | switch (regno) { |
364 | case 0: | 429 | case 0: |
365 | asm("mov %%db0, %0" :"=r" (val)); break; | 430 | asm("mov %%db0, %0" :"=r" (val)); |
431 | break; | ||
366 | case 1: | 432 | case 1: |
367 | asm("mov %%db1, %0" :"=r" (val)); break; | 433 | asm("mov %%db1, %0" :"=r" (val)); |
434 | break; | ||
368 | case 2: | 435 | case 2: |
369 | asm("mov %%db2, %0" :"=r" (val)); break; | 436 | asm("mov %%db2, %0" :"=r" (val)); |
437 | break; | ||
370 | case 3: | 438 | case 3: |
371 | asm("mov %%db3, %0" :"=r" (val)); break; | 439 | asm("mov %%db3, %0" :"=r" (val)); |
440 | break; | ||
372 | case 6: | 441 | case 6: |
373 | asm("mov %%db6, %0" :"=r" (val)); break; | 442 | asm("mov %%db6, %0" :"=r" (val)); |
443 | break; | ||
374 | case 7: | 444 | case 7: |
375 | asm("mov %%db7, %0" :"=r" (val)); break; | 445 | asm("mov %%db7, %0" :"=r" (val)); |
446 | break; | ||
376 | default: | 447 | default: |
377 | BUG(); | 448 | BUG(); |
378 | } | 449 | } |
@@ -383,22 +454,22 @@ static inline void native_set_debugreg(int regno, unsigned long value) | |||
383 | { | 454 | { |
384 | switch (regno) { | 455 | switch (regno) { |
385 | case 0: | 456 | case 0: |
386 | asm("mov %0,%%db0" : /* no output */ :"r" (value)); | 457 | asm("mov %0, %%db0" ::"r" (value)); |
387 | break; | 458 | break; |
388 | case 1: | 459 | case 1: |
389 | asm("mov %0,%%db1" : /* no output */ :"r" (value)); | 460 | asm("mov %0, %%db1" ::"r" (value)); |
390 | break; | 461 | break; |
391 | case 2: | 462 | case 2: |
392 | asm("mov %0,%%db2" : /* no output */ :"r" (value)); | 463 | asm("mov %0, %%db2" ::"r" (value)); |
393 | break; | 464 | break; |
394 | case 3: | 465 | case 3: |
395 | asm("mov %0,%%db3" : /* no output */ :"r" (value)); | 466 | asm("mov %0, %%db3" ::"r" (value)); |
396 | break; | 467 | break; |
397 | case 6: | 468 | case 6: |
398 | asm("mov %0,%%db6" : /* no output */ :"r" (value)); | 469 | asm("mov %0, %%db6" ::"r" (value)); |
399 | break; | 470 | break; |
400 | case 7: | 471 | case 7: |
401 | asm("mov %0,%%db7" : /* no output */ :"r" (value)); | 472 | asm("mov %0, %%db7" ::"r" (value)); |
402 | break; | 473 | break; |
403 | default: | 474 | default: |
404 | BUG(); | 475 | BUG(); |
@@ -412,23 +483,24 @@ static inline void native_set_iopl_mask(unsigned mask) | |||
412 | { | 483 | { |
413 | #ifdef CONFIG_X86_32 | 484 | #ifdef CONFIG_X86_32 |
414 | unsigned int reg; | 485 | unsigned int reg; |
415 | __asm__ __volatile__ ("pushfl;" | 486 | |
416 | "popl %0;" | 487 | asm volatile ("pushfl;" |
417 | "andl %1, %0;" | 488 | "popl %0;" |
418 | "orl %2, %0;" | 489 | "andl %1, %0;" |
419 | "pushl %0;" | 490 | "orl %2, %0;" |
420 | "popfl" | 491 | "pushl %0;" |
421 | : "=&r" (reg) | 492 | "popfl" |
422 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | 493 | : "=&r" (reg) |
494 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
423 | #endif | 495 | #endif |
424 | } | 496 | } |
425 | 497 | ||
426 | static inline void native_load_sp0(struct tss_struct *tss, | 498 | static inline void |
427 | struct thread_struct *thread) | 499 | native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) |
428 | { | 500 | { |
429 | tss->x86_tss.sp0 = thread->sp0; | 501 | tss->x86_tss.sp0 = thread->sp0; |
430 | #ifdef CONFIG_X86_32 | 502 | #ifdef CONFIG_X86_32 |
431 | /* Only happens when SEP is enabled, no need to test "SEP"arately */ | 503 | /* Only happens when SEP is enabled, no need to test "SEP"arately: */ |
432 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { | 504 | if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { |
433 | tss->x86_tss.ss1 = thread->sysenter_cs; | 505 | tss->x86_tss.ss1 = thread->sysenter_cs; |
434 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | 506 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); |
@@ -446,8 +518,8 @@ static inline void native_swapgs(void) | |||
446 | #ifdef CONFIG_PARAVIRT | 518 | #ifdef CONFIG_PARAVIRT |
447 | #include <asm/paravirt.h> | 519 | #include <asm/paravirt.h> |
448 | #else | 520 | #else |
449 | #define __cpuid native_cpuid | 521 | #define __cpuid native_cpuid |
450 | #define paravirt_enabled() 0 | 522 | #define paravirt_enabled() 0 |
451 | 523 | ||
452 | /* | 524 | /* |
453 | * These special macros can be used to get or set a debugging register | 525 | * These special macros can be used to get or set a debugging register |
@@ -473,11 +545,12 @@ static inline void load_sp0(struct tss_struct *tss, | |||
473 | * enable), so that any CPU's that boot up | 545 | * enable), so that any CPU's that boot up |
474 | * after us can get the correct flags. | 546 | * after us can get the correct flags. |
475 | */ | 547 | */ |
476 | extern unsigned long mmu_cr4_features; | 548 | extern unsigned long mmu_cr4_features; |
477 | 549 | ||
478 | static inline void set_in_cr4(unsigned long mask) | 550 | static inline void set_in_cr4(unsigned long mask) |
479 | { | 551 | { |
480 | unsigned cr4; | 552 | unsigned cr4; |
553 | |||
481 | mmu_cr4_features |= mask; | 554 | mmu_cr4_features |= mask; |
482 | cr4 = read_cr4(); | 555 | cr4 = read_cr4(); |
483 | cr4 |= mask; | 556 | cr4 |= mask; |
@@ -487,6 +560,7 @@ static inline void set_in_cr4(unsigned long mask) | |||
487 | static inline void clear_in_cr4(unsigned long mask) | 560 | static inline void clear_in_cr4(unsigned long mask) |
488 | { | 561 | { |
489 | unsigned cr4; | 562 | unsigned cr4; |
563 | |||
490 | mmu_cr4_features &= ~mask; | 564 | mmu_cr4_features &= ~mask; |
491 | cr4 = read_cr4(); | 565 | cr4 = read_cr4(); |
492 | cr4 &= ~mask; | 566 | cr4 &= ~mask; |
@@ -494,42 +568,42 @@ static inline void clear_in_cr4(unsigned long mask) | |||
494 | } | 568 | } |
495 | 569 | ||
496 | struct microcode_header { | 570 | struct microcode_header { |
497 | unsigned int hdrver; | 571 | unsigned int hdrver; |
498 | unsigned int rev; | 572 | unsigned int rev; |
499 | unsigned int date; | 573 | unsigned int date; |
500 | unsigned int sig; | 574 | unsigned int sig; |
501 | unsigned int cksum; | 575 | unsigned int cksum; |
502 | unsigned int ldrver; | 576 | unsigned int ldrver; |
503 | unsigned int pf; | 577 | unsigned int pf; |
504 | unsigned int datasize; | 578 | unsigned int datasize; |
505 | unsigned int totalsize; | 579 | unsigned int totalsize; |
506 | unsigned int reserved[3]; | 580 | unsigned int reserved[3]; |
507 | }; | 581 | }; |
508 | 582 | ||
509 | struct microcode { | 583 | struct microcode { |
510 | struct microcode_header hdr; | 584 | struct microcode_header hdr; |
511 | unsigned int bits[0]; | 585 | unsigned int bits[0]; |
512 | }; | 586 | }; |
513 | 587 | ||
514 | typedef struct microcode microcode_t; | 588 | typedef struct microcode microcode_t; |
515 | typedef struct microcode_header microcode_header_t; | 589 | typedef struct microcode_header microcode_header_t; |
516 | 590 | ||
517 | /* microcode format is extended from prescott processors */ | 591 | /* microcode format is extended from prescott processors */ |
518 | struct extended_signature { | 592 | struct extended_signature { |
519 | unsigned int sig; | 593 | unsigned int sig; |
520 | unsigned int pf; | 594 | unsigned int pf; |
521 | unsigned int cksum; | 595 | unsigned int cksum; |
522 | }; | 596 | }; |
523 | 597 | ||
524 | struct extended_sigtable { | 598 | struct extended_sigtable { |
525 | unsigned int count; | 599 | unsigned int count; |
526 | unsigned int cksum; | 600 | unsigned int cksum; |
527 | unsigned int reserved[3]; | 601 | unsigned int reserved[3]; |
528 | struct extended_signature sigs[0]; | 602 | struct extended_signature sigs[0]; |
529 | }; | 603 | }; |
530 | 604 | ||
531 | typedef struct { | 605 | typedef struct { |
532 | unsigned long seg; | 606 | unsigned long seg; |
533 | } mm_segment_t; | 607 | } mm_segment_t; |
534 | 608 | ||
535 | 609 | ||
@@ -541,7 +615,7 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | |||
541 | /* Free all resources held by a thread. */ | 615 | /* Free all resources held by a thread. */ |
542 | extern void release_thread(struct task_struct *); | 616 | extern void release_thread(struct task_struct *); |
543 | 617 | ||
544 | /* Prepare to copy thread state - unlazy all lazy status */ | 618 | /* Prepare to copy thread state - unlazy all lazy state */ |
545 | extern void prepare_to_copy(struct task_struct *tsk); | 619 | extern void prepare_to_copy(struct task_struct *tsk); |
546 | 620 | ||
547 | unsigned long get_wchan(struct task_struct *p); | 621 | unsigned long get_wchan(struct task_struct *p); |
@@ -578,118 +652,137 @@ static inline unsigned int cpuid_eax(unsigned int op) | |||
578 | unsigned int eax, ebx, ecx, edx; | 652 | unsigned int eax, ebx, ecx, edx; |
579 | 653 | ||
580 | cpuid(op, &eax, &ebx, &ecx, &edx); | 654 | cpuid(op, &eax, &ebx, &ecx, &edx); |
655 | |||
581 | return eax; | 656 | return eax; |
582 | } | 657 | } |
658 | |||
583 | static inline unsigned int cpuid_ebx(unsigned int op) | 659 | static inline unsigned int cpuid_ebx(unsigned int op) |
584 | { | 660 | { |
585 | unsigned int eax, ebx, ecx, edx; | 661 | unsigned int eax, ebx, ecx, edx; |
586 | 662 | ||
587 | cpuid(op, &eax, &ebx, &ecx, &edx); | 663 | cpuid(op, &eax, &ebx, &ecx, &edx); |
664 | |||
588 | return ebx; | 665 | return ebx; |
589 | } | 666 | } |
667 | |||
590 | static inline unsigned int cpuid_ecx(unsigned int op) | 668 | static inline unsigned int cpuid_ecx(unsigned int op) |
591 | { | 669 | { |
592 | unsigned int eax, ebx, ecx, edx; | 670 | unsigned int eax, ebx, ecx, edx; |
593 | 671 | ||
594 | cpuid(op, &eax, &ebx, &ecx, &edx); | 672 | cpuid(op, &eax, &ebx, &ecx, &edx); |
673 | |||
595 | return ecx; | 674 | return ecx; |
596 | } | 675 | } |
676 | |||
597 | static inline unsigned int cpuid_edx(unsigned int op) | 677 | static inline unsigned int cpuid_edx(unsigned int op) |
598 | { | 678 | { |
599 | unsigned int eax, ebx, ecx, edx; | 679 | unsigned int eax, ebx, ecx, edx; |
600 | 680 | ||
601 | cpuid(op, &eax, &ebx, &ecx, &edx); | 681 | cpuid(op, &eax, &ebx, &ecx, &edx); |
682 | |||
602 | return edx; | 683 | return edx; |
603 | } | 684 | } |
604 | 685 | ||
605 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | 686 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ |
606 | static inline void rep_nop(void) | 687 | static inline void rep_nop(void) |
607 | { | 688 | { |
608 | __asm__ __volatile__("rep;nop": : :"memory"); | 689 | asm volatile("rep; nop" ::: "memory"); |
609 | } | 690 | } |
610 | 691 | ||
611 | /* Stop speculative execution */ | 692 | static inline void cpu_relax(void) |
693 | { | ||
694 | rep_nop(); | ||
695 | } | ||
696 | |||
697 | /* Stop speculative execution: */ | ||
612 | static inline void sync_core(void) | 698 | static inline void sync_core(void) |
613 | { | 699 | { |
614 | int tmp; | 700 | int tmp; |
701 | |||
615 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) | 702 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) |
616 | : "ebx", "ecx", "edx", "memory"); | 703 | : "ebx", "ecx", "edx", "memory"); |
617 | } | 704 | } |
618 | 705 | ||
619 | #define cpu_relax() rep_nop() | ||
620 | |||
621 | static inline void __monitor(const void *eax, unsigned long ecx, | 706 | static inline void __monitor(const void *eax, unsigned long ecx, |
622 | unsigned long edx) | 707 | unsigned long edx) |
623 | { | 708 | { |
624 | /* "monitor %eax,%ecx,%edx;" */ | 709 | /* "monitor %eax, %ecx, %edx;" */ |
625 | asm volatile( | 710 | asm volatile(".byte 0x0f, 0x01, 0xc8;" |
626 | ".byte 0x0f,0x01,0xc8;" | 711 | :: "a" (eax), "c" (ecx), "d"(edx)); |
627 | : :"a" (eax), "c" (ecx), "d"(edx)); | ||
628 | } | 712 | } |
629 | 713 | ||
630 | static inline void __mwait(unsigned long eax, unsigned long ecx) | 714 | static inline void __mwait(unsigned long eax, unsigned long ecx) |
631 | { | 715 | { |
632 | /* "mwait %eax,%ecx;" */ | 716 | /* "mwait %eax, %ecx;" */ |
633 | asm volatile( | 717 | asm volatile(".byte 0x0f, 0x01, 0xc9;" |
634 | ".byte 0x0f,0x01,0xc9;" | 718 | :: "a" (eax), "c" (ecx)); |
635 | : :"a" (eax), "c" (ecx)); | ||
636 | } | 719 | } |
637 | 720 | ||
638 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | 721 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) |
639 | { | 722 | { |
640 | /* "mwait %eax,%ecx;" */ | 723 | /* "mwait %eax, %ecx;" */ |
641 | asm volatile( | 724 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" |
642 | "sti; .byte 0x0f,0x01,0xc9;" | 725 | :: "a" (eax), "c" (ecx)); |
643 | : :"a" (eax), "c" (ecx)); | ||
644 | } | 726 | } |
645 | 727 | ||
646 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | 728 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); |
647 | 729 | ||
648 | extern int force_mwait; | 730 | extern int force_mwait; |
649 | 731 | ||
650 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 732 | extern void select_idle_routine(const struct cpuinfo_x86 *c); |
651 | 733 | ||
652 | extern unsigned long boot_option_idle_override; | 734 | extern unsigned long boot_option_idle_override; |
653 | 735 | ||
654 | extern void enable_sep_cpu(void); | 736 | extern void enable_sep_cpu(void); |
655 | extern int sysenter_setup(void); | 737 | extern int sysenter_setup(void); |
656 | 738 | ||
657 | /* Defined in head.S */ | 739 | /* Defined in head.S */ |
658 | extern struct desc_ptr early_gdt_descr; | 740 | extern struct desc_ptr early_gdt_descr; |
659 | 741 | ||
660 | extern void cpu_set_gdt(int); | 742 | extern void cpu_set_gdt(int); |
661 | extern void switch_to_new_gdt(void); | 743 | extern void switch_to_new_gdt(void); |
662 | extern void cpu_init(void); | 744 | extern void cpu_init(void); |
663 | extern void init_gdt(int cpu); | 745 | extern void init_gdt(int cpu); |
664 | 746 | ||
665 | /* from system description table in BIOS. Mostly for MCA use, but | 747 | static inline void update_debugctlmsr(unsigned long debugctlmsr) |
666 | * others may find it useful. */ | 748 | { |
667 | extern unsigned int machine_id; | 749 | #ifndef CONFIG_X86_DEBUGCTLMSR |
668 | extern unsigned int machine_submodel_id; | 750 | if (boot_cpu_data.x86 < 6) |
669 | extern unsigned int BIOS_revision; | 751 | return; |
752 | #endif | ||
753 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); | ||
754 | } | ||
670 | 755 | ||
671 | /* Boot loader type from the setup header */ | 756 | /* |
672 | extern int bootloader_type; | 757 | * from system description table in BIOS. Mostly for MCA use, but |
758 | * others may find it useful: | ||
759 | */ | ||
760 | extern unsigned int machine_id; | ||
761 | extern unsigned int machine_submodel_id; | ||
762 | extern unsigned int BIOS_revision; | ||
763 | |||
764 | /* Boot loader type from the setup header: */ | ||
765 | extern int bootloader_type; | ||
673 | 766 | ||
674 | extern char ignore_fpu_irq; | 767 | extern char ignore_fpu_irq; |
675 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | ||
676 | 768 | ||
677 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 | 769 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 |
678 | #define ARCH_HAS_PREFETCHW | 770 | #define ARCH_HAS_PREFETCHW |
679 | #define ARCH_HAS_SPINLOCK_PREFETCH | 771 | #define ARCH_HAS_SPINLOCK_PREFETCH |
680 | 772 | ||
681 | #ifdef CONFIG_X86_32 | 773 | #ifdef CONFIG_X86_32 |
682 | #define BASE_PREFETCH ASM_NOP4 | 774 | # define BASE_PREFETCH ASM_NOP4 |
683 | #define ARCH_HAS_PREFETCH | 775 | # define ARCH_HAS_PREFETCH |
684 | #else | 776 | #else |
685 | #define BASE_PREFETCH "prefetcht0 (%1)" | 777 | # define BASE_PREFETCH "prefetcht0 (%1)" |
686 | #endif | 778 | #endif |
687 | 779 | ||
688 | /* Prefetch instructions for Pentium III and AMD Athlon */ | 780 | /* |
689 | /* It's not worth to care about 3dnow! prefetches for the K6 | 781 | * Prefetch instructions for Pentium III (+) and AMD Athlon (+) |
690 | because they are microcoded there and very slow. | 782 | * |
691 | However we don't do prefetches for pre XP Athlons currently | 783 | * It's not worth to care about 3dnow prefetches for the K6 |
692 | That should be fixed. */ | 784 | * because they are microcoded there and very slow. |
785 | */ | ||
693 | static inline void prefetch(const void *x) | 786 | static inline void prefetch(const void *x) |
694 | { | 787 | { |
695 | alternative_input(BASE_PREFETCH, | 788 | alternative_input(BASE_PREFETCH, |
@@ -698,8 +791,11 @@ static inline void prefetch(const void *x) | |||
698 | "r" (x)); | 791 | "r" (x)); |
699 | } | 792 | } |
700 | 793 | ||
701 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | 794 | /* |
702 | spinlocks to avoid one state transition in the cache coherency protocol. */ | 795 | * 3dnow prefetch to get an exclusive cache line. |
796 | * Useful for spinlocks to avoid one state transition in the | ||
797 | * cache coherency protocol: | ||
798 | */ | ||
703 | static inline void prefetchw(const void *x) | 799 | static inline void prefetchw(const void *x) |
704 | { | 800 | { |
705 | alternative_input(BASE_PREFETCH, | 801 | alternative_input(BASE_PREFETCH, |
@@ -708,21 +804,25 @@ static inline void prefetchw(const void *x) | |||
708 | "r" (x)); | 804 | "r" (x)); |
709 | } | 805 | } |
710 | 806 | ||
711 | #define spin_lock_prefetch(x) prefetchw(x) | 807 | static inline void spin_lock_prefetch(const void *x) |
808 | { | ||
809 | prefetchw(x); | ||
810 | } | ||
811 | |||
712 | #ifdef CONFIG_X86_32 | 812 | #ifdef CONFIG_X86_32 |
713 | /* | 813 | /* |
714 | * User space process size: 3GB (default). | 814 | * User space process size: 3GB (default). |
715 | */ | 815 | */ |
716 | #define TASK_SIZE (PAGE_OFFSET) | 816 | #define TASK_SIZE PAGE_OFFSET |
717 | #define STACK_TOP TASK_SIZE | 817 | #define STACK_TOP TASK_SIZE |
718 | #define STACK_TOP_MAX STACK_TOP | 818 | #define STACK_TOP_MAX STACK_TOP |
719 | 819 | ||
720 | #define INIT_THREAD { \ | 820 | #define INIT_THREAD { \ |
721 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | 821 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ |
722 | .vm86_info = NULL, \ | 822 | .vm86_info = NULL, \ |
723 | .sysenter_cs = __KERNEL_CS, \ | 823 | .sysenter_cs = __KERNEL_CS, \ |
724 | .io_bitmap_ptr = NULL, \ | 824 | .io_bitmap_ptr = NULL, \ |
725 | .fs = __KERNEL_PERCPU, \ | 825 | .fs = __KERNEL_PERCPU, \ |
726 | } | 826 | } |
727 | 827 | ||
728 | /* | 828 | /* |
@@ -731,28 +831,15 @@ static inline void prefetchw(const void *x) | |||
731 | * permission bitmap. The extra byte must be all 1 bits, and must | 831 | * permission bitmap. The extra byte must be all 1 bits, and must |
732 | * be within the limit. | 832 | * be within the limit. |
733 | */ | 833 | */ |
734 | #define INIT_TSS { \ | 834 | #define INIT_TSS { \ |
735 | .x86_tss = { \ | 835 | .x86_tss = { \ |
736 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ | 836 | .sp0 = sizeof(init_stack) + (long)&init_stack, \ |
737 | .ss0 = __KERNEL_DS, \ | 837 | .ss0 = __KERNEL_DS, \ |
738 | .ss1 = __KERNEL_CS, \ | 838 | .ss1 = __KERNEL_CS, \ |
739 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ | 839 | .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ |
740 | }, \ | 840 | }, \ |
741 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ | 841 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ |
742 | } | 842 | } |
743 | |||
744 | #define start_thread(regs, new_eip, new_esp) do { \ | ||
745 | __asm__("movl %0,%%gs": :"r" (0)); \ | ||
746 | regs->fs = 0; \ | ||
747 | set_fs(USER_DS); \ | ||
748 | regs->ds = __USER_DS; \ | ||
749 | regs->es = __USER_DS; \ | ||
750 | regs->ss = __USER_DS; \ | ||
751 | regs->cs = __USER_CS; \ | ||
752 | regs->ip = new_eip; \ | ||
753 | regs->sp = new_esp; \ | ||
754 | } while (0) | ||
755 | |||
756 | 843 | ||
757 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | 844 | extern unsigned long thread_saved_pc(struct task_struct *tsk); |
758 | 845 | ||
@@ -780,24 +867,24 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); | |||
780 | __regs__ - 1; \ | 867 | __regs__ - 1; \ |
781 | }) | 868 | }) |
782 | 869 | ||
783 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) | 870 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) |
784 | 871 | ||
785 | #else | 872 | #else |
786 | /* | 873 | /* |
787 | * User space process size. 47bits minus one guard page. | 874 | * User space process size. 47bits minus one guard page. |
788 | */ | 875 | */ |
789 | #define TASK_SIZE64 (0x800000000000UL - 4096) | 876 | #define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE) |
790 | 877 | ||
791 | /* This decides where the kernel will search for a free chunk of vm | 878 | /* This decides where the kernel will search for a free chunk of vm |
792 | * space during mmap's. | 879 | * space during mmap's. |
793 | */ | 880 | */ |
794 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ | 881 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ |
795 | 0xc0000000 : 0xFFFFe000) | 882 | 0xc0000000 : 0xFFFFe000) |
796 | 883 | ||
797 | #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ | 884 | #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ |
798 | IA32_PAGE_OFFSET : TASK_SIZE64) | 885 | IA32_PAGE_OFFSET : TASK_SIZE64) |
799 | #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ | 886 | #define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ |
800 | IA32_PAGE_OFFSET : TASK_SIZE64) | 887 | IA32_PAGE_OFFSET : TASK_SIZE64) |
801 | 888 | ||
802 | #define STACK_TOP TASK_SIZE | 889 | #define STACK_TOP TASK_SIZE |
803 | #define STACK_TOP_MAX TASK_SIZE64 | 890 | #define STACK_TOP_MAX TASK_SIZE64 |
@@ -810,33 +897,25 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); | |||
810 | .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | 897 | .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ |
811 | } | 898 | } |
812 | 899 | ||
813 | #define start_thread(regs, new_rip, new_rsp) do { \ | ||
814 | asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \ | ||
815 | load_gs_index(0); \ | ||
816 | (regs)->ip = (new_rip); \ | ||
817 | (regs)->sp = (new_rsp); \ | ||
818 | write_pda(oldrsp, (new_rsp)); \ | ||
819 | (regs)->cs = __USER_CS; \ | ||
820 | (regs)->ss = __USER_DS; \ | ||
821 | (regs)->flags = 0x200; \ | ||
822 | set_fs(USER_DS); \ | ||
823 | } while (0) | ||
824 | |||
825 | /* | 900 | /* |
826 | * Return saved PC of a blocked thread. | 901 | * Return saved PC of a blocked thread. |
827 | * What is this good for? it will be always the scheduler or ret_from_fork. | 902 | * What is this good for? it will be always the scheduler or ret_from_fork. |
828 | */ | 903 | */ |
829 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) | 904 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) |
830 | 905 | ||
831 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) | 906 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) |
832 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ | 907 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ |
833 | #endif /* CONFIG_X86_64 */ | 908 | #endif /* CONFIG_X86_64 */ |
834 | 909 | ||
835 | /* This decides where the kernel will search for a free chunk of vm | 910 | extern void start_thread(struct pt_regs *regs, unsigned long new_ip, |
911 | unsigned long new_sp); | ||
912 | |||
913 | /* | ||
914 | * This decides where the kernel will search for a free chunk of vm | ||
836 | * space during mmap's. | 915 | * space during mmap's. |
837 | */ | 916 | */ |
838 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | 917 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) |
839 | 918 | ||
840 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) | 919 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) |
841 | 920 | ||
842 | #endif | 921 | #endif |
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h index 68563c0709ac..1e17bcce450e 100644 --- a/include/asm-x86/proto.h +++ b/include/asm-x86/proto.h | |||
@@ -7,8 +7,6 @@ | |||
7 | 7 | ||
8 | extern void early_idt_handler(void); | 8 | extern void early_idt_handler(void); |
9 | 9 | ||
10 | extern void init_memory_mapping(unsigned long start, unsigned long end); | ||
11 | |||
12 | extern void system_call(void); | 10 | extern void system_call(void); |
13 | extern void syscall_init(void); | 11 | extern void syscall_init(void); |
14 | 12 | ||
@@ -26,7 +24,7 @@ extern int reboot_force; | |||
26 | 24 | ||
27 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); | 25 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); |
28 | 26 | ||
29 | #define round_up(x,y) (((x) + (y) - 1) & ~((y)-1)) | 27 | #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) |
30 | #define round_down(x,y) ((x) & ~((y)-1)) | 28 | #define round_down(x, y) ((x) & ~((y) - 1)) |
31 | 29 | ||
32 | #endif | 30 | #endif |
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h index d9e04b46a440..24ec061566c5 100644 --- a/include/asm-x86/ptrace.h +++ b/include/asm-x86/ptrace.h | |||
@@ -36,23 +36,23 @@ struct pt_regs { | |||
36 | #else /* __KERNEL__ */ | 36 | #else /* __KERNEL__ */ |
37 | 37 | ||
38 | struct pt_regs { | 38 | struct pt_regs { |
39 | long bx; | 39 | unsigned long bx; |
40 | long cx; | 40 | unsigned long cx; |
41 | long dx; | 41 | unsigned long dx; |
42 | long si; | 42 | unsigned long si; |
43 | long di; | 43 | unsigned long di; |
44 | long bp; | 44 | unsigned long bp; |
45 | long ax; | 45 | unsigned long ax; |
46 | int ds; | 46 | unsigned long ds; |
47 | int es; | 47 | unsigned long es; |
48 | int fs; | 48 | unsigned long fs; |
49 | /* int gs; */ | 49 | /* int gs; */ |
50 | long orig_ax; | 50 | unsigned long orig_ax; |
51 | long ip; | 51 | unsigned long ip; |
52 | int cs; | 52 | unsigned long cs; |
53 | long flags; | 53 | unsigned long flags; |
54 | long sp; | 54 | unsigned long sp; |
55 | int ss; | 55 | unsigned long ss; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | #include <asm/vm86.h> | 58 | #include <asm/vm86.h> |
@@ -140,12 +140,16 @@ extern unsigned long | |||
140 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); | 140 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); |
141 | 141 | ||
142 | #ifdef CONFIG_X86_32 | 142 | #ifdef CONFIG_X86_32 |
143 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); | 143 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, |
144 | int error_code); | ||
144 | #else | 145 | #else |
145 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); | 146 | void signal_fault(struct pt_regs *regs, void __user *frame, char *where); |
146 | #endif | 147 | #endif |
147 | 148 | ||
148 | #define regs_return_value(regs) ((regs)->ax) | 149 | static inline unsigned long regs_return_value(struct pt_regs *regs) |
150 | { | ||
151 | return regs->ax; | ||
152 | } | ||
149 | 153 | ||
150 | /* | 154 | /* |
151 | * user_mode_vm(regs) determines whether a register set came from user mode. | 155 | * user_mode_vm(regs) determines whether a register set came from user mode. |
@@ -166,8 +170,8 @@ static inline int user_mode(struct pt_regs *regs) | |||
166 | static inline int user_mode_vm(struct pt_regs *regs) | 170 | static inline int user_mode_vm(struct pt_regs *regs) |
167 | { | 171 | { |
168 | #ifdef CONFIG_X86_32 | 172 | #ifdef CONFIG_X86_32 |
169 | return ((regs->cs & SEGMENT_RPL_MASK) | | 173 | return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= |
170 | (regs->flags & VM_MASK)) >= USER_RPL; | 174 | USER_RPL; |
171 | #else | 175 | #else |
172 | return user_mode(regs); | 176 | return user_mode(regs); |
173 | #endif | 177 | #endif |
@@ -176,7 +180,7 @@ static inline int user_mode_vm(struct pt_regs *regs) | |||
176 | static inline int v8086_mode(struct pt_regs *regs) | 180 | static inline int v8086_mode(struct pt_regs *regs) |
177 | { | 181 | { |
178 | #ifdef CONFIG_X86_32 | 182 | #ifdef CONFIG_X86_32 |
179 | return (regs->flags & VM_MASK); | 183 | return (regs->flags & X86_VM_MASK); |
180 | #else | 184 | #else |
181 | return 0; /* No V86 mode support in long mode */ | 185 | return 0; /* No V86 mode support in long mode */ |
182 | #endif | 186 | #endif |
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h index e9e3ffc22c07..6b5233b4f84b 100644 --- a/include/asm-x86/reboot.h +++ b/include/asm-x86/reboot.h | |||
@@ -3,8 +3,7 @@ | |||
3 | 3 | ||
4 | struct pt_regs; | 4 | struct pt_regs; |
5 | 5 | ||
6 | struct machine_ops | 6 | struct machine_ops { |
7 | { | ||
8 | void (*restart)(char *cmd); | 7 | void (*restart)(char *cmd); |
9 | void (*halt)(void); | 8 | void (*halt)(void); |
10 | void (*power_off)(void); | 9 | void (*power_off)(void); |
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h index 46f725b0bc82..2557514d7ef6 100644 --- a/include/asm-x86/resume-trace.h +++ b/include/asm-x86/resume-trace.h | |||
@@ -3,16 +3,17 @@ | |||
3 | 3 | ||
4 | #include <asm/asm.h> | 4 | #include <asm/asm.h> |
5 | 5 | ||
6 | #define TRACE_RESUME(user) do { \ | 6 | #define TRACE_RESUME(user) \ |
7 | do { \ | ||
7 | if (pm_trace_enabled) { \ | 8 | if (pm_trace_enabled) { \ |
8 | void *tracedata; \ | 9 | void *tracedata; \ |
9 | asm volatile(_ASM_MOV_UL " $1f,%0\n" \ | 10 | asm volatile(_ASM_MOV_UL " $1f,%0\n" \ |
10 | ".section .tracedata,\"a\"\n" \ | 11 | ".section .tracedata,\"a\"\n" \ |
11 | "1:\t.word %c1\n\t" \ | 12 | "1:\t.word %c1\n\t" \ |
12 | _ASM_PTR " %c2\n" \ | 13 | _ASM_PTR " %c2\n" \ |
13 | ".previous" \ | 14 | ".previous" \ |
14 | :"=r" (tracedata) \ | 15 | :"=r" (tracedata) \ |
15 | : "i" (__LINE__), "i" (__FILE__)); \ | 16 | : "i" (__LINE__), "i" (__FILE__)); \ |
16 | generate_resume_trace(tracedata, user); \ | 17 | generate_resume_trace(tracedata, user); \ |
17 | } \ | 18 | } \ |
18 | } while (0) | 19 | } while (0) |
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h index 97cdcc9887ba..3451c576e6af 100644 --- a/include/asm-x86/rio.h +++ b/include/asm-x86/rio.h | |||
@@ -11,53 +11,53 @@ | |||
11 | #define RIO_TABLE_VERSION 3 | 11 | #define RIO_TABLE_VERSION 3 |
12 | 12 | ||
13 | struct rio_table_hdr { | 13 | struct rio_table_hdr { |
14 | u8 version; /* Version number of this data structure */ | 14 | u8 version; /* Version number of this data structure */ |
15 | u8 num_scal_dev; /* # of Scalability devices */ | 15 | u8 num_scal_dev; /* # of Scalability devices */ |
16 | u8 num_rio_dev; /* # of RIO I/O devices */ | 16 | u8 num_rio_dev; /* # of RIO I/O devices */ |
17 | } __attribute__((packed)); | 17 | } __attribute__((packed)); |
18 | 18 | ||
19 | struct scal_detail { | 19 | struct scal_detail { |
20 | u8 node_id; /* Scalability Node ID */ | 20 | u8 node_id; /* Scalability Node ID */ |
21 | u32 CBAR; /* Address of 1MB register space */ | 21 | u32 CBAR; /* Address of 1MB register space */ |
22 | u8 port0node; /* Node ID port connected to: 0xFF=None */ | 22 | u8 port0node; /* Node ID port connected to: 0xFF=None */ |
23 | u8 port0port; /* Port num port connected to: 0,1,2, or */ | 23 | u8 port0port; /* Port num port connected to: 0,1,2, or */ |
24 | /* 0xFF=None */ | 24 | /* 0xFF=None */ |
25 | u8 port1node; /* Node ID port connected to: 0xFF = None */ | 25 | u8 port1node; /* Node ID port connected to: 0xFF = None */ |
26 | u8 port1port; /* Port num port connected to: 0,1,2, or */ | 26 | u8 port1port; /* Port num port connected to: 0,1,2, or */ |
27 | /* 0xFF=None */ | 27 | /* 0xFF=None */ |
28 | u8 port2node; /* Node ID port connected to: 0xFF = None */ | 28 | u8 port2node; /* Node ID port connected to: 0xFF = None */ |
29 | u8 port2port; /* Port num port connected to: 0,1,2, or */ | 29 | u8 port2port; /* Port num port connected to: 0,1,2, or */ |
30 | /* 0xFF=None */ | 30 | /* 0xFF=None */ |
31 | u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ | 31 | u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ |
32 | } __attribute__((packed)); | 32 | } __attribute__((packed)); |
33 | 33 | ||
34 | struct rio_detail { | 34 | struct rio_detail { |
35 | u8 node_id; /* RIO Node ID */ | 35 | u8 node_id; /* RIO Node ID */ |
36 | u32 BBAR; /* Address of 1MB register space */ | 36 | u32 BBAR; /* Address of 1MB register space */ |
37 | u8 type; /* Type of device */ | 37 | u8 type; /* Type of device */ |
38 | u8 owner_id; /* Node ID of Hurricane that owns this */ | 38 | u8 owner_id; /* Node ID of Hurricane that owns this */ |
39 | /* node */ | 39 | /* node */ |
40 | u8 port0node; /* Node ID port connected to: 0xFF=None */ | 40 | u8 port0node; /* Node ID port connected to: 0xFF=None */ |
41 | u8 port0port; /* Port num port connected to: 0,1,2, or */ | 41 | u8 port0port; /* Port num port connected to: 0,1,2, or */ |
42 | /* 0xFF=None */ | 42 | /* 0xFF=None */ |
43 | u8 port1node; /* Node ID port connected to: 0xFF=None */ | 43 | u8 port1node; /* Node ID port connected to: 0xFF=None */ |
44 | u8 port1port; /* Port num port connected to: 0,1,2, or */ | 44 | u8 port1port; /* Port num port connected to: 0,1,2, or */ |
45 | /* 0xFF=None */ | 45 | /* 0xFF=None */ |
46 | u8 first_slot; /* Lowest slot number below this Calgary */ | 46 | u8 first_slot; /* Lowest slot number below this Calgary */ |
47 | u8 status; /* Bit 0 = 1 : the XAPIC is used */ | 47 | u8 status; /* Bit 0 = 1 : the XAPIC is used */ |
48 | /* = 0 : the XAPIC is not used, ie: */ | 48 | /* = 0 : the XAPIC is not used, ie: */ |
49 | /* ints fwded to another XAPIC */ | 49 | /* ints fwded to another XAPIC */ |
50 | /* Bits1:7 Reserved */ | 50 | /* Bits1:7 Reserved */ |
51 | u8 WP_index; /* instance index - lower ones have */ | 51 | u8 WP_index; /* instance index - lower ones have */ |
52 | /* lower slot numbers/PCI bus numbers */ | 52 | /* lower slot numbers/PCI bus numbers */ |
53 | u8 chassis_num; /* 1 based Chassis number */ | 53 | u8 chassis_num; /* 1 based Chassis number */ |
54 | } __attribute__((packed)); | 54 | } __attribute__((packed)); |
55 | 55 | ||
56 | enum { | 56 | enum { |
57 | HURR_SCALABILTY = 0, /* Hurricane Scalability info */ | 57 | HURR_SCALABILTY = 0, /* Hurricane Scalability info */ |
58 | HURR_RIOIB = 2, /* Hurricane RIOIB info */ | 58 | HURR_RIOIB = 2, /* Hurricane RIOIB info */ |
59 | COMPAT_CALGARY = 4, /* Compatibility Calgary */ | 59 | COMPAT_CALGARY = 4, /* Compatibility Calgary */ |
60 | ALT_CALGARY = 5, /* Second Planar Calgary */ | 60 | ALT_CALGARY = 5, /* Second Planar Calgary */ |
61 | }; | 61 | }; |
62 | 62 | ||
63 | /* | 63 | /* |
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h index 520a379f4b80..750f2a3542b3 100644 --- a/include/asm-x86/rwsem.h +++ b/include/asm-x86/rwsem.h | |||
@@ -56,14 +56,16 @@ extern asmregparm struct rw_semaphore * | |||
56 | /* | 56 | /* |
57 | * the semaphore definition | 57 | * the semaphore definition |
58 | */ | 58 | */ |
59 | struct rw_semaphore { | 59 | |
60 | signed long count; | ||
61 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 60 | #define RWSEM_UNLOCKED_VALUE 0x00000000 |
62 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 61 | #define RWSEM_ACTIVE_BIAS 0x00000001 |
63 | #define RWSEM_ACTIVE_MASK 0x0000ffff | 62 | #define RWSEM_ACTIVE_MASK 0x0000ffff |
64 | #define RWSEM_WAITING_BIAS (-0x00010000) | 63 | #define RWSEM_WAITING_BIAS (-0x00010000) |
65 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 64 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
66 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 65 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
66 | |||
67 | struct rw_semaphore { | ||
68 | signed long count; | ||
67 | spinlock_t wait_lock; | 69 | spinlock_t wait_lock; |
68 | struct list_head wait_list; | 70 | struct list_head wait_list; |
69 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 71 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
@@ -78,11 +80,13 @@ struct rw_semaphore { | |||
78 | #endif | 80 | #endif |
79 | 81 | ||
80 | 82 | ||
81 | #define __RWSEM_INITIALIZER(name) \ | 83 | #define __RWSEM_INITIALIZER(name) \ |
82 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | 84 | { \ |
83 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } | 85 | RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ |
86 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \ | ||
87 | } | ||
84 | 88 | ||
85 | #define DECLARE_RWSEM(name) \ | 89 | #define DECLARE_RWSEM(name) \ |
86 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | 90 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
87 | 91 | ||
88 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | 92 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, |
@@ -100,16 +104,16 @@ do { \ | |||
100 | */ | 104 | */ |
101 | static inline void __down_read(struct rw_semaphore *sem) | 105 | static inline void __down_read(struct rw_semaphore *sem) |
102 | { | 106 | { |
103 | __asm__ __volatile__( | 107 | asm volatile("# beginning down_read\n\t" |
104 | "# beginning down_read\n\t" | 108 | LOCK_PREFIX " incl (%%eax)\n\t" |
105 | LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ | 109 | /* adds 0x00000001, returns the old value */ |
106 | " jns 1f\n" | 110 | " jns 1f\n" |
107 | " call call_rwsem_down_read_failed\n" | 111 | " call call_rwsem_down_read_failed\n" |
108 | "1:\n\t" | 112 | "1:\n\t" |
109 | "# ending down_read\n\t" | 113 | "# ending down_read\n\t" |
110 | : "+m" (sem->count) | 114 | : "+m" (sem->count) |
111 | : "a" (sem) | 115 | : "a" (sem) |
112 | : "memory", "cc"); | 116 | : "memory", "cc"); |
113 | } | 117 | } |
114 | 118 | ||
115 | /* | 119 | /* |
@@ -118,21 +122,20 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value | |||
118 | static inline int __down_read_trylock(struct rw_semaphore *sem) | 122 | static inline int __down_read_trylock(struct rw_semaphore *sem) |
119 | { | 123 | { |
120 | __s32 result, tmp; | 124 | __s32 result, tmp; |
121 | __asm__ __volatile__( | 125 | asm volatile("# beginning __down_read_trylock\n\t" |
122 | "# beginning __down_read_trylock\n\t" | 126 | " movl %0,%1\n\t" |
123 | " movl %0,%1\n\t" | 127 | "1:\n\t" |
124 | "1:\n\t" | 128 | " movl %1,%2\n\t" |
125 | " movl %1,%2\n\t" | 129 | " addl %3,%2\n\t" |
126 | " addl %3,%2\n\t" | 130 | " jle 2f\n\t" |
127 | " jle 2f\n\t" | 131 | LOCK_PREFIX " cmpxchgl %2,%0\n\t" |
128 | LOCK_PREFIX " cmpxchgl %2,%0\n\t" | 132 | " jnz 1b\n\t" |
129 | " jnz 1b\n\t" | 133 | "2:\n\t" |
130 | "2:\n\t" | 134 | "# ending __down_read_trylock\n\t" |
131 | "# ending __down_read_trylock\n\t" | 135 | : "+m" (sem->count), "=&a" (result), "=&r" (tmp) |
132 | : "+m" (sem->count), "=&a" (result), "=&r" (tmp) | 136 | : "i" (RWSEM_ACTIVE_READ_BIAS) |
133 | : "i" (RWSEM_ACTIVE_READ_BIAS) | 137 | : "memory", "cc"); |
134 | : "memory", "cc"); | 138 | return result >= 0 ? 1 : 0; |
135 | return result>=0 ? 1 : 0; | ||
136 | } | 139 | } |
137 | 140 | ||
138 | /* | 141 | /* |
@@ -143,17 +146,18 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
143 | int tmp; | 146 | int tmp; |
144 | 147 | ||
145 | tmp = RWSEM_ACTIVE_WRITE_BIAS; | 148 | tmp = RWSEM_ACTIVE_WRITE_BIAS; |
146 | __asm__ __volatile__( | 149 | asm volatile("# beginning down_write\n\t" |
147 | "# beginning down_write\n\t" | 150 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" |
148 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ | 151 | /* subtract 0x0000ffff, returns the old value */ |
149 | " testl %%edx,%%edx\n\t" /* was the count 0 before? */ | 152 | " testl %%edx,%%edx\n\t" |
150 | " jz 1f\n" | 153 | /* was the count 0 before? */ |
151 | " call call_rwsem_down_write_failed\n" | 154 | " jz 1f\n" |
152 | "1:\n" | 155 | " call call_rwsem_down_write_failed\n" |
153 | "# ending down_write" | 156 | "1:\n" |
154 | : "+m" (sem->count), "=d" (tmp) | 157 | "# ending down_write" |
155 | : "a" (sem), "1" (tmp) | 158 | : "+m" (sem->count), "=d" (tmp) |
156 | : "memory", "cc"); | 159 | : "a" (sem), "1" (tmp) |
160 | : "memory", "cc"); | ||
157 | } | 161 | } |
158 | 162 | ||
159 | static inline void __down_write(struct rw_semaphore *sem) | 163 | static inline void __down_write(struct rw_semaphore *sem) |
@@ -167,7 +171,7 @@ static inline void __down_write(struct rw_semaphore *sem) | |||
167 | static inline int __down_write_trylock(struct rw_semaphore *sem) | 171 | static inline int __down_write_trylock(struct rw_semaphore *sem) |
168 | { | 172 | { |
169 | signed long ret = cmpxchg(&sem->count, | 173 | signed long ret = cmpxchg(&sem->count, |
170 | RWSEM_UNLOCKED_VALUE, | 174 | RWSEM_UNLOCKED_VALUE, |
171 | RWSEM_ACTIVE_WRITE_BIAS); | 175 | RWSEM_ACTIVE_WRITE_BIAS); |
172 | if (ret == RWSEM_UNLOCKED_VALUE) | 176 | if (ret == RWSEM_UNLOCKED_VALUE) |
173 | return 1; | 177 | return 1; |
@@ -180,16 +184,16 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) | |||
180 | static inline void __up_read(struct rw_semaphore *sem) | 184 | static inline void __up_read(struct rw_semaphore *sem) |
181 | { | 185 | { |
182 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; | 186 | __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; |
183 | __asm__ __volatile__( | 187 | asm volatile("# beginning __up_read\n\t" |
184 | "# beginning __up_read\n\t" | 188 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" |
185 | LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ | 189 | /* subtracts 1, returns the old value */ |
186 | " jns 1f\n\t" | 190 | " jns 1f\n\t" |
187 | " call call_rwsem_wake\n" | 191 | " call call_rwsem_wake\n" |
188 | "1:\n" | 192 | "1:\n" |
189 | "# ending __up_read\n" | 193 | "# ending __up_read\n" |
190 | : "+m" (sem->count), "=d" (tmp) | 194 | : "+m" (sem->count), "=d" (tmp) |
191 | : "a" (sem), "1" (tmp) | 195 | : "a" (sem), "1" (tmp) |
192 | : "memory", "cc"); | 196 | : "memory", "cc"); |
193 | } | 197 | } |
194 | 198 | ||
195 | /* | 199 | /* |
@@ -197,17 +201,18 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu | |||
197 | */ | 201 | */ |
198 | static inline void __up_write(struct rw_semaphore *sem) | 202 | static inline void __up_write(struct rw_semaphore *sem) |
199 | { | 203 | { |
200 | __asm__ __volatile__( | 204 | asm volatile("# beginning __up_write\n\t" |
201 | "# beginning __up_write\n\t" | 205 | " movl %2,%%edx\n\t" |
202 | " movl %2,%%edx\n\t" | 206 | LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" |
203 | LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ | 207 | /* tries to transition |
204 | " jz 1f\n" | 208 | 0xffff0001 -> 0x00000000 */ |
205 | " call call_rwsem_wake\n" | 209 | " jz 1f\n" |
206 | "1:\n\t" | 210 | " call call_rwsem_wake\n" |
207 | "# ending __up_write\n" | 211 | "1:\n\t" |
208 | : "+m" (sem->count) | 212 | "# ending __up_write\n" |
209 | : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) | 213 | : "+m" (sem->count) |
210 | : "memory", "cc", "edx"); | 214 | : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) |
215 | : "memory", "cc", "edx"); | ||
211 | } | 216 | } |
212 | 217 | ||
213 | /* | 218 | /* |
@@ -215,16 +220,16 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> | |||
215 | */ | 220 | */ |
216 | static inline void __downgrade_write(struct rw_semaphore *sem) | 221 | static inline void __downgrade_write(struct rw_semaphore *sem) |
217 | { | 222 | { |
218 | __asm__ __volatile__( | 223 | asm volatile("# beginning __downgrade_write\n\t" |
219 | "# beginning __downgrade_write\n\t" | 224 | LOCK_PREFIX " addl %2,(%%eax)\n\t" |
220 | LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ | 225 | /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ |
221 | " jns 1f\n\t" | 226 | " jns 1f\n\t" |
222 | " call call_rwsem_downgrade_wake\n" | 227 | " call call_rwsem_downgrade_wake\n" |
223 | "1:\n\t" | 228 | "1:\n\t" |
224 | "# ending __downgrade_write\n" | 229 | "# ending __downgrade_write\n" |
225 | : "+m" (sem->count) | 230 | : "+m" (sem->count) |
226 | : "a" (sem), "i" (-RWSEM_WAITING_BIAS) | 231 | : "a" (sem), "i" (-RWSEM_WAITING_BIAS) |
227 | : "memory", "cc"); | 232 | : "memory", "cc"); |
228 | } | 233 | } |
229 | 234 | ||
230 | /* | 235 | /* |
@@ -232,10 +237,9 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 | |||
232 | */ | 237 | */ |
233 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | 238 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) |
234 | { | 239 | { |
235 | __asm__ __volatile__( | 240 | asm volatile(LOCK_PREFIX "addl %1,%0" |
236 | LOCK_PREFIX "addl %1,%0" | 241 | : "+m" (sem->count) |
237 | : "+m" (sem->count) | 242 | : "ir" (delta)); |
238 | : "ir" (delta)); | ||
239 | } | 243 | } |
240 | 244 | ||
241 | /* | 245 | /* |
@@ -245,12 +249,11 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | |||
245 | { | 249 | { |
246 | int tmp = delta; | 250 | int tmp = delta; |
247 | 251 | ||
248 | __asm__ __volatile__( | 252 | asm volatile(LOCK_PREFIX "xadd %0,%1" |
249 | LOCK_PREFIX "xadd %0,%1" | 253 | : "+r" (tmp), "+m" (sem->count) |
250 | : "+r" (tmp), "+m" (sem->count) | 254 | : : "memory"); |
251 | : : "memory"); | ||
252 | 255 | ||
253 | return tmp+delta; | 256 | return tmp + delta; |
254 | } | 257 | } |
255 | 258 | ||
256 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | 259 | static inline int rwsem_is_locked(struct rw_semaphore *sem) |
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h index 23f0535fec61..ed5131dd7d92 100644 --- a/include/asm-x86/segment.h +++ b/include/asm-x86/segment.h | |||
@@ -191,13 +191,14 @@ | |||
191 | #define SEGMENT_TI_MASK 0x4 | 191 | #define SEGMENT_TI_MASK 0x4 |
192 | 192 | ||
193 | #define IDT_ENTRIES 256 | 193 | #define IDT_ENTRIES 256 |
194 | #define NUM_EXCEPTION_VECTORS 32 | ||
194 | #define GDT_SIZE (GDT_ENTRIES * 8) | 195 | #define GDT_SIZE (GDT_ENTRIES * 8) |
195 | #define GDT_ENTRY_TLS_ENTRIES 3 | 196 | #define GDT_ENTRY_TLS_ENTRIES 3 |
196 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) | 197 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) |
197 | 198 | ||
198 | #ifdef __KERNEL__ | 199 | #ifdef __KERNEL__ |
199 | #ifndef __ASSEMBLY__ | 200 | #ifndef __ASSEMBLY__ |
200 | extern const char early_idt_handlers[IDT_ENTRIES][10]; | 201 | extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10]; |
201 | #endif | 202 | #endif |
202 | #endif | 203 | #endif |
203 | 204 | ||
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h index 572c0b67a6b0..d9b2034ed1d2 100644 --- a/include/asm-x86/semaphore.h +++ b/include/asm-x86/semaphore.h | |||
@@ -1,5 +1 @@ | |||
1 | #ifdef CONFIG_X86_32 | #include <linux/semaphore.h> | |
2 | # include "semaphore_32.h" | ||
3 | #else | ||
4 | # include "semaphore_64.h" | ||
5 | #endif | ||
diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h deleted file mode 100644 index ac96d3804d0c..000000000000 --- a/include/asm-x86/semaphore_32.h +++ /dev/null | |||
@@ -1,175 +0,0 @@ | |||
1 | #ifndef _I386_SEMAPHORE_H | ||
2 | #define _I386_SEMAPHORE_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | |||
8 | /* | ||
9 | * SMP- and interrupt-safe semaphores.. | ||
10 | * | ||
11 | * (C) Copyright 1996 Linus Torvalds | ||
12 | * | ||
13 | * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in | ||
14 | * the original code and to make semaphore waits | ||
15 | * interruptible so that processes waiting on | ||
16 | * semaphores can be killed. | ||
17 | * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper | ||
18 | * functions in asm/sempahore-helper.h while fixing a | ||
19 | * potential and subtle race discovered by Ulrich Schmid | ||
20 | * in down_interruptible(). Since I started to play here I | ||
21 | * also implemented the `trylock' semaphore operation. | ||
22 | * 1999-07-02 Artur Skawina <skawina@geocities.com> | ||
23 | * Optimized "0(ecx)" -> "(ecx)" (the assembler does not | ||
24 | * do this). Changed calling sequences from push/jmp to | ||
25 | * traditional call/ret. | ||
26 | * Modified 2001-01-01 Andreas Franck <afranck@gmx.de> | ||
27 | * Some hacks to ensure compatibility with recent | ||
28 | * GCC snapshots, to avoid stack corruption when compiling | ||
29 | * with -fomit-frame-pointer. It's not sure if this will | ||
30 | * be fixed in GCC, as our previous implementation was a | ||
31 | * bit dubious. | ||
32 | * | ||
33 | * If you would like to see an analysis of this implementation, please | ||
34 | * ftp to gcom.com and download the file | ||
35 | * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. | ||
36 | * | ||
37 | */ | ||
38 | |||
39 | #include <asm/system.h> | ||
40 | #include <asm/atomic.h> | ||
41 | #include <linux/wait.h> | ||
42 | #include <linux/rwsem.h> | ||
43 | |||
44 | struct semaphore { | ||
45 | atomic_t count; | ||
46 | int sleepers; | ||
47 | wait_queue_head_t wait; | ||
48 | }; | ||
49 | |||
50 | |||
51 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
52 | { \ | ||
53 | .count = ATOMIC_INIT(n), \ | ||
54 | .sleepers = 0, \ | ||
55 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
56 | } | ||
57 | |||
58 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
59 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
60 | |||
61 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
62 | |||
63 | static inline void sema_init (struct semaphore *sem, int val) | ||
64 | { | ||
65 | /* | ||
66 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
67 | * | ||
68 | * i'd rather use the more flexible initialization above, but sadly | ||
69 | * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. | ||
70 | */ | ||
71 | atomic_set(&sem->count, val); | ||
72 | sem->sleepers = 0; | ||
73 | init_waitqueue_head(&sem->wait); | ||
74 | } | ||
75 | |||
76 | static inline void init_MUTEX (struct semaphore *sem) | ||
77 | { | ||
78 | sema_init(sem, 1); | ||
79 | } | ||
80 | |||
81 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
82 | { | ||
83 | sema_init(sem, 0); | ||
84 | } | ||
85 | |||
86 | extern asmregparm void __down_failed(atomic_t *count_ptr); | ||
87 | extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr); | ||
88 | extern asmregparm int __down_failed_trylock(atomic_t *count_ptr); | ||
89 | extern asmregparm void __up_wakeup(atomic_t *count_ptr); | ||
90 | |||
91 | /* | ||
92 | * This is ugly, but we want the default case to fall through. | ||
93 | * "__down_failed" is a special asm handler that calls the C | ||
94 | * routine that actually waits. See arch/i386/kernel/semaphore.c | ||
95 | */ | ||
96 | static inline void down(struct semaphore * sem) | ||
97 | { | ||
98 | might_sleep(); | ||
99 | __asm__ __volatile__( | ||
100 | "# atomic down operation\n\t" | ||
101 | LOCK_PREFIX "decl %0\n\t" /* --sem->count */ | ||
102 | "jns 2f\n" | ||
103 | "\tlea %0,%%eax\n\t" | ||
104 | "call __down_failed\n" | ||
105 | "2:" | ||
106 | :"+m" (sem->count) | ||
107 | : | ||
108 | :"memory","ax"); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Interruptible try to acquire a semaphore. If we obtained | ||
113 | * it, return zero. If we were interrupted, returns -EINTR | ||
114 | */ | ||
115 | static inline int down_interruptible(struct semaphore * sem) | ||
116 | { | ||
117 | int result; | ||
118 | |||
119 | might_sleep(); | ||
120 | __asm__ __volatile__( | ||
121 | "# atomic interruptible down operation\n\t" | ||
122 | "xorl %0,%0\n\t" | ||
123 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | ||
124 | "jns 2f\n\t" | ||
125 | "lea %1,%%eax\n\t" | ||
126 | "call __down_failed_interruptible\n" | ||
127 | "2:" | ||
128 | :"=&a" (result), "+m" (sem->count) | ||
129 | : | ||
130 | :"memory"); | ||
131 | return result; | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * Non-blockingly attempt to down() a semaphore. | ||
136 | * Returns zero if we acquired it | ||
137 | */ | ||
138 | static inline int down_trylock(struct semaphore * sem) | ||
139 | { | ||
140 | int result; | ||
141 | |||
142 | __asm__ __volatile__( | ||
143 | "# atomic interruptible down operation\n\t" | ||
144 | "xorl %0,%0\n\t" | ||
145 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | ||
146 | "jns 2f\n\t" | ||
147 | "lea %1,%%eax\n\t" | ||
148 | "call __down_failed_trylock\n\t" | ||
149 | "2:\n" | ||
150 | :"=&a" (result), "+m" (sem->count) | ||
151 | : | ||
152 | :"memory"); | ||
153 | return result; | ||
154 | } | ||
155 | |||
156 | /* | ||
157 | * Note! This is subtle. We jump to wake people up only if | ||
158 | * the semaphore was negative (== somebody was waiting on it). | ||
159 | */ | ||
160 | static inline void up(struct semaphore * sem) | ||
161 | { | ||
162 | __asm__ __volatile__( | ||
163 | "# atomic up operation\n\t" | ||
164 | LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ | ||
165 | "jg 1f\n\t" | ||
166 | "lea %0,%%eax\n\t" | ||
167 | "call __up_wakeup\n" | ||
168 | "1:" | ||
169 | :"+m" (sem->count) | ||
170 | : | ||
171 | :"memory","ax"); | ||
172 | } | ||
173 | |||
174 | #endif | ||
175 | #endif | ||
diff --git a/include/asm-x86/semaphore_64.h b/include/asm-x86/semaphore_64.h deleted file mode 100644 index 79694306bf7d..000000000000 --- a/include/asm-x86/semaphore_64.h +++ /dev/null | |||
@@ -1,180 +0,0 @@ | |||
1 | #ifndef _X86_64_SEMAPHORE_H | ||
2 | #define _X86_64_SEMAPHORE_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef __KERNEL__ | ||
7 | |||
8 | /* | ||
9 | * SMP- and interrupt-safe semaphores.. | ||
10 | * | ||
11 | * (C) Copyright 1996 Linus Torvalds | ||
12 | * | ||
13 | * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in | ||
14 | * the original code and to make semaphore waits | ||
15 | * interruptible so that processes waiting on | ||
16 | * semaphores can be killed. | ||
17 | * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper | ||
18 | * functions in asm/sempahore-helper.h while fixing a | ||
19 | * potential and subtle race discovered by Ulrich Schmid | ||
20 | * in down_interruptible(). Since I started to play here I | ||
21 | * also implemented the `trylock' semaphore operation. | ||
22 | * 1999-07-02 Artur Skawina <skawina@geocities.com> | ||
23 | * Optimized "0(ecx)" -> "(ecx)" (the assembler does not | ||
24 | * do this). Changed calling sequences from push/jmp to | ||
25 | * traditional call/ret. | ||
26 | * Modified 2001-01-01 Andreas Franck <afranck@gmx.de> | ||
27 | * Some hacks to ensure compatibility with recent | ||
28 | * GCC snapshots, to avoid stack corruption when compiling | ||
29 | * with -fomit-frame-pointer. It's not sure if this will | ||
30 | * be fixed in GCC, as our previous implementation was a | ||
31 | * bit dubious. | ||
32 | * | ||
33 | * If you would like to see an analysis of this implementation, please | ||
34 | * ftp to gcom.com and download the file | ||
35 | * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. | ||
36 | * | ||
37 | */ | ||
38 | |||
39 | #include <asm/system.h> | ||
40 | #include <asm/atomic.h> | ||
41 | #include <asm/rwlock.h> | ||
42 | #include <linux/wait.h> | ||
43 | #include <linux/rwsem.h> | ||
44 | #include <linux/stringify.h> | ||
45 | |||
46 | struct semaphore { | ||
47 | atomic_t count; | ||
48 | int sleepers; | ||
49 | wait_queue_head_t wait; | ||
50 | }; | ||
51 | |||
52 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
53 | { \ | ||
54 | .count = ATOMIC_INIT(n), \ | ||
55 | .sleepers = 0, \ | ||
56 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
57 | } | ||
58 | |||
59 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
60 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
61 | |||
62 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
63 | |||
64 | static inline void sema_init (struct semaphore *sem, int val) | ||
65 | { | ||
66 | /* | ||
67 | * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); | ||
68 | * | ||
69 | * i'd rather use the more flexible initialization above, but sadly | ||
70 | * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. | ||
71 | */ | ||
72 | atomic_set(&sem->count, val); | ||
73 | sem->sleepers = 0; | ||
74 | init_waitqueue_head(&sem->wait); | ||
75 | } | ||
76 | |||
77 | static inline void init_MUTEX (struct semaphore *sem) | ||
78 | { | ||
79 | sema_init(sem, 1); | ||
80 | } | ||
81 | |||
82 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
83 | { | ||
84 | sema_init(sem, 0); | ||
85 | } | ||
86 | |||
87 | asmlinkage void __down_failed(void /* special register calling convention */); | ||
88 | asmlinkage int __down_failed_interruptible(void /* params in registers */); | ||
89 | asmlinkage int __down_failed_trylock(void /* params in registers */); | ||
90 | asmlinkage void __up_wakeup(void /* special register calling convention */); | ||
91 | |||
92 | asmlinkage void __down(struct semaphore * sem); | ||
93 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
94 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
95 | asmlinkage void __up(struct semaphore * sem); | ||
96 | |||
97 | /* | ||
98 | * This is ugly, but we want the default case to fall through. | ||
99 | * "__down_failed" is a special asm handler that calls the C | ||
100 | * routine that actually waits. See arch/x86_64/kernel/semaphore.c | ||
101 | */ | ||
102 | static inline void down(struct semaphore * sem) | ||
103 | { | ||
104 | might_sleep(); | ||
105 | |||
106 | __asm__ __volatile__( | ||
107 | "# atomic down operation\n\t" | ||
108 | LOCK_PREFIX "decl %0\n\t" /* --sem->count */ | ||
109 | "jns 1f\n\t" | ||
110 | "call __down_failed\n" | ||
111 | "1:" | ||
112 | :"=m" (sem->count) | ||
113 | :"D" (sem) | ||
114 | :"memory"); | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Interruptible try to acquire a semaphore. If we obtained | ||
119 | * it, return zero. If we were interrupted, returns -EINTR | ||
120 | */ | ||
121 | static inline int down_interruptible(struct semaphore * sem) | ||
122 | { | ||
123 | int result; | ||
124 | |||
125 | might_sleep(); | ||
126 | |||
127 | __asm__ __volatile__( | ||
128 | "# atomic interruptible down operation\n\t" | ||
129 | "xorl %0,%0\n\t" | ||
130 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | ||
131 | "jns 2f\n\t" | ||
132 | "call __down_failed_interruptible\n" | ||
133 | "2:\n" | ||
134 | :"=&a" (result), "=m" (sem->count) | ||
135 | :"D" (sem) | ||
136 | :"memory"); | ||
137 | return result; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Non-blockingly attempt to down() a semaphore. | ||
142 | * Returns zero if we acquired it | ||
143 | */ | ||
144 | static inline int down_trylock(struct semaphore * sem) | ||
145 | { | ||
146 | int result; | ||
147 | |||
148 | __asm__ __volatile__( | ||
149 | "# atomic interruptible down operation\n\t" | ||
150 | "xorl %0,%0\n\t" | ||
151 | LOCK_PREFIX "decl %1\n\t" /* --sem->count */ | ||
152 | "jns 2f\n\t" | ||
153 | "call __down_failed_trylock\n\t" | ||
154 | "2:\n" | ||
155 | :"=&a" (result), "=m" (sem->count) | ||
156 | :"D" (sem) | ||
157 | :"memory","cc"); | ||
158 | return result; | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Note! This is subtle. We jump to wake people up only if | ||
163 | * the semaphore was negative (== somebody was waiting on it). | ||
164 | * The default case (no contention) will result in NO | ||
165 | * jumps for both down() and up(). | ||
166 | */ | ||
167 | static inline void up(struct semaphore * sem) | ||
168 | { | ||
169 | __asm__ __volatile__( | ||
170 | "# atomic up operation\n\t" | ||
171 | LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ | ||
172 | "jg 1f\n\t" | ||
173 | "call __up_wakeup\n" | ||
174 | "1:" | ||
175 | :"=m" (sem->count) | ||
176 | :"D" (sem) | ||
177 | :"memory"); | ||
178 | } | ||
179 | #endif /* __KERNEL__ */ | ||
180 | #endif | ||
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index 071e054abd82..fa6763af8d26 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h | |||
@@ -4,6 +4,10 @@ | |||
4 | #define COMMAND_LINE_SIZE 2048 | 4 | #define COMMAND_LINE_SIZE 2048 |
5 | 5 | ||
6 | #ifndef __ASSEMBLY__ | 6 | #ifndef __ASSEMBLY__ |
7 | |||
8 | /* Interrupt control for vSMPowered x86_64 systems */ | ||
9 | void vsmp_init(void); | ||
10 | |||
7 | char *machine_specific_memory_setup(void); | 11 | char *machine_specific_memory_setup(void); |
8 | #ifndef CONFIG_PARAVIRT | 12 | #ifndef CONFIG_PARAVIRT |
9 | #define paravirt_post_allocator_init() do {} while (0) | 13 | #define paravirt_post_allocator_init() do {} while (0) |
@@ -51,8 +55,8 @@ struct e820entry; | |||
51 | char * __init machine_specific_memory_setup(void); | 55 | char * __init machine_specific_memory_setup(void); |
52 | char *memory_setup(void); | 56 | char *memory_setup(void); |
53 | 57 | ||
54 | int __init copy_e820_map(struct e820entry * biosmap, int nr_map); | 58 | int __init copy_e820_map(struct e820entry *biosmap, int nr_map); |
55 | int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); | 59 | int __init sanitize_e820_map(struct e820entry *biosmap, char *pnr_map); |
56 | void __init add_memory_region(unsigned long long start, | 60 | void __init add_memory_region(unsigned long long start, |
57 | unsigned long long size, int type); | 61 | unsigned long long size, int type); |
58 | 62 | ||
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index d743947f4c77..2f9c884d2c0f 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h | |||
@@ -79,7 +79,7 @@ struct sigcontext { | |||
79 | unsigned long flags; | 79 | unsigned long flags; |
80 | unsigned long sp_at_signal; | 80 | unsigned long sp_at_signal; |
81 | unsigned short ss, __ssh; | 81 | unsigned short ss, __ssh; |
82 | struct _fpstate __user * fpstate; | 82 | struct _fpstate __user *fpstate; |
83 | unsigned long oldmask; | 83 | unsigned long oldmask; |
84 | unsigned long cr2; | 84 | unsigned long cr2; |
85 | }; | 85 | }; |
@@ -107,7 +107,7 @@ struct sigcontext { | |||
107 | unsigned long eflags; | 107 | unsigned long eflags; |
108 | unsigned long esp_at_signal; | 108 | unsigned long esp_at_signal; |
109 | unsigned short ss, __ssh; | 109 | unsigned short ss, __ssh; |
110 | struct _fpstate __user * fpstate; | 110 | struct _fpstate __user *fpstate; |
111 | unsigned long oldmask; | 111 | unsigned long oldmask; |
112 | unsigned long cr2; | 112 | unsigned long cr2; |
113 | }; | 113 | }; |
@@ -121,7 +121,8 @@ struct sigcontext { | |||
121 | struct _fpstate { | 121 | struct _fpstate { |
122 | __u16 cwd; | 122 | __u16 cwd; |
123 | __u16 swd; | 123 | __u16 swd; |
124 | __u16 twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ | 124 | __u16 twd; /* Note this is not the same as the |
125 | 32bit/x87/FSAVE twd */ | ||
125 | __u16 fop; | 126 | __u16 fop; |
126 | __u64 rip; | 127 | __u64 rip; |
127 | __u64 rdp; | 128 | __u64 rdp; |
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 6ffab4fd593a..57a9686fb491 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h | |||
@@ -26,7 +26,7 @@ struct _fpstate_ia32 { | |||
26 | __u32 cw; | 26 | __u32 cw; |
27 | __u32 sw; | 27 | __u32 sw; |
28 | __u32 tag; /* not compatible to 64bit twd */ | 28 | __u32 tag; /* not compatible to 64bit twd */ |
29 | __u32 ipoff; | 29 | __u32 ipoff; |
30 | __u32 cssel; | 30 | __u32 cssel; |
31 | __u32 dataoff; | 31 | __u32 dataoff; |
32 | __u32 datasel; | 32 | __u32 datasel; |
@@ -39,7 +39,7 @@ struct _fpstate_ia32 { | |||
39 | __u32 mxcsr; | 39 | __u32 mxcsr; |
40 | __u32 reserved; | 40 | __u32 reserved; |
41 | struct _fpxreg _fxsr_st[8]; | 41 | struct _fpxreg _fxsr_st[8]; |
42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ | 42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ |
43 | __u32 padding[56]; | 43 | __u32 padding[56]; |
44 | }; | 44 | }; |
45 | 45 | ||
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h index aee7eca585ab..f15186d39c69 100644 --- a/include/asm-x86/signal.h +++ b/include/asm-x86/signal.h | |||
@@ -185,61 +185,61 @@ typedef struct sigaltstack { | |||
185 | 185 | ||
186 | #define __HAVE_ARCH_SIG_BITOPS | 186 | #define __HAVE_ARCH_SIG_BITOPS |
187 | 187 | ||
188 | #define sigaddset(set,sig) \ | 188 | #define sigaddset(set,sig) \ |
189 | (__builtin_constantp(sig) ? \ | 189 | (__builtin_constantp(sig) \ |
190 | __const_sigaddset((set),(sig)) : \ | 190 | ? __const_sigaddset((set), (sig)) \ |
191 | __gen_sigaddset((set),(sig))) | 191 | : __gen_sigaddset((set), (sig))) |
192 | 192 | ||
193 | static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) | 193 | static inline void __gen_sigaddset(sigset_t *set, int _sig) |
194 | { | 194 | { |
195 | __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); | 195 | asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); |
196 | } | 196 | } |
197 | 197 | ||
198 | static __inline__ void __const_sigaddset(sigset_t *set, int _sig) | 198 | static inline void __const_sigaddset(sigset_t *set, int _sig) |
199 | { | 199 | { |
200 | unsigned long sig = _sig - 1; | 200 | unsigned long sig = _sig - 1; |
201 | set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); | 201 | set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); |
202 | } | 202 | } |
203 | 203 | ||
204 | #define sigdelset(set,sig) \ | 204 | #define sigdelset(set, sig) \ |
205 | (__builtin_constant_p(sig) ? \ | 205 | (__builtin_constant_p(sig) \ |
206 | __const_sigdelset((set),(sig)) : \ | 206 | ? __const_sigdelset((set), (sig)) \ |
207 | __gen_sigdelset((set),(sig))) | 207 | : __gen_sigdelset((set), (sig))) |
208 | 208 | ||
209 | 209 | ||
210 | static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) | 210 | static inline void __gen_sigdelset(sigset_t *set, int _sig) |
211 | { | 211 | { |
212 | __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); | 212 | asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); |
213 | } | 213 | } |
214 | 214 | ||
215 | static __inline__ void __const_sigdelset(sigset_t *set, int _sig) | 215 | static inline void __const_sigdelset(sigset_t *set, int _sig) |
216 | { | 216 | { |
217 | unsigned long sig = _sig - 1; | 217 | unsigned long sig = _sig - 1; |
218 | set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); | 218 | set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); |
219 | } | 219 | } |
220 | 220 | ||
221 | static __inline__ int __const_sigismember(sigset_t *set, int _sig) | 221 | static inline int __const_sigismember(sigset_t *set, int _sig) |
222 | { | 222 | { |
223 | unsigned long sig = _sig - 1; | 223 | unsigned long sig = _sig - 1; |
224 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); | 224 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); |
225 | } | 225 | } |
226 | 226 | ||
227 | static __inline__ int __gen_sigismember(sigset_t *set, int _sig) | 227 | static inline int __gen_sigismember(sigset_t *set, int _sig) |
228 | { | 228 | { |
229 | int ret; | 229 | int ret; |
230 | __asm__("btl %2,%1\n\tsbbl %0,%0" | 230 | asm("btl %2,%1\n\tsbbl %0,%0" |
231 | : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); | 231 | : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); |
232 | return ret; | 232 | return ret; |
233 | } | 233 | } |
234 | 234 | ||
235 | #define sigismember(set,sig) \ | 235 | #define sigismember(set, sig) \ |
236 | (__builtin_constant_p(sig) ? \ | 236 | (__builtin_constant_p(sig) \ |
237 | __const_sigismember((set),(sig)) : \ | 237 | ? __const_sigismember((set), (sig)) \ |
238 | __gen_sigismember((set),(sig))) | 238 | : __gen_sigismember((set), (sig))) |
239 | 239 | ||
240 | static __inline__ int sigfindinword(unsigned long word) | 240 | static inline int sigfindinword(unsigned long word) |
241 | { | 241 | { |
242 | __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); | 242 | asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); |
243 | return word; | 243 | return word; |
244 | } | 244 | } |
245 | 245 | ||
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index f2e8319a6b0b..62ebdec394b9 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h | |||
@@ -1,5 +1,209 @@ | |||
1 | #ifdef CONFIG_X86_32 | 1 | #ifndef _ASM_X86_SMP_H_ |
2 | # include "smp_32.h" | 2 | #define _ASM_X86_SMP_H_ |
3 | #ifndef __ASSEMBLY__ | ||
4 | #include <linux/cpumask.h> | ||
5 | #include <linux/init.h> | ||
6 | #include <asm/percpu.h> | ||
7 | |||
8 | /* | ||
9 | * We need the APIC definitions automatically as part of 'smp.h' | ||
10 | */ | ||
11 | #ifdef CONFIG_X86_LOCAL_APIC | ||
12 | # include <asm/mpspec.h> | ||
13 | # include <asm/apic.h> | ||
14 | # ifdef CONFIG_X86_IO_APIC | ||
15 | # include <asm/io_apic.h> | ||
16 | # endif | ||
17 | #endif | ||
18 | #include <asm/pda.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | |||
21 | extern cpumask_t cpu_callout_map; | ||
22 | extern cpumask_t cpu_initialized; | ||
23 | extern cpumask_t cpu_callin_map; | ||
24 | |||
25 | extern void (*mtrr_hook)(void); | ||
26 | extern void zap_low_mappings(void); | ||
27 | |||
28 | extern int smp_num_siblings; | ||
29 | extern unsigned int num_processors; | ||
30 | extern cpumask_t cpu_initialized; | ||
31 | |||
32 | #ifdef CONFIG_SMP | ||
33 | extern u16 x86_cpu_to_apicid_init[]; | ||
34 | extern u16 x86_bios_cpu_apicid_init[]; | ||
35 | extern void *x86_cpu_to_apicid_early_ptr; | ||
36 | extern void *x86_bios_cpu_apicid_early_ptr; | ||
3 | #else | 37 | #else |
4 | # include "smp_64.h" | 38 | #define x86_cpu_to_apicid_early_ptr NULL |
39 | #define x86_bios_cpu_apicid_early_ptr NULL | ||
40 | #endif | ||
41 | |||
42 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
43 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
44 | DECLARE_PER_CPU(u16, cpu_llc_id); | ||
45 | DECLARE_PER_CPU(u16, x86_cpu_to_apicid); | ||
46 | DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); | ||
47 | |||
48 | /* Static state in head.S used to set up a CPU */ | ||
49 | extern struct { | ||
50 | void *sp; | ||
51 | unsigned short ss; | ||
52 | } stack_start; | ||
53 | |||
54 | struct smp_ops { | ||
55 | void (*smp_prepare_boot_cpu)(void); | ||
56 | void (*smp_prepare_cpus)(unsigned max_cpus); | ||
57 | int (*cpu_up)(unsigned cpu); | ||
58 | void (*smp_cpus_done)(unsigned max_cpus); | ||
59 | |||
60 | void (*smp_send_stop)(void); | ||
61 | void (*smp_send_reschedule)(int cpu); | ||
62 | int (*smp_call_function_mask)(cpumask_t mask, | ||
63 | void (*func)(void *info), void *info, | ||
64 | int wait); | ||
65 | }; | ||
66 | |||
67 | /* Globals due to paravirt */ | ||
68 | extern void set_cpu_sibling_map(int cpu); | ||
69 | |||
70 | #ifdef CONFIG_SMP | ||
71 | #ifndef CONFIG_PARAVIRT | ||
72 | #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) | ||
73 | #endif | ||
74 | extern struct smp_ops smp_ops; | ||
75 | |||
76 | static inline void smp_send_stop(void) | ||
77 | { | ||
78 | smp_ops.smp_send_stop(); | ||
79 | } | ||
80 | |||
81 | static inline void smp_prepare_boot_cpu(void) | ||
82 | { | ||
83 | smp_ops.smp_prepare_boot_cpu(); | ||
84 | } | ||
85 | |||
86 | static inline void smp_prepare_cpus(unsigned int max_cpus) | ||
87 | { | ||
88 | smp_ops.smp_prepare_cpus(max_cpus); | ||
89 | } | ||
90 | |||
91 | static inline void smp_cpus_done(unsigned int max_cpus) | ||
92 | { | ||
93 | smp_ops.smp_cpus_done(max_cpus); | ||
94 | } | ||
95 | |||
96 | static inline int __cpu_up(unsigned int cpu) | ||
97 | { | ||
98 | return smp_ops.cpu_up(cpu); | ||
99 | } | ||
100 | |||
101 | static inline void smp_send_reschedule(int cpu) | ||
102 | { | ||
103 | smp_ops.smp_send_reschedule(cpu); | ||
104 | } | ||
105 | |||
106 | static inline int smp_call_function_mask(cpumask_t mask, | ||
107 | void (*func) (void *info), void *info, | ||
108 | int wait) | ||
109 | { | ||
110 | return smp_ops.smp_call_function_mask(mask, func, info, wait); | ||
111 | } | ||
112 | |||
113 | void native_smp_prepare_boot_cpu(void); | ||
114 | void native_smp_prepare_cpus(unsigned int max_cpus); | ||
115 | void native_smp_cpus_done(unsigned int max_cpus); | ||
116 | int native_cpu_up(unsigned int cpunum); | ||
117 | |||
118 | extern int __cpu_disable(void); | ||
119 | extern void __cpu_die(unsigned int cpu); | ||
120 | |||
121 | extern void prefill_possible_map(void); | ||
122 | |||
123 | void smp_store_cpu_info(int id); | ||
124 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | ||
125 | |||
126 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ | ||
127 | static inline int num_booting_cpus(void) | ||
128 | { | ||
129 | return cpus_weight(cpu_callout_map); | ||
130 | } | ||
131 | #endif /* CONFIG_SMP */ | ||
132 | |||
133 | extern unsigned disabled_cpus __cpuinitdata; | ||
134 | |||
135 | #ifdef CONFIG_X86_32_SMP | ||
136 | /* | ||
137 | * This function is needed by all SMP systems. It must _always_ be valid | ||
138 | * from the initial startup. We map APIC_BASE very early in page_setup(), | ||
139 | * so this is correct in the x86 case. | ||
140 | */ | ||
141 | DECLARE_PER_CPU(int, cpu_number); | ||
142 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) | ||
143 | extern int safe_smp_processor_id(void); | ||
144 | |||
145 | #elif defined(CONFIG_X86_64_SMP) | ||
146 | #define raw_smp_processor_id() read_pda(cpunumber) | ||
147 | |||
148 | #define stack_smp_processor_id() \ | ||
149 | ({ \ | ||
150 | struct thread_info *ti; \ | ||
151 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ | ||
152 | ti->cpu; \ | ||
153 | }) | ||
154 | #define safe_smp_processor_id() smp_processor_id() | ||
155 | |||
156 | #else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */ | ||
157 | #define cpu_physical_id(cpu) boot_cpu_physical_apicid | ||
158 | #define safe_smp_processor_id() 0 | ||
159 | #define stack_smp_processor_id() 0 | ||
160 | #endif | ||
161 | |||
162 | #ifdef CONFIG_X86_LOCAL_APIC | ||
163 | |||
164 | static inline int logical_smp_processor_id(void) | ||
165 | { | ||
166 | /* we don't want to mark this access volatile - bad code generation */ | ||
167 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | ||
168 | } | ||
169 | |||
170 | #ifndef CONFIG_X86_64 | ||
171 | static inline unsigned int read_apic_id(void) | ||
172 | { | ||
173 | return *(u32 *)(APIC_BASE + APIC_ID); | ||
174 | } | ||
175 | #else | ||
176 | extern unsigned int read_apic_id(void); | ||
177 | #endif | ||
178 | |||
179 | |||
180 | # ifdef APIC_DEFINITION | ||
181 | extern int hard_smp_processor_id(void); | ||
182 | # else | ||
183 | # include <mach_apicdef.h> | ||
184 | static inline int hard_smp_processor_id(void) | ||
185 | { | ||
186 | /* we don't want to mark this access volatile - bad code generation */ | ||
187 | return GET_APIC_ID(read_apic_id()); | ||
188 | } | ||
189 | # endif /* APIC_DEFINITION */ | ||
190 | |||
191 | #else /* CONFIG_X86_LOCAL_APIC */ | ||
192 | |||
193 | # ifndef CONFIG_SMP | ||
194 | # define hard_smp_processor_id() 0 | ||
195 | # endif | ||
196 | |||
197 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
198 | |||
199 | #ifdef CONFIG_HOTPLUG_CPU | ||
200 | extern void cpu_exit_clear(void); | ||
201 | extern void cpu_uninit(void); | ||
202 | extern void remove_siblinginfo(int cpu); | ||
203 | #endif | ||
204 | |||
205 | extern void smp_alloc_memory(void); | ||
206 | extern void lock_ipi_call_lock(void); | ||
207 | extern void unlock_ipi_call_lock(void); | ||
208 | #endif /* __ASSEMBLY__ */ | ||
5 | #endif | 209 | #endif |
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h deleted file mode 100644 index 56152e312287..000000000000 --- a/include/asm-x86/smp_32.h +++ /dev/null | |||
@@ -1,165 +0,0 @@ | |||
1 | #ifndef __ASM_SMP_H | ||
2 | #define __ASM_SMP_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | #include <linux/cpumask.h> | ||
6 | #include <linux/init.h> | ||
7 | |||
8 | /* | ||
9 | * We need the APIC definitions automatically as part of 'smp.h' | ||
10 | */ | ||
11 | #ifdef CONFIG_X86_LOCAL_APIC | ||
12 | # include <asm/mpspec.h> | ||
13 | # include <asm/apic.h> | ||
14 | # ifdef CONFIG_X86_IO_APIC | ||
15 | # include <asm/io_apic.h> | ||
16 | # endif | ||
17 | #endif | ||
18 | |||
19 | extern cpumask_t cpu_callout_map; | ||
20 | extern cpumask_t cpu_callin_map; | ||
21 | |||
22 | extern int smp_num_siblings; | ||
23 | extern unsigned int num_processors; | ||
24 | |||
25 | extern void smp_alloc_memory(void); | ||
26 | extern void lock_ipi_call_lock(void); | ||
27 | extern void unlock_ipi_call_lock(void); | ||
28 | |||
29 | extern void (*mtrr_hook) (void); | ||
30 | extern void zap_low_mappings (void); | ||
31 | |||
32 | extern u8 __initdata x86_cpu_to_apicid_init[]; | ||
33 | extern void *x86_cpu_to_apicid_early_ptr; | ||
34 | |||
35 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
36 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
37 | DECLARE_PER_CPU(u8, cpu_llc_id); | ||
38 | DECLARE_PER_CPU(u8, x86_cpu_to_apicid); | ||
39 | |||
40 | #ifdef CONFIG_HOTPLUG_CPU | ||
41 | extern void cpu_exit_clear(void); | ||
42 | extern void cpu_uninit(void); | ||
43 | extern void remove_siblinginfo(int cpu); | ||
44 | #endif | ||
45 | |||
46 | /* Globals due to paravirt */ | ||
47 | extern void set_cpu_sibling_map(int cpu); | ||
48 | |||
49 | struct smp_ops | ||
50 | { | ||
51 | void (*smp_prepare_boot_cpu)(void); | ||
52 | void (*smp_prepare_cpus)(unsigned max_cpus); | ||
53 | int (*cpu_up)(unsigned cpu); | ||
54 | void (*smp_cpus_done)(unsigned max_cpus); | ||
55 | |||
56 | void (*smp_send_stop)(void); | ||
57 | void (*smp_send_reschedule)(int cpu); | ||
58 | int (*smp_call_function_mask)(cpumask_t mask, | ||
59 | void (*func)(void *info), void *info, | ||
60 | int wait); | ||
61 | }; | ||
62 | |||
63 | #ifdef CONFIG_SMP | ||
64 | extern struct smp_ops smp_ops; | ||
65 | |||
66 | static inline void smp_prepare_boot_cpu(void) | ||
67 | { | ||
68 | smp_ops.smp_prepare_boot_cpu(); | ||
69 | } | ||
70 | static inline void smp_prepare_cpus(unsigned int max_cpus) | ||
71 | { | ||
72 | smp_ops.smp_prepare_cpus(max_cpus); | ||
73 | } | ||
74 | static inline int __cpu_up(unsigned int cpu) | ||
75 | { | ||
76 | return smp_ops.cpu_up(cpu); | ||
77 | } | ||
78 | static inline void smp_cpus_done(unsigned int max_cpus) | ||
79 | { | ||
80 | smp_ops.smp_cpus_done(max_cpus); | ||
81 | } | ||
82 | |||
83 | static inline void smp_send_stop(void) | ||
84 | { | ||
85 | smp_ops.smp_send_stop(); | ||
86 | } | ||
87 | static inline void smp_send_reschedule(int cpu) | ||
88 | { | ||
89 | smp_ops.smp_send_reschedule(cpu); | ||
90 | } | ||
91 | static inline int smp_call_function_mask(cpumask_t mask, | ||
92 | void (*func) (void *info), void *info, | ||
93 | int wait) | ||
94 | { | ||
95 | return smp_ops.smp_call_function_mask(mask, func, info, wait); | ||
96 | } | ||
97 | |||
98 | void native_smp_prepare_boot_cpu(void); | ||
99 | void native_smp_prepare_cpus(unsigned int max_cpus); | ||
100 | int native_cpu_up(unsigned int cpunum); | ||
101 | void native_smp_cpus_done(unsigned int max_cpus); | ||
102 | |||
103 | #ifndef CONFIG_PARAVIRT | ||
104 | #define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0) | ||
105 | #endif | ||
106 | |||
107 | extern int __cpu_disable(void); | ||
108 | extern void __cpu_die(unsigned int cpu); | ||
109 | |||
110 | /* | ||
111 | * This function is needed by all SMP systems. It must _always_ be valid | ||
112 | * from the initial startup. We map APIC_BASE very early in page_setup(), | ||
113 | * so this is correct in the x86 case. | ||
114 | */ | ||
115 | DECLARE_PER_CPU(int, cpu_number); | ||
116 | #define raw_smp_processor_id() (x86_read_percpu(cpu_number)) | ||
117 | |||
118 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | ||
119 | |||
120 | extern int safe_smp_processor_id(void); | ||
121 | |||
122 | void __cpuinit smp_store_cpu_info(int id); | ||
123 | |||
124 | /* We don't mark CPUs online until __cpu_up(), so we need another measure */ | ||
125 | static inline int num_booting_cpus(void) | ||
126 | { | ||
127 | return cpus_weight(cpu_callout_map); | ||
128 | } | ||
129 | |||
130 | #else /* CONFIG_SMP */ | ||
131 | |||
132 | #define safe_smp_processor_id() 0 | ||
133 | #define cpu_physical_id(cpu) boot_cpu_physical_apicid | ||
134 | |||
135 | #endif /* !CONFIG_SMP */ | ||
136 | |||
137 | #ifdef CONFIG_X86_LOCAL_APIC | ||
138 | |||
139 | static __inline int logical_smp_processor_id(void) | ||
140 | { | ||
141 | /* we don't want to mark this access volatile - bad code generation */ | ||
142 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | ||
143 | } | ||
144 | |||
145 | # ifdef APIC_DEFINITION | ||
146 | extern int hard_smp_processor_id(void); | ||
147 | # else | ||
148 | # include <mach_apicdef.h> | ||
149 | static inline int hard_smp_processor_id(void) | ||
150 | { | ||
151 | /* we don't want to mark this access volatile - bad code generation */ | ||
152 | return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); | ||
153 | } | ||
154 | # endif /* APIC_DEFINITION */ | ||
155 | |||
156 | #else /* CONFIG_X86_LOCAL_APIC */ | ||
157 | |||
158 | # ifndef CONFIG_SMP | ||
159 | # define hard_smp_processor_id() 0 | ||
160 | # endif | ||
161 | |||
162 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
163 | |||
164 | #endif /* !ASSEMBLY */ | ||
165 | #endif | ||
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h deleted file mode 100644 index e0a75519ad21..000000000000 --- a/include/asm-x86/smp_64.h +++ /dev/null | |||
@@ -1,101 +0,0 @@ | |||
1 | #ifndef __ASM_SMP_H | ||
2 | #define __ASM_SMP_H | ||
3 | |||
4 | #include <linux/cpumask.h> | ||
5 | #include <linux/init.h> | ||
6 | |||
7 | /* | ||
8 | * We need the APIC definitions automatically as part of 'smp.h' | ||
9 | */ | ||
10 | #include <asm/apic.h> | ||
11 | #include <asm/io_apic.h> | ||
12 | #include <asm/mpspec.h> | ||
13 | #include <asm/pda.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | |||
16 | extern cpumask_t cpu_callout_map; | ||
17 | extern cpumask_t cpu_initialized; | ||
18 | |||
19 | extern int smp_num_siblings; | ||
20 | extern unsigned int num_processors; | ||
21 | |||
22 | extern void smp_alloc_memory(void); | ||
23 | extern void lock_ipi_call_lock(void); | ||
24 | extern void unlock_ipi_call_lock(void); | ||
25 | |||
26 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | ||
27 | void *info, int wait); | ||
28 | |||
29 | extern u16 __initdata x86_cpu_to_apicid_init[]; | ||
30 | extern u16 __initdata x86_bios_cpu_apicid_init[]; | ||
31 | extern void *x86_cpu_to_apicid_early_ptr; | ||
32 | extern void *x86_bios_cpu_apicid_early_ptr; | ||
33 | |||
34 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | ||
35 | DECLARE_PER_CPU(cpumask_t, cpu_core_map); | ||
36 | DECLARE_PER_CPU(u16, cpu_llc_id); | ||
37 | DECLARE_PER_CPU(u16, x86_cpu_to_apicid); | ||
38 | DECLARE_PER_CPU(u16, x86_bios_cpu_apicid); | ||
39 | |||
40 | static inline int cpu_present_to_apicid(int mps_cpu) | ||
41 | { | ||
42 | if (cpu_present(mps_cpu)) | ||
43 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); | ||
44 | else | ||
45 | return BAD_APICID; | ||
46 | } | ||
47 | |||
48 | #ifdef CONFIG_SMP | ||
49 | |||
50 | #define SMP_TRAMPOLINE_BASE 0x6000 | ||
51 | |||
52 | extern int __cpu_disable(void); | ||
53 | extern void __cpu_die(unsigned int cpu); | ||
54 | extern void prefill_possible_map(void); | ||
55 | extern unsigned __cpuinitdata disabled_cpus; | ||
56 | |||
57 | #define raw_smp_processor_id() read_pda(cpunumber) | ||
58 | #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) | ||
59 | |||
60 | #define stack_smp_processor_id() \ | ||
61 | ({ \ | ||
62 | struct thread_info *ti; \ | ||
63 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ | ||
64 | ti->cpu; \ | ||
65 | }) | ||
66 | |||
67 | /* | ||
68 | * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies | ||
69 | * scheduling and IPI sending and compresses data structures. | ||
70 | */ | ||
71 | static inline int num_booting_cpus(void) | ||
72 | { | ||
73 | return cpus_weight(cpu_callout_map); | ||
74 | } | ||
75 | |||
76 | extern void smp_send_reschedule(int cpu); | ||
77 | |||
78 | #else /* CONFIG_SMP */ | ||
79 | |||
80 | extern unsigned int boot_cpu_id; | ||
81 | #define cpu_physical_id(cpu) boot_cpu_id | ||
82 | #define stack_smp_processor_id() 0 | ||
83 | |||
84 | #endif /* !CONFIG_SMP */ | ||
85 | |||
86 | #define safe_smp_processor_id() smp_processor_id() | ||
87 | |||
88 | static __inline int logical_smp_processor_id(void) | ||
89 | { | ||
90 | /* we don't want to mark this access volatile - bad code generation */ | ||
91 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | ||
92 | } | ||
93 | |||
94 | static inline int hard_smp_processor_id(void) | ||
95 | { | ||
96 | /* we don't want to mark this access volatile - bad code generation */ | ||
97 | return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID)); | ||
98 | } | ||
99 | |||
100 | #endif | ||
101 | |||
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h index fa58cd55411a..9bd48b0a534b 100644 --- a/include/asm-x86/sparsemem.h +++ b/include/asm-x86/sparsemem.h | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #ifdef CONFIG_X86_32 | 17 | #ifdef CONFIG_X86_32 |
18 | # ifdef CONFIG_X86_PAE | 18 | # ifdef CONFIG_X86_PAE |
19 | # define SECTION_SIZE_BITS 30 | 19 | # define SECTION_SIZE_BITS 29 |
20 | # define MAX_PHYSADDR_BITS 36 | 20 | # define MAX_PHYSADDR_BITS 36 |
21 | # define MAX_PHYSMEM_BITS 36 | 21 | # define MAX_PHYSMEM_BITS 36 |
22 | # else | 22 | # else |
@@ -26,8 +26,8 @@ | |||
26 | # endif | 26 | # endif |
27 | #else /* CONFIG_X86_32 */ | 27 | #else /* CONFIG_X86_32 */ |
28 | # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ | 28 | # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ |
29 | # define MAX_PHYSADDR_BITS 40 | 29 | # define MAX_PHYSADDR_BITS 44 |
30 | # define MAX_PHYSMEM_BITS 40 | 30 | # define MAX_PHYSMEM_BITS 44 |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #endif /* CONFIG_SPARSEMEM */ | 33 | #endif /* CONFIG_SPARSEMEM */ |
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index 23804c1890ff..bc6376f1bc5a 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h | |||
@@ -78,11 +78,11 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | |||
78 | return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; | 78 | return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; |
79 | } | 79 | } |
80 | 80 | ||
81 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 81 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) |
82 | { | 82 | { |
83 | short inc = 0x0100; | 83 | short inc = 0x0100; |
84 | 84 | ||
85 | __asm__ __volatile__ ( | 85 | asm volatile ( |
86 | LOCK_PREFIX "xaddw %w0, %1\n" | 86 | LOCK_PREFIX "xaddw %w0, %1\n" |
87 | "1:\t" | 87 | "1:\t" |
88 | "cmpb %h0, %b0\n\t" | 88 | "cmpb %h0, %b0\n\t" |
@@ -92,42 +92,40 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
92 | /* don't need lfence here, because loads are in-order */ | 92 | /* don't need lfence here, because loads are in-order */ |
93 | "jmp 1b\n" | 93 | "jmp 1b\n" |
94 | "2:" | 94 | "2:" |
95 | :"+Q" (inc), "+m" (lock->slock) | 95 | : "+Q" (inc), "+m" (lock->slock) |
96 | : | 96 | : |
97 | :"memory", "cc"); | 97 | : "memory", "cc"); |
98 | } | 98 | } |
99 | 99 | ||
100 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 100 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
101 | 101 | ||
102 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 102 | static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) |
103 | { | 103 | { |
104 | int tmp; | 104 | int tmp; |
105 | short new; | 105 | short new; |
106 | 106 | ||
107 | asm volatile( | 107 | asm volatile("movw %2,%w0\n\t" |
108 | "movw %2,%w0\n\t" | 108 | "cmpb %h0,%b0\n\t" |
109 | "cmpb %h0,%b0\n\t" | 109 | "jne 1f\n\t" |
110 | "jne 1f\n\t" | 110 | "movw %w0,%w1\n\t" |
111 | "movw %w0,%w1\n\t" | 111 | "incb %h1\n\t" |
112 | "incb %h1\n\t" | 112 | "lock ; cmpxchgw %w1,%2\n\t" |
113 | "lock ; cmpxchgw %w1,%2\n\t" | 113 | "1:" |
114 | "1:" | 114 | "sete %b1\n\t" |
115 | "sete %b1\n\t" | 115 | "movzbl %b1,%0\n\t" |
116 | "movzbl %b1,%0\n\t" | 116 | : "=&a" (tmp), "=Q" (new), "+m" (lock->slock) |
117 | :"=&a" (tmp), "=Q" (new), "+m" (lock->slock) | 117 | : |
118 | : | 118 | : "memory", "cc"); |
119 | : "memory", "cc"); | ||
120 | 119 | ||
121 | return tmp; | 120 | return tmp; |
122 | } | 121 | } |
123 | 122 | ||
124 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 123 | static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) |
125 | { | 124 | { |
126 | __asm__ __volatile__( | 125 | asm volatile(UNLOCK_LOCK_PREFIX "incb %0" |
127 | UNLOCK_LOCK_PREFIX "incb %0" | 126 | : "+m" (lock->slock) |
128 | :"+m" (lock->slock) | 127 | : |
129 | : | 128 | : "memory", "cc"); |
130 | :"memory", "cc"); | ||
131 | } | 129 | } |
132 | #else | 130 | #else |
133 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 131 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) |
@@ -144,60 +142,57 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | |||
144 | return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; | 142 | return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; |
145 | } | 143 | } |
146 | 144 | ||
147 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 145 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) |
148 | { | 146 | { |
149 | int inc = 0x00010000; | 147 | int inc = 0x00010000; |
150 | int tmp; | 148 | int tmp; |
151 | 149 | ||
152 | __asm__ __volatile__ ( | 150 | asm volatile("lock ; xaddl %0, %1\n" |
153 | "lock ; xaddl %0, %1\n" | 151 | "movzwl %w0, %2\n\t" |
154 | "movzwl %w0, %2\n\t" | 152 | "shrl $16, %0\n\t" |
155 | "shrl $16, %0\n\t" | 153 | "1:\t" |
156 | "1:\t" | 154 | "cmpl %0, %2\n\t" |
157 | "cmpl %0, %2\n\t" | 155 | "je 2f\n\t" |
158 | "je 2f\n\t" | 156 | "rep ; nop\n\t" |
159 | "rep ; nop\n\t" | 157 | "movzwl %1, %2\n\t" |
160 | "movzwl %1, %2\n\t" | 158 | /* don't need lfence here, because loads are in-order */ |
161 | /* don't need lfence here, because loads are in-order */ | 159 | "jmp 1b\n" |
162 | "jmp 1b\n" | 160 | "2:" |
163 | "2:" | 161 | : "+Q" (inc), "+m" (lock->slock), "=r" (tmp) |
164 | :"+Q" (inc), "+m" (lock->slock), "=r" (tmp) | 162 | : |
165 | : | 163 | : "memory", "cc"); |
166 | :"memory", "cc"); | ||
167 | } | 164 | } |
168 | 165 | ||
169 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 166 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
170 | 167 | ||
171 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 168 | static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) |
172 | { | 169 | { |
173 | int tmp; | 170 | int tmp; |
174 | int new; | 171 | int new; |
175 | 172 | ||
176 | asm volatile( | 173 | asm volatile("movl %2,%0\n\t" |
177 | "movl %2,%0\n\t" | 174 | "movl %0,%1\n\t" |
178 | "movl %0,%1\n\t" | 175 | "roll $16, %0\n\t" |
179 | "roll $16, %0\n\t" | 176 | "cmpl %0,%1\n\t" |
180 | "cmpl %0,%1\n\t" | 177 | "jne 1f\n\t" |
181 | "jne 1f\n\t" | 178 | "addl $0x00010000, %1\n\t" |
182 | "addl $0x00010000, %1\n\t" | 179 | "lock ; cmpxchgl %1,%2\n\t" |
183 | "lock ; cmpxchgl %1,%2\n\t" | 180 | "1:" |
184 | "1:" | 181 | "sete %b1\n\t" |
185 | "sete %b1\n\t" | 182 | "movzbl %b1,%0\n\t" |
186 | "movzbl %b1,%0\n\t" | 183 | : "=&a" (tmp), "=r" (new), "+m" (lock->slock) |
187 | :"=&a" (tmp), "=r" (new), "+m" (lock->slock) | 184 | : |
188 | : | 185 | : "memory", "cc"); |
189 | : "memory", "cc"); | ||
190 | 186 | ||
191 | return tmp; | 187 | return tmp; |
192 | } | 188 | } |
193 | 189 | ||
194 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 190 | static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) |
195 | { | 191 | { |
196 | __asm__ __volatile__( | 192 | asm volatile(UNLOCK_LOCK_PREFIX "incw %0" |
197 | UNLOCK_LOCK_PREFIX "incw %0" | 193 | : "+m" (lock->slock) |
198 | :"+m" (lock->slock) | 194 | : |
199 | : | 195 | : "memory", "cc"); |
200 | :"memory", "cc"); | ||
201 | } | 196 | } |
202 | #endif | 197 | #endif |
203 | 198 | ||
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h index 165ab4bdc02b..f4bba131d068 100644 --- a/include/asm-x86/srat.h +++ b/include/asm-x86/srat.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Some of the code in this file has been gleaned from the 64 bit | 2 | * Some of the code in this file has been gleaned from the 64 bit |
3 | * discontigmem support code base. | 3 | * discontigmem support code base. |
4 | * | 4 | * |
5 | * Copyright (C) 2002, IBM Corp. | 5 | * Copyright (C) 2002, IBM Corp. |
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index c5d13a86dea7..b49369ad9a61 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | /* Let gcc decide wether to inline or use the out of line functions */ | 6 | /* Let gcc decide whether to inline or use the out of line functions */ |
7 | 7 | ||
8 | #define __HAVE_ARCH_STRCPY | 8 | #define __HAVE_ARCH_STRCPY |
9 | extern char *strcpy(char *dest, const char *src); | 9 | extern char *strcpy(char *dest, const char *src); |
diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h index e583da7918fb..52b5ab383395 100644 --- a/include/asm-x86/string_64.h +++ b/include/asm-x86/string_64.h | |||
@@ -3,26 +3,24 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | /* Written 2002 by Andi Kleen */ | 6 | /* Written 2002 by Andi Kleen */ |
7 | 7 | ||
8 | /* Only used for special circumstances. Stolen from i386/string.h */ | 8 | /* Only used for special circumstances. Stolen from i386/string.h */ |
9 | static __always_inline void * | 9 | static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n) |
10 | __inline_memcpy(void * to, const void * from, size_t n) | ||
11 | { | 10 | { |
12 | unsigned long d0, d1, d2; | 11 | unsigned long d0, d1, d2; |
13 | __asm__ __volatile__( | 12 | asm volatile("rep ; movsl\n\t" |
14 | "rep ; movsl\n\t" | 13 | "testb $2,%b4\n\t" |
15 | "testb $2,%b4\n\t" | 14 | "je 1f\n\t" |
16 | "je 1f\n\t" | 15 | "movsw\n" |
17 | "movsw\n" | 16 | "1:\ttestb $1,%b4\n\t" |
18 | "1:\ttestb $1,%b4\n\t" | 17 | "je 2f\n\t" |
19 | "je 2f\n\t" | 18 | "movsb\n" |
20 | "movsb\n" | 19 | "2:" |
21 | "2:" | 20 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) |
22 | : "=&c" (d0), "=&D" (d1), "=&S" (d2) | 21 | : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from) |
23 | :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from) | 22 | : "memory"); |
24 | : "memory"); | 23 | return to; |
25 | return (to); | ||
26 | } | 24 | } |
27 | 25 | ||
28 | /* Even with __builtin_ the compiler may decide to use the out of line | 26 | /* Even with __builtin_ the compiler may decide to use the out of line |
@@ -32,28 +30,30 @@ return (to); | |||
32 | #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 | 30 | #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 |
33 | extern void *memcpy(void *to, const void *from, size_t len); | 31 | extern void *memcpy(void *to, const void *from, size_t len); |
34 | #else | 32 | #else |
35 | extern void *__memcpy(void *to, const void *from, size_t len); | 33 | extern void *__memcpy(void *to, const void *from, size_t len); |
36 | #define memcpy(dst,src,len) \ | 34 | #define memcpy(dst, src, len) \ |
37 | ({ size_t __len = (len); \ | 35 | ({ \ |
38 | void *__ret; \ | 36 | size_t __len = (len); \ |
39 | if (__builtin_constant_p(len) && __len >= 64) \ | 37 | void *__ret; \ |
40 | __ret = __memcpy((dst),(src),__len); \ | 38 | if (__builtin_constant_p(len) && __len >= 64) \ |
41 | else \ | 39 | __ret = __memcpy((dst), (src), __len); \ |
42 | __ret = __builtin_memcpy((dst),(src),__len); \ | 40 | else \ |
43 | __ret; }) | 41 | __ret = __builtin_memcpy((dst), (src), __len); \ |
42 | __ret; \ | ||
43 | }) | ||
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #define __HAVE_ARCH_MEMSET | 46 | #define __HAVE_ARCH_MEMSET |
47 | void *memset(void *s, int c, size_t n); | 47 | void *memset(void *s, int c, size_t n); |
48 | 48 | ||
49 | #define __HAVE_ARCH_MEMMOVE | 49 | #define __HAVE_ARCH_MEMMOVE |
50 | void * memmove(void * dest,const void *src,size_t count); | 50 | void *memmove(void *dest, const void *src, size_t count); |
51 | 51 | ||
52 | int memcmp(const void * cs,const void * ct,size_t count); | 52 | int memcmp(const void *cs, const void *ct, size_t count); |
53 | size_t strlen(const char * s); | 53 | size_t strlen(const char *s); |
54 | char *strcpy(char * dest,const char *src); | 54 | char *strcpy(char *dest, const char *src); |
55 | char *strcat(char * dest, const char * src); | 55 | char *strcat(char *dest, const char *src); |
56 | int strcmp(const char * cs,const char * ct); | 56 | int strcmp(const char *cs, const char *ct); |
57 | 57 | ||
58 | #endif /* __KERNEL__ */ | 58 | #endif /* __KERNEL__ */ |
59 | 59 | ||
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h index 1bbda3ad7796..24e1c080aa8a 100644 --- a/include/asm-x86/suspend_32.h +++ b/include/asm-x86/suspend_32.h | |||
@@ -10,7 +10,7 @@ static inline int arch_prepare_suspend(void) { return 0; } | |||
10 | 10 | ||
11 | /* image of the saved processor state */ | 11 | /* image of the saved processor state */ |
12 | struct saved_context { | 12 | struct saved_context { |
13 | u16 es, fs, gs, ss; | 13 | u16 es, fs, gs, ss; |
14 | unsigned long cr0, cr2, cr3, cr4; | 14 | unsigned long cr0, cr2, cr3, cr4; |
15 | struct desc_ptr gdt; | 15 | struct desc_ptr gdt; |
16 | struct desc_ptr idt; | 16 | struct desc_ptr idt; |
@@ -32,11 +32,11 @@ extern unsigned long saved_edi; | |||
32 | static inline void acpi_save_register_state(unsigned long return_point) | 32 | static inline void acpi_save_register_state(unsigned long return_point) |
33 | { | 33 | { |
34 | saved_eip = return_point; | 34 | saved_eip = return_point; |
35 | asm volatile ("movl %%esp,%0" : "=m" (saved_esp)); | 35 | asm volatile("movl %%esp,%0" : "=m" (saved_esp)); |
36 | asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp)); | 36 | asm volatile("movl %%ebp,%0" : "=m" (saved_ebp)); |
37 | asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx)); | 37 | asm volatile("movl %%ebx,%0" : "=m" (saved_ebx)); |
38 | asm volatile ("movl %%edi,%0" : "=m" (saved_edi)); | 38 | asm volatile("movl %%edi,%0" : "=m" (saved_edi)); |
39 | asm volatile ("movl %%esi,%0" : "=m" (saved_esi)); | 39 | asm volatile("movl %%esi,%0" : "=m" (saved_esi)); |
40 | } | 40 | } |
41 | 41 | ||
42 | #define acpi_restore_register_state() do {} while (0) | 42 | #define acpi_restore_register_state() do {} while (0) |
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h index 2eb92cb81a0d..dc3262b43072 100644 --- a/include/asm-x86/suspend_64.h +++ b/include/asm-x86/suspend_64.h | |||
@@ -9,8 +9,7 @@ | |||
9 | #include <asm/desc.h> | 9 | #include <asm/desc.h> |
10 | #include <asm/i387.h> | 10 | #include <asm/i387.h> |
11 | 11 | ||
12 | static inline int | 12 | static inline int arch_prepare_suspend(void) |
13 | arch_prepare_suspend(void) | ||
14 | { | 13 | { |
15 | return 0; | 14 | return 0; |
16 | } | 15 | } |
@@ -25,7 +24,7 @@ arch_prepare_suspend(void) | |||
25 | */ | 24 | */ |
26 | struct saved_context { | 25 | struct saved_context { |
27 | struct pt_regs regs; | 26 | struct pt_regs regs; |
28 | u16 ds, es, fs, gs, ss; | 27 | u16 ds, es, fs, gs, ss; |
29 | unsigned long gs_base, gs_kernel_base, fs_base; | 28 | unsigned long gs_base, gs_kernel_base, fs_base; |
30 | unsigned long cr0, cr2, cr3, cr4, cr8; | 29 | unsigned long cr0, cr2, cr3, cr4, cr8; |
31 | unsigned long efer; | 30 | unsigned long efer; |
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h index f9c589539a82..f5d9e74b1e4a 100644 --- a/include/asm-x86/swiotlb.h +++ b/include/asm-x86/swiotlb.h | |||
@@ -8,15 +8,15 @@ | |||
8 | extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, | 8 | extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, |
9 | size_t size, int dir); | 9 | size_t size, int dir); |
10 | extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 10 | extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
11 | dma_addr_t *dma_handle, gfp_t flags); | 11 | dma_addr_t *dma_handle, gfp_t flags); |
12 | extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | 12 | extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, |
13 | size_t size, int dir); | 13 | size_t size, int dir); |
14 | extern void swiotlb_sync_single_for_cpu(struct device *hwdev, | 14 | extern void swiotlb_sync_single_for_cpu(struct device *hwdev, |
15 | dma_addr_t dev_addr, | 15 | dma_addr_t dev_addr, |
16 | size_t size, int dir); | 16 | size_t size, int dir); |
17 | extern void swiotlb_sync_single_for_device(struct device *hwdev, | 17 | extern void swiotlb_sync_single_for_device(struct device *hwdev, |
18 | dma_addr_t dev_addr, | 18 | dma_addr_t dev_addr, |
19 | size_t size, int dir); | 19 | size_t size, int dir); |
20 | extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, | 20 | extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, |
21 | dma_addr_t dev_addr, | 21 | dma_addr_t dev_addr, |
22 | unsigned long offset, | 22 | unsigned long offset, |
@@ -26,18 +26,18 @@ extern void swiotlb_sync_single_range_for_device(struct device *hwdev, | |||
26 | unsigned long offset, | 26 | unsigned long offset, |
27 | size_t size, int dir); | 27 | size_t size, int dir); |
28 | extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, | 28 | extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, |
29 | struct scatterlist *sg, int nelems, | 29 | struct scatterlist *sg, int nelems, |
30 | int dir); | 30 | int dir); |
31 | extern void swiotlb_sync_sg_for_device(struct device *hwdev, | 31 | extern void swiotlb_sync_sg_for_device(struct device *hwdev, |
32 | struct scatterlist *sg, int nelems, | 32 | struct scatterlist *sg, int nelems, |
33 | int dir); | 33 | int dir); |
34 | extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, | 34 | extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, |
35 | int nents, int direction); | 35 | int nents, int direction); |
36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | 36 | extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, |
37 | int nents, int direction); | 37 | int nents, int direction); |
38 | extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); | 38 | extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); |
39 | extern void swiotlb_free_coherent (struct device *hwdev, size_t size, | 39 | extern void swiotlb_free_coherent(struct device *hwdev, size_t size, |
40 | void *vaddr, dma_addr_t dma_handle); | 40 | void *vaddr, dma_addr_t dma_handle); |
41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); | 41 | extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); |
42 | extern void swiotlb_init(void); | 42 | extern void swiotlb_init(void); |
43 | 43 | ||
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h index 6b775c905666..b47a1d0b8a83 100644 --- a/include/asm-x86/sync_bitops.h +++ b/include/asm-x86/sync_bitops.h | |||
@@ -13,7 +13,7 @@ | |||
13 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 13 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define ADDR (*(volatile long *) addr) | 16 | #define ADDR (*(volatile long *)addr) |
17 | 17 | ||
18 | /** | 18 | /** |
19 | * sync_set_bit - Atomically set a bit in memory | 19 | * sync_set_bit - Atomically set a bit in memory |
@@ -26,12 +26,12 @@ | |||
26 | * Note that @nr may be almost arbitrarily large; this function is not | 26 | * Note that @nr may be almost arbitrarily large; this function is not |
27 | * restricted to acting on a single-word quantity. | 27 | * restricted to acting on a single-word quantity. |
28 | */ | 28 | */ |
29 | static inline void sync_set_bit(int nr, volatile unsigned long * addr) | 29 | static inline void sync_set_bit(int nr, volatile unsigned long *addr) |
30 | { | 30 | { |
31 | __asm__ __volatile__("lock; btsl %1,%0" | 31 | asm volatile("lock; btsl %1,%0" |
32 | :"+m" (ADDR) | 32 | : "+m" (ADDR) |
33 | :"Ir" (nr) | 33 | : "Ir" (nr) |
34 | : "memory"); | 34 | : "memory"); |
35 | } | 35 | } |
36 | 36 | ||
37 | /** | 37 | /** |
@@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr) | |||
44 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 44 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
45 | * in order to ensure changes are visible on other processors. | 45 | * in order to ensure changes are visible on other processors. |
46 | */ | 46 | */ |
47 | static inline void sync_clear_bit(int nr, volatile unsigned long * addr) | 47 | static inline void sync_clear_bit(int nr, volatile unsigned long *addr) |
48 | { | 48 | { |
49 | __asm__ __volatile__("lock; btrl %1,%0" | 49 | asm volatile("lock; btrl %1,%0" |
50 | :"+m" (ADDR) | 50 | : "+m" (ADDR) |
51 | :"Ir" (nr) | 51 | : "Ir" (nr) |
52 | : "memory"); | 52 | : "memory"); |
53 | } | 53 | } |
54 | 54 | ||
55 | /** | 55 | /** |
@@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr) | |||
61 | * Note that @nr may be almost arbitrarily large; this function is not | 61 | * Note that @nr may be almost arbitrarily large; this function is not |
62 | * restricted to acting on a single-word quantity. | 62 | * restricted to acting on a single-word quantity. |
63 | */ | 63 | */ |
64 | static inline void sync_change_bit(int nr, volatile unsigned long * addr) | 64 | static inline void sync_change_bit(int nr, volatile unsigned long *addr) |
65 | { | 65 | { |
66 | __asm__ __volatile__("lock; btcl %1,%0" | 66 | asm volatile("lock; btcl %1,%0" |
67 | :"+m" (ADDR) | 67 | : "+m" (ADDR) |
68 | :"Ir" (nr) | 68 | : "Ir" (nr) |
69 | : "memory"); | 69 | : "memory"); |
70 | } | 70 | } |
71 | 71 | ||
72 | /** | 72 | /** |
@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr) | |||
77 | * This operation is atomic and cannot be reordered. | 77 | * This operation is atomic and cannot be reordered. |
78 | * It also implies a memory barrier. | 78 | * It also implies a memory barrier. |
79 | */ | 79 | */ |
80 | static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) | 80 | static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr) |
81 | { | 81 | { |
82 | int oldbit; | 82 | int oldbit; |
83 | 83 | ||
84 | __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0" | 84 | asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0" |
85 | :"=r" (oldbit),"+m" (ADDR) | 85 | : "=r" (oldbit), "+m" (ADDR) |
86 | :"Ir" (nr) : "memory"); | 86 | : "Ir" (nr) : "memory"); |
87 | return oldbit; | 87 | return oldbit; |
88 | } | 88 | } |
89 | 89 | ||
@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) | |||
95 | * This operation is atomic and cannot be reordered. | 95 | * This operation is atomic and cannot be reordered. |
96 | * It also implies a memory barrier. | 96 | * It also implies a memory barrier. |
97 | */ | 97 | */ |
98 | static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) | 98 | static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr) |
99 | { | 99 | { |
100 | int oldbit; | 100 | int oldbit; |
101 | 101 | ||
102 | __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0" | 102 | asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0" |
103 | :"=r" (oldbit),"+m" (ADDR) | 103 | : "=r" (oldbit), "+m" (ADDR) |
104 | :"Ir" (nr) : "memory"); | 104 | : "Ir" (nr) : "memory"); |
105 | return oldbit; | 105 | return oldbit; |
106 | } | 106 | } |
107 | 107 | ||
@@ -113,36 +113,17 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) | |||
113 | * This operation is atomic and cannot be reordered. | 113 | * This operation is atomic and cannot be reordered. |
114 | * It also implies a memory barrier. | 114 | * It also implies a memory barrier. |
115 | */ | 115 | */ |
116 | static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) | 116 | static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr) |
117 | { | 117 | { |
118 | int oldbit; | 118 | int oldbit; |
119 | 119 | ||
120 | __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0" | 120 | asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0" |
121 | :"=r" (oldbit),"+m" (ADDR) | 121 | : "=r" (oldbit), "+m" (ADDR) |
122 | :"Ir" (nr) : "memory"); | 122 | : "Ir" (nr) : "memory"); |
123 | return oldbit; | 123 | return oldbit; |
124 | } | 124 | } |
125 | 125 | ||
126 | static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr) | 126 | #define sync_test_bit(nr, addr) test_bit(nr, addr) |
127 | { | ||
128 | return ((1UL << (nr & 31)) & | ||
129 | (((const volatile unsigned int *)addr)[nr >> 5])) != 0; | ||
130 | } | ||
131 | |||
132 | static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr) | ||
133 | { | ||
134 | int oldbit; | ||
135 | |||
136 | __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" | ||
137 | :"=r" (oldbit) | ||
138 | :"m" (ADDR),"Ir" (nr)); | ||
139 | return oldbit; | ||
140 | } | ||
141 | |||
142 | #define sync_test_bit(nr,addr) \ | ||
143 | (__builtin_constant_p(nr) ? \ | ||
144 | sync_constant_test_bit((nr),(addr)) : \ | ||
145 | sync_var_test_bit((nr),(addr))) | ||
146 | 127 | ||
147 | #undef ADDR | 128 | #undef ADDR |
148 | 129 | ||
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h index 9cff02ffe6c2..a2f04cd79b29 100644 --- a/include/asm-x86/system.h +++ b/include/asm-x86/system.h | |||
@@ -27,22 +27,44 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
27 | * Saving eflags is important. It switches not only IOPL between tasks, | 27 | * Saving eflags is important. It switches not only IOPL between tasks, |
28 | * it also protects other tasks from NT leaking through sysenter etc. | 28 | * it also protects other tasks from NT leaking through sysenter etc. |
29 | */ | 29 | */ |
30 | #define switch_to(prev, next, last) do { \ | 30 | #define switch_to(prev, next, last) \ |
31 | unsigned long esi, edi; \ | 31 | do { \ |
32 | asm volatile("pushfl\n\t" /* Save flags */ \ | 32 | /* \ |
33 | "pushl %%ebp\n\t" \ | 33 | * Context-switching clobbers all registers, so we clobber \ |
34 | "movl %%esp,%0\n\t" /* save ESP */ \ | 34 | * them explicitly, via unused output variables. \ |
35 | "movl %5,%%esp\n\t" /* restore ESP */ \ | 35 | * (EAX and EBP is not listed because EBP is saved/restored \ |
36 | "movl $1f,%1\n\t" /* save EIP */ \ | 36 | * explicitly for wchan access and EAX is the return value of \ |
37 | "pushl %6\n\t" /* restore EIP */ \ | 37 | * __switch_to()) \ |
38 | "jmp __switch_to\n" \ | 38 | */ \ |
39 | unsigned long ebx, ecx, edx, esi, edi; \ | ||
40 | \ | ||
41 | asm volatile("pushfl\n\t" /* save flags */ \ | ||
42 | "pushl %%ebp\n\t" /* save EBP */ \ | ||
43 | "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ | ||
44 | "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ | ||
45 | "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ | ||
46 | "pushl %[next_ip]\n\t" /* restore EIP */ \ | ||
47 | "jmp __switch_to\n" /* regparm call */ \ | ||
39 | "1:\t" \ | 48 | "1:\t" \ |
40 | "popl %%ebp\n\t" \ | 49 | "popl %%ebp\n\t" /* restore EBP */ \ |
41 | "popfl" \ | 50 | "popfl\n" /* restore flags */ \ |
42 | :"=m" (prev->thread.sp), "=m" (prev->thread.ip), \ | 51 | \ |
43 | "=a" (last), "=S" (esi), "=D" (edi) \ | 52 | /* output parameters */ \ |
44 | :"m" (next->thread.sp), "m" (next->thread.ip), \ | 53 | : [prev_sp] "=m" (prev->thread.sp), \ |
45 | "2" (prev), "d" (next)); \ | 54 | [prev_ip] "=m" (prev->thread.ip), \ |
55 | "=a" (last), \ | ||
56 | \ | ||
57 | /* clobbered output registers: */ \ | ||
58 | "=b" (ebx), "=c" (ecx), "=d" (edx), \ | ||
59 | "=S" (esi), "=D" (edi) \ | ||
60 | \ | ||
61 | /* input parameters: */ \ | ||
62 | : [next_sp] "m" (next->thread.sp), \ | ||
63 | [next_ip] "m" (next->thread.ip), \ | ||
64 | \ | ||
65 | /* regparm parameters for __switch_to(): */ \ | ||
66 | [prev] "a" (prev), \ | ||
67 | [next] "d" (next)); \ | ||
46 | } while (0) | 68 | } while (0) |
47 | 69 | ||
48 | /* | 70 | /* |
@@ -122,35 +144,34 @@ extern void load_gs_index(unsigned); | |||
122 | */ | 144 | */ |
123 | #define loadsegment(seg, value) \ | 145 | #define loadsegment(seg, value) \ |
124 | asm volatile("\n" \ | 146 | asm volatile("\n" \ |
125 | "1:\t" \ | 147 | "1:\t" \ |
126 | "movl %k0,%%" #seg "\n" \ | 148 | "movl %k0,%%" #seg "\n" \ |
127 | "2:\n" \ | 149 | "2:\n" \ |
128 | ".section .fixup,\"ax\"\n" \ | 150 | ".section .fixup,\"ax\"\n" \ |
129 | "3:\t" \ | 151 | "3:\t" \ |
130 | "movl %k1, %%" #seg "\n\t" \ | 152 | "movl %k1, %%" #seg "\n\t" \ |
131 | "jmp 2b\n" \ | 153 | "jmp 2b\n" \ |
132 | ".previous\n" \ | 154 | ".previous\n" \ |
133 | _ASM_EXTABLE(1b,3b) \ | 155 | _ASM_EXTABLE(1b,3b) \ |
134 | : :"r" (value), "r" (0)) | 156 | : :"r" (value), "r" (0)) |
135 | 157 | ||
136 | 158 | ||
137 | /* | 159 | /* |
138 | * Save a segment register away | 160 | * Save a segment register away |
139 | */ | 161 | */ |
140 | #define savesegment(seg, value) \ | 162 | #define savesegment(seg, value) \ |
141 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) | 163 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) |
142 | 164 | ||
143 | static inline unsigned long get_limit(unsigned long segment) | 165 | static inline unsigned long get_limit(unsigned long segment) |
144 | { | 166 | { |
145 | unsigned long __limit; | 167 | unsigned long __limit; |
146 | __asm__("lsll %1,%0" | 168 | asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); |
147 | :"=r" (__limit):"r" (segment)); | 169 | return __limit + 1; |
148 | return __limit+1; | ||
149 | } | 170 | } |
150 | 171 | ||
151 | static inline void native_clts(void) | 172 | static inline void native_clts(void) |
152 | { | 173 | { |
153 | asm volatile ("clts"); | 174 | asm volatile("clts"); |
154 | } | 175 | } |
155 | 176 | ||
156 | /* | 177 | /* |
@@ -165,43 +186,43 @@ static unsigned long __force_order; | |||
165 | static inline unsigned long native_read_cr0(void) | 186 | static inline unsigned long native_read_cr0(void) |
166 | { | 187 | { |
167 | unsigned long val; | 188 | unsigned long val; |
168 | asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order)); | 189 | asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); |
169 | return val; | 190 | return val; |
170 | } | 191 | } |
171 | 192 | ||
172 | static inline void native_write_cr0(unsigned long val) | 193 | static inline void native_write_cr0(unsigned long val) |
173 | { | 194 | { |
174 | asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order)); | 195 | asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); |
175 | } | 196 | } |
176 | 197 | ||
177 | static inline unsigned long native_read_cr2(void) | 198 | static inline unsigned long native_read_cr2(void) |
178 | { | 199 | { |
179 | unsigned long val; | 200 | unsigned long val; |
180 | asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order)); | 201 | asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); |
181 | return val; | 202 | return val; |
182 | } | 203 | } |
183 | 204 | ||
184 | static inline void native_write_cr2(unsigned long val) | 205 | static inline void native_write_cr2(unsigned long val) |
185 | { | 206 | { |
186 | asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order)); | 207 | asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); |
187 | } | 208 | } |
188 | 209 | ||
189 | static inline unsigned long native_read_cr3(void) | 210 | static inline unsigned long native_read_cr3(void) |
190 | { | 211 | { |
191 | unsigned long val; | 212 | unsigned long val; |
192 | asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order)); | 213 | asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); |
193 | return val; | 214 | return val; |
194 | } | 215 | } |
195 | 216 | ||
196 | static inline void native_write_cr3(unsigned long val) | 217 | static inline void native_write_cr3(unsigned long val) |
197 | { | 218 | { |
198 | asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order)); | 219 | asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); |
199 | } | 220 | } |
200 | 221 | ||
201 | static inline unsigned long native_read_cr4(void) | 222 | static inline unsigned long native_read_cr4(void) |
202 | { | 223 | { |
203 | unsigned long val; | 224 | unsigned long val; |
204 | asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order)); | 225 | asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); |
205 | return val; | 226 | return val; |
206 | } | 227 | } |
207 | 228 | ||
@@ -213,7 +234,7 @@ static inline unsigned long native_read_cr4_safe(void) | |||
213 | #ifdef CONFIG_X86_32 | 234 | #ifdef CONFIG_X86_32 |
214 | asm volatile("1: mov %%cr4, %0\n" | 235 | asm volatile("1: mov %%cr4, %0\n" |
215 | "2:\n" | 236 | "2:\n" |
216 | _ASM_EXTABLE(1b,2b) | 237 | _ASM_EXTABLE(1b, 2b) |
217 | : "=r" (val), "=m" (__force_order) : "0" (0)); | 238 | : "=r" (val), "=m" (__force_order) : "0" (0)); |
218 | #else | 239 | #else |
219 | val = native_read_cr4(); | 240 | val = native_read_cr4(); |
@@ -223,7 +244,7 @@ static inline unsigned long native_read_cr4_safe(void) | |||
223 | 244 | ||
224 | static inline void native_write_cr4(unsigned long val) | 245 | static inline void native_write_cr4(unsigned long val) |
225 | { | 246 | { |
226 | asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order)); | 247 | asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); |
227 | } | 248 | } |
228 | 249 | ||
229 | #ifdef CONFIG_X86_64 | 250 | #ifdef CONFIG_X86_64 |
@@ -244,6 +265,7 @@ static inline void native_wbinvd(void) | |||
244 | { | 265 | { |
245 | asm volatile("wbinvd": : :"memory"); | 266 | asm volatile("wbinvd": : :"memory"); |
246 | } | 267 | } |
268 | |||
247 | #ifdef CONFIG_PARAVIRT | 269 | #ifdef CONFIG_PARAVIRT |
248 | #include <asm/paravirt.h> | 270 | #include <asm/paravirt.h> |
249 | #else | 271 | #else |
@@ -276,7 +298,7 @@ static inline void clflush(volatile void *__p) | |||
276 | asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); | 298 | asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); |
277 | } | 299 | } |
278 | 300 | ||
279 | #define nop() __asm__ __volatile__ ("nop") | 301 | #define nop() asm volatile ("nop") |
280 | 302 | ||
281 | void disable_hlt(void); | 303 | void disable_hlt(void); |
282 | void enable_hlt(void); | 304 | void enable_hlt(void); |
@@ -296,16 +318,7 @@ void default_idle(void); | |||
296 | */ | 318 | */ |
297 | #ifdef CONFIG_X86_32 | 319 | #ifdef CONFIG_X86_32 |
298 | /* | 320 | /* |
299 | * For now, "wmb()" doesn't actually do anything, as all | 321 | * Some non-Intel clones support out of order store. wmb() ceases to be a |
300 | * Intel CPU's follow what Intel calls a *Processor Order*, | ||
301 | * in which all writes are seen in the program order even | ||
302 | * outside the CPU. | ||
303 | * | ||
304 | * I expect future Intel CPU's to have a weaker ordering, | ||
305 | * but I'd also expect them to finally get their act together | ||
306 | * and add some real memory barriers if so. | ||
307 | * | ||
308 | * Some non intel clones support out of order store. wmb() ceases to be a | ||
309 | * nop for these. | 322 | * nop for these. |
310 | */ | 323 | */ |
311 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) | 324 | #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) |
@@ -384,7 +397,7 @@ void default_idle(void); | |||
384 | # define smp_wmb() barrier() | 397 | # define smp_wmb() barrier() |
385 | #endif | 398 | #endif |
386 | #define smp_read_barrier_depends() read_barrier_depends() | 399 | #define smp_read_barrier_depends() read_barrier_depends() |
387 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | 400 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
388 | #else | 401 | #else |
389 | #define smp_mb() barrier() | 402 | #define smp_mb() barrier() |
390 | #define smp_rmb() barrier() | 403 | #define smp_rmb() barrier() |
diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h index cd955d3d112f..b1a4ea00df78 100644 --- a/include/asm-x86/tce.h +++ b/include/asm-x86/tce.h | |||
@@ -39,7 +39,7 @@ struct iommu_table; | |||
39 | #define TCE_RPN_MASK 0x0000fffffffff000ULL | 39 | #define TCE_RPN_MASK 0x0000fffffffff000ULL |
40 | 40 | ||
41 | extern void tce_build(struct iommu_table *tbl, unsigned long index, | 41 | extern void tce_build(struct iommu_table *tbl, unsigned long index, |
42 | unsigned int npages, unsigned long uaddr, int direction); | 42 | unsigned int npages, unsigned long uaddr, int direction); |
43 | extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); | 43 | extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); |
44 | extern void * __init alloc_tce_table(void); | 44 | extern void * __init alloc_tce_table(void); |
45 | extern void __init free_tce_table(void *tbl); | 45 | extern void __init free_tce_table(void *tbl); |
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h index 5bd508260ffb..4e053fa561a9 100644 --- a/include/asm-x86/thread_info_32.h +++ b/include/asm-x86/thread_info_32.h | |||
@@ -20,7 +20,8 @@ | |||
20 | * low level task data that entry.S needs immediate access to | 20 | * low level task data that entry.S needs immediate access to |
21 | * - this struct should fit entirely inside of one cache line | 21 | * - this struct should fit entirely inside of one cache line |
22 | * - this struct shares the supervisor stack pages | 22 | * - this struct shares the supervisor stack pages |
23 | * - if the contents of this structure are changed, the assembly constants must also be changed | 23 | * - if the contents of this structure are changed, |
24 | * the assembly constants must also be changed | ||
24 | */ | 25 | */ |
25 | #ifndef __ASSEMBLY__ | 26 | #ifndef __ASSEMBLY__ |
26 | 27 | ||
@@ -30,18 +31,16 @@ struct thread_info { | |||
30 | unsigned long flags; /* low level flags */ | 31 | unsigned long flags; /* low level flags */ |
31 | unsigned long status; /* thread-synchronous flags */ | 32 | unsigned long status; /* thread-synchronous flags */ |
32 | __u32 cpu; /* current CPU */ | 33 | __u32 cpu; /* current CPU */ |
33 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 34 | int preempt_count; /* 0 => preemptable, |
34 | 35 | <0 => BUG */ | |
35 | |||
36 | mm_segment_t addr_limit; /* thread address space: | 36 | mm_segment_t addr_limit; /* thread address space: |
37 | 0-0xBFFFFFFF for user-thead | 37 | 0-0xBFFFFFFF user-thread |
38 | 0-0xFFFFFFFF for kernel-thread | 38 | 0-0xFFFFFFFF kernel-thread |
39 | */ | 39 | */ |
40 | void *sysenter_return; | 40 | void *sysenter_return; |
41 | struct restart_block restart_block; | 41 | struct restart_block restart_block; |
42 | 42 | unsigned long previous_esp; /* ESP of the previous stack in | |
43 | unsigned long previous_esp; /* ESP of the previous stack in case | 43 | case of nested (IRQ) stacks |
44 | of nested (IRQ) stacks | ||
45 | */ | 44 | */ |
46 | __u8 supervisor_stack[0]; | 45 | __u8 supervisor_stack[0]; |
47 | }; | 46 | }; |
@@ -90,15 +89,16 @@ register unsigned long current_stack_pointer asm("esp") __used; | |||
90 | /* how to get the thread information struct from C */ | 89 | /* how to get the thread information struct from C */ |
91 | static inline struct thread_info *current_thread_info(void) | 90 | static inline struct thread_info *current_thread_info(void) |
92 | { | 91 | { |
93 | return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); | 92 | return (struct thread_info *) |
93 | (current_stack_pointer & ~(THREAD_SIZE - 1)); | ||
94 | } | 94 | } |
95 | 95 | ||
96 | /* thread information allocation */ | 96 | /* thread information allocation */ |
97 | #ifdef CONFIG_DEBUG_STACK_USAGE | 97 | #ifdef CONFIG_DEBUG_STACK_USAGE |
98 | #define alloc_thread_info(tsk) ((struct thread_info *) \ | 98 | #define alloc_thread_info(tsk) ((struct thread_info *) \ |
99 | __get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE))) | 99 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE))) |
100 | #else | 100 | #else |
101 | #define alloc_thread_info(tsk) ((struct thread_info *) \ | 101 | #define alloc_thread_info(tsk) ((struct thread_info *) \ |
102 | __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE))) | 102 | __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE))) |
103 | #endif | 103 | #endif |
104 | 104 | ||
@@ -107,7 +107,7 @@ static inline struct thread_info *current_thread_info(void) | |||
107 | #else /* !__ASSEMBLY__ */ | 107 | #else /* !__ASSEMBLY__ */ |
108 | 108 | ||
109 | /* how to get the thread information struct from ASM */ | 109 | /* how to get the thread information struct from ASM */ |
110 | #define GET_THREAD_INFO(reg) \ | 110 | #define GET_THREAD_INFO(reg) \ |
111 | movl $-THREAD_SIZE, reg; \ | 111 | movl $-THREAD_SIZE, reg; \ |
112 | andl %esp, reg | 112 | andl %esp, reg |
113 | 113 | ||
@@ -119,14 +119,16 @@ static inline struct thread_info *current_thread_info(void) | |||
119 | 119 | ||
120 | /* | 120 | /* |
121 | * thread information flags | 121 | * thread information flags |
122 | * - these are process state flags that various assembly files may need to access | 122 | * - these are process state flags that various |
123 | * assembly files may need to access | ||
123 | * - pending work-to-be-done flags are in LSW | 124 | * - pending work-to-be-done flags are in LSW |
124 | * - other flags in MSW | 125 | * - other flags in MSW |
125 | */ | 126 | */ |
126 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | 127 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
127 | #define TIF_SIGPENDING 1 /* signal pending */ | 128 | #define TIF_SIGPENDING 1 /* signal pending */ |
128 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 129 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
129 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ | 130 | #define TIF_SINGLESTEP 3 /* restore singlestep on return to |
131 | user mode */ | ||
130 | #define TIF_IRET 4 /* return with iret */ | 132 | #define TIF_IRET 4 /* return with iret */ |
131 | #define TIF_SYSCALL_EMU 5 /* syscall emulation active */ | 133 | #define TIF_SYSCALL_EMU 5 /* syscall emulation active */ |
132 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ | 134 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ |
@@ -143,36 +145,36 @@ static inline struct thread_info *current_thread_info(void) | |||
143 | #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ | 145 | #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */ |
144 | #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ | 146 | #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */ |
145 | 147 | ||
146 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 148 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
147 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | 149 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
148 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 150 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
149 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | 151 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
150 | #define _TIF_IRET (1<<TIF_IRET) | 152 | #define _TIF_IRET (1 << TIF_IRET) |
151 | #define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU) | 153 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
152 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 154 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
153 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 155 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
154 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 156 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
155 | #define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) | 157 | #define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) |
156 | #define _TIF_DEBUG (1<<TIF_DEBUG) | 158 | #define _TIF_DEBUG (1 << TIF_DEBUG) |
157 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) | 159 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) |
158 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 160 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
159 | #define _TIF_NOTSC (1<<TIF_NOTSC) | 161 | #define _TIF_NOTSC (1 << TIF_NOTSC) |
160 | #define _TIF_FORCED_TF (1<<TIF_FORCED_TF) | 162 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) |
161 | #define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR) | 163 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
162 | #define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR) | 164 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
163 | #define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS) | 165 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) |
164 | 166 | ||
165 | /* work to do on interrupt/exception return */ | 167 | /* work to do on interrupt/exception return */ |
166 | #define _TIF_WORK_MASK \ | 168 | #define _TIF_WORK_MASK \ |
167 | (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | 169 | (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
168 | _TIF_SECCOMP | _TIF_SYSCALL_EMU)) | 170 | _TIF_SECCOMP | _TIF_SYSCALL_EMU)) |
169 | /* work to do on any return to u-space */ | 171 | /* work to do on any return to u-space */ |
170 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 172 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) |
171 | 173 | ||
172 | /* flags to check in __switch_to() */ | 174 | /* flags to check in __switch_to() */ |
173 | #define _TIF_WORK_CTXSW \ | 175 | #define _TIF_WORK_CTXSW \ |
174 | (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \ | 176 | (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \ |
175 | _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS) | 177 | _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS) |
176 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | 178 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW |
177 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG) | 179 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG) |
178 | 180 | ||
@@ -184,8 +186,10 @@ static inline struct thread_info *current_thread_info(void) | |||
184 | * ever touches our thread-synchronous status, so we don't | 186 | * ever touches our thread-synchronous status, so we don't |
185 | * have to worry about atomic accesses. | 187 | * have to worry about atomic accesses. |
186 | */ | 188 | */ |
187 | #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ | 189 | #define TS_USEDFPU 0x0001 /* FPU was used by this task |
188 | #define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */ | 190 | this quantum (SMP) */ |
191 | #define TS_POLLING 0x0002 /* True if in idle loop | ||
192 | and not sleeping */ | ||
189 | 193 | ||
190 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | 194 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) |
191 | 195 | ||
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h index 6c9b214b8fc3..1e5c6f6152cd 100644 --- a/include/asm-x86/thread_info_64.h +++ b/include/asm-x86/thread_info_64.h | |||
@@ -29,9 +29,9 @@ struct thread_info { | |||
29 | __u32 flags; /* low level flags */ | 29 | __u32 flags; /* low level flags */ |
30 | __u32 status; /* thread synchronous flags */ | 30 | __u32 status; /* thread synchronous flags */ |
31 | __u32 cpu; /* current CPU */ | 31 | __u32 cpu; /* current CPU */ |
32 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 32 | int preempt_count; /* 0 => preemptable, |
33 | 33 | <0 => BUG */ | |
34 | mm_segment_t addr_limit; | 34 | mm_segment_t addr_limit; |
35 | struct restart_block restart_block; | 35 | struct restart_block restart_block; |
36 | #ifdef CONFIG_IA32_EMULATION | 36 | #ifdef CONFIG_IA32_EMULATION |
37 | void __user *sysenter_return; | 37 | void __user *sysenter_return; |
@@ -61,17 +61,17 @@ struct thread_info { | |||
61 | #define init_stack (init_thread_union.stack) | 61 | #define init_stack (init_thread_union.stack) |
62 | 62 | ||
63 | static inline struct thread_info *current_thread_info(void) | 63 | static inline struct thread_info *current_thread_info(void) |
64 | { | 64 | { |
65 | struct thread_info *ti; | 65 | struct thread_info *ti; |
66 | ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); | 66 | ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE); |
67 | return ti; | 67 | return ti; |
68 | } | 68 | } |
69 | 69 | ||
70 | /* do not use in interrupt context */ | 70 | /* do not use in interrupt context */ |
71 | static inline struct thread_info *stack_thread_info(void) | 71 | static inline struct thread_info *stack_thread_info(void) |
72 | { | 72 | { |
73 | struct thread_info *ti; | 73 | struct thread_info *ti; |
74 | __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1))); | 74 | asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1))); |
75 | return ti; | 75 | return ti; |
76 | } | 76 | } |
77 | 77 | ||
@@ -82,8 +82,8 @@ static inline struct thread_info *stack_thread_info(void) | |||
82 | #define THREAD_FLAGS GFP_KERNEL | 82 | #define THREAD_FLAGS GFP_KERNEL |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | #define alloc_thread_info(tsk) \ | 85 | #define alloc_thread_info(tsk) \ |
86 | ((struct thread_info *) __get_free_pages(THREAD_FLAGS, THREAD_ORDER)) | 86 | ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) |
87 | 87 | ||
88 | #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) | 88 | #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) |
89 | 89 | ||
@@ -98,7 +98,8 @@ static inline struct thread_info *stack_thread_info(void) | |||
98 | 98 | ||
99 | /* | 99 | /* |
100 | * thread information flags | 100 | * thread information flags |
101 | * - these are process state flags that various assembly files may need to access | 101 | * - these are process state flags that various assembly files |
102 | * may need to access | ||
102 | * - pending work-to-be-done flags are in LSW | 103 | * - pending work-to-be-done flags are in LSW |
103 | * - other flags in MSW | 104 | * - other flags in MSW |
104 | * Warning: layout of LSW is hardcoded in entry.S | 105 | * Warning: layout of LSW is hardcoded in entry.S |
@@ -114,7 +115,7 @@ static inline struct thread_info *stack_thread_info(void) | |||
114 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 115 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
115 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ | 116 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ |
116 | /* 16 free */ | 117 | /* 16 free */ |
117 | #define TIF_IA32 17 /* 32bit process */ | 118 | #define TIF_IA32 17 /* 32bit process */ |
118 | #define TIF_FORK 18 /* ret_from_fork */ | 119 | #define TIF_FORK 18 /* ret_from_fork */ |
119 | #define TIF_ABI_PENDING 19 | 120 | #define TIF_ABI_PENDING 19 |
120 | #define TIF_MEMDIE 20 | 121 | #define TIF_MEMDIE 20 |
@@ -126,39 +127,40 @@ static inline struct thread_info *stack_thread_info(void) | |||
126 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | 127 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ |
127 | #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ | 128 | #define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ |
128 | 129 | ||
129 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 130 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
130 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | 131 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
131 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | 132 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
132 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | 133 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
133 | #define _TIF_IRET (1<<TIF_IRET) | 134 | #define _TIF_IRET (1 << TIF_IRET) |
134 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 135 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
135 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 136 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
136 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 137 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) |
137 | #define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY) | 138 | #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) |
138 | #define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) | 139 | #define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED) |
139 | #define _TIF_IA32 (1<<TIF_IA32) | 140 | #define _TIF_IA32 (1 << TIF_IA32) |
140 | #define _TIF_FORK (1<<TIF_FORK) | 141 | #define _TIF_FORK (1 << TIF_FORK) |
141 | #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) | 142 | #define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING) |
142 | #define _TIF_DEBUG (1<<TIF_DEBUG) | 143 | #define _TIF_DEBUG (1 << TIF_DEBUG) |
143 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) | 144 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) |
144 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 145 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
145 | #define _TIF_FORCED_TF (1<<TIF_FORCED_TF) | 146 | #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) |
146 | #define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR) | 147 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
147 | #define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR) | 148 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
148 | #define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS) | 149 | #define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS) |
149 | 150 | ||
150 | /* work to do on interrupt/exception return */ | 151 | /* work to do on interrupt/exception return */ |
151 | #define _TIF_WORK_MASK \ | 152 | #define _TIF_WORK_MASK \ |
152 | (0x0000FFFF & ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP)) | 153 | (0x0000FFFF & \ |
154 | ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP|_TIF_SECCOMP)) | ||
153 | /* work to do on any return to user space */ | 155 | /* work to do on any return to user space */ |
154 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 156 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) |
155 | 157 | ||
156 | #define _TIF_DO_NOTIFY_MASK \ | 158 | #define _TIF_DO_NOTIFY_MASK \ |
157 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) | 159 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) |
158 | 160 | ||
159 | /* flags to check in __switch_to() */ | 161 | /* flags to check in __switch_to() */ |
160 | #define _TIF_WORK_CTXSW \ | 162 | #define _TIF_WORK_CTXSW \ |
161 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS) | 163 | (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS) |
162 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW | 164 | #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW |
163 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) | 165 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) |
164 | 166 | ||
@@ -171,9 +173,11 @@ static inline struct thread_info *stack_thread_info(void) | |||
171 | * ever touches our thread-synchronous status, so we don't | 173 | * ever touches our thread-synchronous status, so we don't |
172 | * have to worry about atomic accesses. | 174 | * have to worry about atomic accesses. |
173 | */ | 175 | */ |
174 | #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ | 176 | #define TS_USEDFPU 0x0001 /* FPU was used by this task |
177 | this quantum (SMP) */ | ||
175 | #define TS_COMPAT 0x0002 /* 32bit syscall active */ | 178 | #define TS_COMPAT 0x0002 /* 32bit syscall active */ |
176 | #define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */ | 179 | #define TS_POLLING 0x0004 /* true if in idle loop |
180 | and not sleeping */ | ||
177 | 181 | ||
178 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | 182 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) |
179 | 183 | ||
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h index 3998709ed637..0c0674d94255 100644 --- a/include/asm-x86/tlbflush.h +++ b/include/asm-x86/tlbflush.h | |||
@@ -32,7 +32,7 @@ static inline void __native_flush_tlb_global(void) | |||
32 | 32 | ||
33 | static inline void __native_flush_tlb_single(unsigned long addr) | 33 | static inline void __native_flush_tlb_single(unsigned long addr) |
34 | { | 34 | { |
35 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory"); | 35 | asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline void __flush_tlb_all(void) | 38 | static inline void __flush_tlb_all(void) |
@@ -134,8 +134,7 @@ void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, | |||
134 | #define TLBSTATE_LAZY 2 | 134 | #define TLBSTATE_LAZY 2 |
135 | 135 | ||
136 | #ifdef CONFIG_X86_32 | 136 | #ifdef CONFIG_X86_32 |
137 | struct tlb_state | 137 | struct tlb_state { |
138 | { | ||
139 | struct mm_struct *active_mm; | 138 | struct mm_struct *active_mm; |
140 | int state; | 139 | int state; |
141 | char __cacheline_padding[L1_CACHE_BYTES-8]; | 140 | char __cacheline_padding[L1_CACHE_BYTES-8]; |
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h index 8af05a93f097..81a29eb08ac4 100644 --- a/include/asm-x86/topology.h +++ b/include/asm-x86/topology.h | |||
@@ -32,13 +32,18 @@ | |||
32 | /* Mappings between logical cpu number and node number */ | 32 | /* Mappings between logical cpu number and node number */ |
33 | #ifdef CONFIG_X86_32 | 33 | #ifdef CONFIG_X86_32 |
34 | extern int cpu_to_node_map[]; | 34 | extern int cpu_to_node_map[]; |
35 | |||
36 | #else | 35 | #else |
36 | /* Returns the number of the current Node. */ | ||
37 | #define numa_node_id() (early_cpu_to_node(raw_smp_processor_id())) | ||
38 | #endif | ||
39 | |||
37 | DECLARE_PER_CPU(int, x86_cpu_to_node_map); | 40 | DECLARE_PER_CPU(int, x86_cpu_to_node_map); |
41 | |||
42 | #ifdef CONFIG_SMP | ||
38 | extern int x86_cpu_to_node_map_init[]; | 43 | extern int x86_cpu_to_node_map_init[]; |
39 | extern void *x86_cpu_to_node_map_early_ptr; | 44 | extern void *x86_cpu_to_node_map_early_ptr; |
40 | /* Returns the number of the current Node. */ | 45 | #else |
41 | #define numa_node_id() (early_cpu_to_node(raw_smp_processor_id())) | 46 | #define x86_cpu_to_node_map_early_ptr NULL |
42 | #endif | 47 | #endif |
43 | 48 | ||
44 | extern cpumask_t node_to_cpumask_map[]; | 49 | extern cpumask_t node_to_cpumask_map[]; |
@@ -54,6 +59,8 @@ static inline int cpu_to_node(int cpu) | |||
54 | } | 59 | } |
55 | 60 | ||
56 | #else /* CONFIG_X86_64 */ | 61 | #else /* CONFIG_X86_64 */ |
62 | |||
63 | #ifdef CONFIG_SMP | ||
57 | static inline int early_cpu_to_node(int cpu) | 64 | static inline int early_cpu_to_node(int cpu) |
58 | { | 65 | { |
59 | int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; | 66 | int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr; |
@@ -65,21 +72,21 @@ static inline int early_cpu_to_node(int cpu) | |||
65 | else | 72 | else |
66 | return NUMA_NO_NODE; | 73 | return NUMA_NO_NODE; |
67 | } | 74 | } |
75 | #else | ||
76 | #define early_cpu_to_node(cpu) cpu_to_node(cpu) | ||
77 | #endif | ||
68 | 78 | ||
69 | static inline int cpu_to_node(int cpu) | 79 | static inline int cpu_to_node(int cpu) |
70 | { | 80 | { |
71 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | 81 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
72 | if (x86_cpu_to_node_map_early_ptr) { | 82 | if (x86_cpu_to_node_map_early_ptr) { |
73 | printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n", | 83 | printk("KERN_NOTICE cpu_to_node(%d): usage too early!\n", |
74 | (int)cpu); | 84 | (int)cpu); |
75 | dump_stack(); | 85 | dump_stack(); |
76 | return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; | 86 | return ((int *)x86_cpu_to_node_map_early_ptr)[cpu]; |
77 | } | 87 | } |
78 | #endif | 88 | #endif |
79 | if (per_cpu_offset(cpu)) | 89 | return per_cpu(x86_cpu_to_node_map, cpu); |
80 | return per_cpu(x86_cpu_to_node_map, cpu); | ||
81 | else | ||
82 | return NUMA_NO_NODE; | ||
83 | } | 90 | } |
84 | #endif /* CONFIG_X86_64 */ | 91 | #endif /* CONFIG_X86_64 */ |
85 | 92 | ||
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h new file mode 100644 index 000000000000..b156b08d0131 --- /dev/null +++ b/include/asm-x86/trampoline.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef __TRAMPOLINE_HEADER | ||
2 | #define __TRAMPOLINE_HEADER | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | /* | ||
7 | * Trampoline 80x86 program as an array. | ||
8 | */ | ||
9 | extern const unsigned char trampoline_data []; | ||
10 | extern const unsigned char trampoline_end []; | ||
11 | extern unsigned char *trampoline_base; | ||
12 | |||
13 | extern unsigned long init_rsp; | ||
14 | extern unsigned long initial_code; | ||
15 | |||
16 | #define TRAMPOLINE_BASE 0x6000 | ||
17 | extern unsigned long setup_trampoline(void); | ||
18 | |||
19 | #endif /* __ASSEMBLY__ */ | ||
20 | |||
21 | #endif /* __TRAMPOLINE_HEADER */ | ||
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h index 7d3e27f7d484..d2d8eb5b55f5 100644 --- a/include/asm-x86/tsc.h +++ b/include/asm-x86/tsc.h | |||
@@ -42,7 +42,7 @@ static inline cycles_t vget_cycles(void) | |||
42 | if (!cpu_has_tsc) | 42 | if (!cpu_has_tsc) |
43 | return 0; | 43 | return 0; |
44 | #endif | 44 | #endif |
45 | return (cycles_t) __native_read_tsc(); | 45 | return (cycles_t)__native_read_tsc(); |
46 | } | 46 | } |
47 | 47 | ||
48 | extern void tsc_init(void); | 48 | extern void tsc_init(void); |
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h index fcc570ec4fee..8e7595c1f34e 100644 --- a/include/asm-x86/uaccess_32.h +++ b/include/asm-x86/uaccess_32.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #define get_fs() (current_thread_info()->addr_limit) | 32 | #define get_fs() (current_thread_info()->addr_limit) |
33 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | 33 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) |
34 | 34 | ||
35 | #define segment_eq(a,b) ((a).seg == (b).seg) | 35 | #define segment_eq(a, b) ((a).seg == (b).seg) |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * movsl can be slow when source and dest are not both 8-byte aligned | 38 | * movsl can be slow when source and dest are not both 8-byte aligned |
@@ -43,7 +43,9 @@ extern struct movsl_mask { | |||
43 | } ____cacheline_aligned_in_smp movsl_mask; | 43 | } ____cacheline_aligned_in_smp movsl_mask; |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) | 46 | #define __addr_ok(addr) \ |
47 | ((unsigned long __force)(addr) < \ | ||
48 | (current_thread_info()->addr_limit.seg)) | ||
47 | 49 | ||
48 | /* | 50 | /* |
49 | * Test whether a block of memory is a valid user space address. | 51 | * Test whether a block of memory is a valid user space address. |
@@ -54,13 +56,16 @@ extern struct movsl_mask { | |||
54 | * | 56 | * |
55 | * This needs 33-bit arithmetic. We have a carry... | 57 | * This needs 33-bit arithmetic. We have a carry... |
56 | */ | 58 | */ |
57 | #define __range_ok(addr,size) ({ \ | 59 | #define __range_ok(addr, size) \ |
58 | unsigned long flag,roksum; \ | 60 | ({ \ |
59 | __chk_user_ptr(addr); \ | 61 | unsigned long flag, roksum; \ |
60 | asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ | 62 | __chk_user_ptr(addr); \ |
61 | :"=&r" (flag), "=r" (roksum) \ | 63 | asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ |
62 | :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \ | 64 | :"=&r" (flag), "=r" (roksum) \ |
63 | flag; }) | 65 | :"1" (addr), "g" ((int)(size)), \ |
66 | "rm" (current_thread_info()->addr_limit.seg)); \ | ||
67 | flag; \ | ||
68 | }) | ||
64 | 69 | ||
65 | /** | 70 | /** |
66 | * access_ok: - Checks if a user space pointer is valid | 71 | * access_ok: - Checks if a user space pointer is valid |
@@ -81,7 +86,7 @@ extern struct movsl_mask { | |||
81 | * checks that the pointer is in the user space range - after calling | 86 | * checks that the pointer is in the user space range - after calling |
82 | * this function, memory access functions may still return -EFAULT. | 87 | * this function, memory access functions may still return -EFAULT. |
83 | */ | 88 | */ |
84 | #define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) | 89 | #define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0)) |
85 | 90 | ||
86 | /* | 91 | /* |
87 | * The exception table consists of pairs of addresses: the first is the | 92 | * The exception table consists of pairs of addresses: the first is the |
@@ -96,8 +101,7 @@ extern struct movsl_mask { | |||
96 | * on our cache or tlb entries. | 101 | * on our cache or tlb entries. |
97 | */ | 102 | */ |
98 | 103 | ||
99 | struct exception_table_entry | 104 | struct exception_table_entry { |
100 | { | ||
101 | unsigned long insn, fixup; | 105 | unsigned long insn, fixup; |
102 | }; | 106 | }; |
103 | 107 | ||
@@ -122,13 +126,15 @@ extern void __get_user_1(void); | |||
122 | extern void __get_user_2(void); | 126 | extern void __get_user_2(void); |
123 | extern void __get_user_4(void); | 127 | extern void __get_user_4(void); |
124 | 128 | ||
125 | #define __get_user_x(size,ret,x,ptr) \ | 129 | #define __get_user_x(size, ret, x, ptr) \ |
126 | __asm__ __volatile__("call __get_user_" #size \ | 130 | asm volatile("call __get_user_" #size \ |
127 | :"=a" (ret),"=d" (x) \ | 131 | :"=a" (ret),"=d" (x) \ |
128 | :"0" (ptr)) | 132 | :"0" (ptr)) |
133 | |||
129 | 134 | ||
135 | /* Careful: we have to cast the result to the type of the pointer | ||
136 | * for sign reasons */ | ||
130 | 137 | ||
131 | /* Careful: we have to cast the result to the type of the pointer for sign reasons */ | ||
132 | /** | 138 | /** |
133 | * get_user: - Get a simple variable from user space. | 139 | * get_user: - Get a simple variable from user space. |
134 | * @x: Variable to store result. | 140 | * @x: Variable to store result. |
@@ -146,15 +152,24 @@ extern void __get_user_4(void); | |||
146 | * Returns zero on success, or -EFAULT on error. | 152 | * Returns zero on success, or -EFAULT on error. |
147 | * On error, the variable @x is set to zero. | 153 | * On error, the variable @x is set to zero. |
148 | */ | 154 | */ |
149 | #define get_user(x,ptr) \ | 155 | #define get_user(x, ptr) \ |
150 | ({ int __ret_gu; \ | 156 | ({ \ |
157 | int __ret_gu; \ | ||
151 | unsigned long __val_gu; \ | 158 | unsigned long __val_gu; \ |
152 | __chk_user_ptr(ptr); \ | 159 | __chk_user_ptr(ptr); \ |
153 | switch(sizeof (*(ptr))) { \ | 160 | switch (sizeof(*(ptr))) { \ |
154 | case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ | 161 | case 1: \ |
155 | case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ | 162 | __get_user_x(1, __ret_gu, __val_gu, ptr); \ |
156 | case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ | 163 | break; \ |
157 | default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ | 164 | case 2: \ |
165 | __get_user_x(2, __ret_gu, __val_gu, ptr); \ | ||
166 | break; \ | ||
167 | case 4: \ | ||
168 | __get_user_x(4, __ret_gu, __val_gu, ptr); \ | ||
169 | break; \ | ||
170 | default: \ | ||
171 | __get_user_x(X, __ret_gu, __val_gu, ptr); \ | ||
172 | break; \ | ||
158 | } \ | 173 | } \ |
159 | (x) = (__typeof__(*(ptr)))__val_gu; \ | 174 | (x) = (__typeof__(*(ptr)))__val_gu; \ |
160 | __ret_gu; \ | 175 | __ret_gu; \ |
@@ -171,11 +186,25 @@ extern void __put_user_2(void); | |||
171 | extern void __put_user_4(void); | 186 | extern void __put_user_4(void); |
172 | extern void __put_user_8(void); | 187 | extern void __put_user_8(void); |
173 | 188 | ||
174 | #define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | 189 | #define __put_user_1(x, ptr) \ |
175 | #define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | 190 | asm volatile("call __put_user_1" : "=a" (__ret_pu) \ |
176 | #define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) | 191 | : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) |
177 | #define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr)) | 192 | |
178 | #define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr)) | 193 | #define __put_user_2(x, ptr) \ |
194 | asm volatile("call __put_user_2" : "=a" (__ret_pu) \ | ||
195 | : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
196 | |||
197 | #define __put_user_4(x, ptr) \ | ||
198 | asm volatile("call __put_user_4" : "=a" (__ret_pu) \ | ||
199 | : "0" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
200 | |||
201 | #define __put_user_8(x, ptr) \ | ||
202 | asm volatile("call __put_user_8" : "=a" (__ret_pu) \ | ||
203 | : "A" ((typeof(*(ptr)))(x)), "c" (ptr)) | ||
204 | |||
205 | #define __put_user_X(x, ptr) \ | ||
206 | asm volatile("call __put_user_X" : "=a" (__ret_pu) \ | ||
207 | : "c" (ptr)) | ||
179 | 208 | ||
180 | /** | 209 | /** |
181 | * put_user: - Write a simple value into user space. | 210 | * put_user: - Write a simple value into user space. |
@@ -195,32 +224,43 @@ extern void __put_user_8(void); | |||
195 | */ | 224 | */ |
196 | #ifdef CONFIG_X86_WP_WORKS_OK | 225 | #ifdef CONFIG_X86_WP_WORKS_OK |
197 | 226 | ||
198 | #define put_user(x,ptr) \ | 227 | #define put_user(x, ptr) \ |
199 | ({ int __ret_pu; \ | 228 | ({ \ |
229 | int __ret_pu; \ | ||
200 | __typeof__(*(ptr)) __pu_val; \ | 230 | __typeof__(*(ptr)) __pu_val; \ |
201 | __chk_user_ptr(ptr); \ | 231 | __chk_user_ptr(ptr); \ |
202 | __pu_val = x; \ | 232 | __pu_val = x; \ |
203 | switch(sizeof(*(ptr))) { \ | 233 | switch (sizeof(*(ptr))) { \ |
204 | case 1: __put_user_1(__pu_val, ptr); break; \ | 234 | case 1: \ |
205 | case 2: __put_user_2(__pu_val, ptr); break; \ | 235 | __put_user_1(__pu_val, ptr); \ |
206 | case 4: __put_user_4(__pu_val, ptr); break; \ | 236 | break; \ |
207 | case 8: __put_user_8(__pu_val, ptr); break; \ | 237 | case 2: \ |
208 | default:__put_user_X(__pu_val, ptr); break; \ | 238 | __put_user_2(__pu_val, ptr); \ |
239 | break; \ | ||
240 | case 4: \ | ||
241 | __put_user_4(__pu_val, ptr); \ | ||
242 | break; \ | ||
243 | case 8: \ | ||
244 | __put_user_8(__pu_val, ptr); \ | ||
245 | break; \ | ||
246 | default: \ | ||
247 | __put_user_X(__pu_val, ptr); \ | ||
248 | break; \ | ||
209 | } \ | 249 | } \ |
210 | __ret_pu; \ | 250 | __ret_pu; \ |
211 | }) | 251 | }) |
212 | 252 | ||
213 | #else | 253 | #else |
214 | #define put_user(x,ptr) \ | 254 | #define put_user(x, ptr) \ |
215 | ({ \ | 255 | ({ \ |
216 | int __ret_pu; \ | 256 | int __ret_pu; \ |
217 | __typeof__(*(ptr)) __pus_tmp = x; \ | 257 | __typeof__(*(ptr))__pus_tmp = x; \ |
218 | __ret_pu=0; \ | 258 | __ret_pu = 0; \ |
219 | if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ | 259 | if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ |
220 | sizeof(*(ptr))) != 0)) \ | 260 | sizeof(*(ptr))) != 0)) \ |
221 | __ret_pu=-EFAULT; \ | 261 | __ret_pu = -EFAULT; \ |
222 | __ret_pu; \ | 262 | __ret_pu; \ |
223 | }) | 263 | }) |
224 | 264 | ||
225 | 265 | ||
226 | #endif | 266 | #endif |
@@ -245,8 +285,8 @@ extern void __put_user_8(void); | |||
245 | * Returns zero on success, or -EFAULT on error. | 285 | * Returns zero on success, or -EFAULT on error. |
246 | * On error, the variable @x is set to zero. | 286 | * On error, the variable @x is set to zero. |
247 | */ | 287 | */ |
248 | #define __get_user(x,ptr) \ | 288 | #define __get_user(x, ptr) \ |
249 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 289 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
250 | 290 | ||
251 | 291 | ||
252 | /** | 292 | /** |
@@ -268,54 +308,62 @@ extern void __put_user_8(void); | |||
268 | * | 308 | * |
269 | * Returns zero on success, or -EFAULT on error. | 309 | * Returns zero on success, or -EFAULT on error. |
270 | */ | 310 | */ |
271 | #define __put_user(x,ptr) \ | 311 | #define __put_user(x, ptr) \ |
272 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 312 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
273 | 313 | ||
274 | #define __put_user_nocheck(x,ptr,size) \ | 314 | #define __put_user_nocheck(x, ptr, size) \ |
275 | ({ \ | 315 | ({ \ |
276 | long __pu_err; \ | 316 | long __pu_err; \ |
277 | __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ | 317 | __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ |
278 | __pu_err; \ | 318 | __pu_err; \ |
279 | }) | 319 | }) |
280 | 320 | ||
281 | 321 | ||
282 | #define __put_user_u64(x, addr, err) \ | 322 | #define __put_user_u64(x, addr, err) \ |
283 | __asm__ __volatile__( \ | 323 | asm volatile("1: movl %%eax,0(%2)\n" \ |
284 | "1: movl %%eax,0(%2)\n" \ | 324 | "2: movl %%edx,4(%2)\n" \ |
285 | "2: movl %%edx,4(%2)\n" \ | 325 | "3:\n" \ |
286 | "3:\n" \ | 326 | ".section .fixup,\"ax\"\n" \ |
287 | ".section .fixup,\"ax\"\n" \ | 327 | "4: movl %3,%0\n" \ |
288 | "4: movl %3,%0\n" \ | 328 | " jmp 3b\n" \ |
289 | " jmp 3b\n" \ | 329 | ".previous\n" \ |
290 | ".previous\n" \ | 330 | _ASM_EXTABLE(1b, 4b) \ |
291 | _ASM_EXTABLE(1b,4b) \ | 331 | _ASM_EXTABLE(2b, 4b) \ |
292 | _ASM_EXTABLE(2b,4b) \ | 332 | : "=r" (err) \ |
293 | : "=r"(err) \ | 333 | : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) |
294 | : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) | ||
295 | 334 | ||
296 | #ifdef CONFIG_X86_WP_WORKS_OK | 335 | #ifdef CONFIG_X86_WP_WORKS_OK |
297 | 336 | ||
298 | #define __put_user_size(x,ptr,size,retval,errret) \ | 337 | #define __put_user_size(x, ptr, size, retval, errret) \ |
299 | do { \ | 338 | do { \ |
300 | retval = 0; \ | 339 | retval = 0; \ |
301 | __chk_user_ptr(ptr); \ | 340 | __chk_user_ptr(ptr); \ |
302 | switch (size) { \ | 341 | switch (size) { \ |
303 | case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \ | 342 | case 1: \ |
304 | case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \ | 343 | __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \ |
305 | case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \ | 344 | break; \ |
306 | case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ | 345 | case 2: \ |
307 | default: __put_user_bad(); \ | 346 | __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ |
347 | break; \ | ||
348 | case 4: \ | ||
349 | __put_user_asm(x, ptr, retval, "l", "", "ir", errret); \ | ||
350 | break; \ | ||
351 | case 8: \ | ||
352 | __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ | ||
353 | break; \ | ||
354 | default: \ | ||
355 | __put_user_bad(); \ | ||
308 | } \ | 356 | } \ |
309 | } while (0) | 357 | } while (0) |
310 | 358 | ||
311 | #else | 359 | #else |
312 | 360 | ||
313 | #define __put_user_size(x,ptr,size,retval,errret) \ | 361 | #define __put_user_size(x, ptr, size, retval, errret) \ |
314 | do { \ | 362 | do { \ |
315 | __typeof__(*(ptr)) __pus_tmp = x; \ | 363 | __typeof__(*(ptr))__pus_tmp = x; \ |
316 | retval = 0; \ | 364 | retval = 0; \ |
317 | \ | 365 | \ |
318 | if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ | 366 | if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ |
319 | retval = errret; \ | 367 | retval = errret; \ |
320 | } while (0) | 368 | } while (0) |
321 | 369 | ||
@@ -329,65 +377,70 @@ struct __large_struct { unsigned long buf[100]; }; | |||
329 | * aliasing issues. | 377 | * aliasing issues. |
330 | */ | 378 | */ |
331 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | 379 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ |
332 | __asm__ __volatile__( \ | 380 | asm volatile("1: mov"itype" %"rtype"1,%2\n" \ |
333 | "1: mov"itype" %"rtype"1,%2\n" \ | 381 | "2:\n" \ |
334 | "2:\n" \ | 382 | ".section .fixup,\"ax\"\n" \ |
335 | ".section .fixup,\"ax\"\n" \ | 383 | "3: movl %3,%0\n" \ |
336 | "3: movl %3,%0\n" \ | 384 | " jmp 2b\n" \ |
337 | " jmp 2b\n" \ | 385 | ".previous\n" \ |
338 | ".previous\n" \ | 386 | _ASM_EXTABLE(1b, 3b) \ |
339 | _ASM_EXTABLE(1b,3b) \ | 387 | : "=r"(err) \ |
340 | : "=r"(err) \ | 388 | : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err)) |
341 | : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) | 389 | |
342 | 390 | ||
343 | 391 | #define __get_user_nocheck(x, ptr, size) \ | |
344 | #define __get_user_nocheck(x,ptr,size) \ | 392 | ({ \ |
345 | ({ \ | 393 | long __gu_err; \ |
346 | long __gu_err; \ | 394 | unsigned long __gu_val; \ |
347 | unsigned long __gu_val; \ | 395 | __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ |
348 | __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ | 396 | (x) = (__typeof__(*(ptr)))__gu_val; \ |
349 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 397 | __gu_err; \ |
350 | __gu_err; \ | ||
351 | }) | 398 | }) |
352 | 399 | ||
353 | extern long __get_user_bad(void); | 400 | extern long __get_user_bad(void); |
354 | 401 | ||
355 | #define __get_user_size(x,ptr,size,retval,errret) \ | 402 | #define __get_user_size(x, ptr, size, retval, errret) \ |
356 | do { \ | 403 | do { \ |
357 | retval = 0; \ | 404 | retval = 0; \ |
358 | __chk_user_ptr(ptr); \ | 405 | __chk_user_ptr(ptr); \ |
359 | switch (size) { \ | 406 | switch (size) { \ |
360 | case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \ | 407 | case 1: \ |
361 | case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ | 408 | __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \ |
362 | case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ | 409 | break; \ |
363 | default: (x) = __get_user_bad(); \ | 410 | case 2: \ |
411 | __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \ | ||
412 | break; \ | ||
413 | case 4: \ | ||
414 | __get_user_asm(x, ptr, retval, "l", "", "=r", errret); \ | ||
415 | break; \ | ||
416 | default: \ | ||
417 | (x) = __get_user_bad(); \ | ||
364 | } \ | 418 | } \ |
365 | } while (0) | 419 | } while (0) |
366 | 420 | ||
367 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ | 421 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ |
368 | __asm__ __volatile__( \ | 422 | asm volatile("1: mov"itype" %2,%"rtype"1\n" \ |
369 | "1: mov"itype" %2,%"rtype"1\n" \ | 423 | "2:\n" \ |
370 | "2:\n" \ | 424 | ".section .fixup,\"ax\"\n" \ |
371 | ".section .fixup,\"ax\"\n" \ | 425 | "3: movl %3,%0\n" \ |
372 | "3: movl %3,%0\n" \ | 426 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
373 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | 427 | " jmp 2b\n" \ |
374 | " jmp 2b\n" \ | 428 | ".previous\n" \ |
375 | ".previous\n" \ | 429 | _ASM_EXTABLE(1b, 3b) \ |
376 | _ASM_EXTABLE(1b,3b) \ | 430 | : "=r" (err), ltype (x) \ |
377 | : "=r"(err), ltype (x) \ | 431 | : "m" (__m(addr)), "i" (errret), "0" (err)) |
378 | : "m"(__m(addr)), "i"(errret), "0"(err)) | 432 | |
379 | 433 | ||
380 | 434 | unsigned long __must_check __copy_to_user_ll | |
381 | unsigned long __must_check __copy_to_user_ll(void __user *to, | 435 | (void __user *to, const void *from, unsigned long n); |
382 | const void *from, unsigned long n); | 436 | unsigned long __must_check __copy_from_user_ll |
383 | unsigned long __must_check __copy_from_user_ll(void *to, | 437 | (void *to, const void __user *from, unsigned long n); |
384 | const void __user *from, unsigned long n); | 438 | unsigned long __must_check __copy_from_user_ll_nozero |
385 | unsigned long __must_check __copy_from_user_ll_nozero(void *to, | 439 | (void *to, const void __user *from, unsigned long n); |
386 | const void __user *from, unsigned long n); | 440 | unsigned long __must_check __copy_from_user_ll_nocache |
387 | unsigned long __must_check __copy_from_user_ll_nocache(void *to, | 441 | (void *to, const void __user *from, unsigned long n); |
388 | const void __user *from, unsigned long n); | 442 | unsigned long __must_check __copy_from_user_ll_nocache_nozero |
389 | unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to, | 443 | (void *to, const void __user *from, unsigned long n); |
390 | const void __user *from, unsigned long n); | ||
391 | 444 | ||
392 | /** | 445 | /** |
393 | * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. | 446 | * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. |
@@ -416,13 +469,16 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) | |||
416 | 469 | ||
417 | switch (n) { | 470 | switch (n) { |
418 | case 1: | 471 | case 1: |
419 | __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); | 472 | __put_user_size(*(u8 *)from, (u8 __user *)to, |
473 | 1, ret, 1); | ||
420 | return ret; | 474 | return ret; |
421 | case 2: | 475 | case 2: |
422 | __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); | 476 | __put_user_size(*(u16 *)from, (u16 __user *)to, |
477 | 2, ret, 2); | ||
423 | return ret; | 478 | return ret; |
424 | case 4: | 479 | case 4: |
425 | __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); | 480 | __put_user_size(*(u32 *)from, (u32 __user *)to, |
481 | 4, ret, 4); | ||
426 | return ret; | 482 | return ret; |
427 | } | 483 | } |
428 | } | 484 | } |
@@ -545,19 +601,21 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to, | |||
545 | } | 601 | } |
546 | 602 | ||
547 | static __always_inline unsigned long | 603 | static __always_inline unsigned long |
548 | __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) | 604 | __copy_from_user_inatomic_nocache(void *to, const void __user *from, |
605 | unsigned long n) | ||
549 | { | 606 | { |
550 | return __copy_from_user_ll_nocache_nozero(to, from, n); | 607 | return __copy_from_user_ll_nocache_nozero(to, from, n); |
551 | } | 608 | } |
552 | 609 | ||
553 | unsigned long __must_check copy_to_user(void __user *to, | 610 | unsigned long __must_check copy_to_user(void __user *to, |
554 | const void *from, unsigned long n); | 611 | const void *from, unsigned long n); |
555 | unsigned long __must_check copy_from_user(void *to, | 612 | unsigned long __must_check copy_from_user(void *to, |
556 | const void __user *from, unsigned long n); | 613 | const void __user *from, |
614 | unsigned long n); | ||
557 | long __must_check strncpy_from_user(char *dst, const char __user *src, | 615 | long __must_check strncpy_from_user(char *dst, const char __user *src, |
558 | long count); | 616 | long count); |
559 | long __must_check __strncpy_from_user(char *dst, | 617 | long __must_check __strncpy_from_user(char *dst, |
560 | const char __user *src, long count); | 618 | const char __user *src, long count); |
561 | 619 | ||
562 | /** | 620 | /** |
563 | * strlen_user: - Get the size of a string in user space. | 621 | * strlen_user: - Get the size of a string in user space. |
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index b87eb4ba8f9d..b8a2f4339903 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h | |||
@@ -29,23 +29,27 @@ | |||
29 | #define get_fs() (current_thread_info()->addr_limit) | 29 | #define get_fs() (current_thread_info()->addr_limit) |
30 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | 30 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) |
31 | 31 | ||
32 | #define segment_eq(a,b) ((a).seg == (b).seg) | 32 | #define segment_eq(a, b) ((a).seg == (b).seg) |
33 | 33 | ||
34 | #define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg))) | 34 | #define __addr_ok(addr) (!((unsigned long)(addr) & \ |
35 | (current_thread_info()->addr_limit.seg))) | ||
35 | 36 | ||
36 | /* | 37 | /* |
37 | * Uhhuh, this needs 65-bit arithmetic. We have a carry.. | 38 | * Uhhuh, this needs 65-bit arithmetic. We have a carry.. |
38 | */ | 39 | */ |
39 | #define __range_not_ok(addr,size) ({ \ | 40 | #define __range_not_ok(addr, size) \ |
40 | unsigned long flag,roksum; \ | 41 | ({ \ |
41 | __chk_user_ptr(addr); \ | 42 | unsigned long flag, roksum; \ |
42 | asm("# range_ok\n\r" \ | 43 | __chk_user_ptr(addr); \ |
43 | "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ | 44 | asm("# range_ok\n\r" \ |
44 | :"=&r" (flag), "=r" (roksum) \ | 45 | "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \ |
45 | :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \ | 46 | : "=&r" (flag), "=r" (roksum) \ |
46 | flag; }) | 47 | : "1" (addr), "g" ((long)(size)), \ |
48 | "g" (current_thread_info()->addr_limit.seg)); \ | ||
49 | flag; \ | ||
50 | }) | ||
47 | 51 | ||
48 | #define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0) | 52 | #define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0) |
49 | 53 | ||
50 | /* | 54 | /* |
51 | * The exception table consists of pairs of addresses: the first is the | 55 | * The exception table consists of pairs of addresses: the first is the |
@@ -60,8 +64,7 @@ | |||
60 | * on our cache or tlb entries. | 64 | * on our cache or tlb entries. |
61 | */ | 65 | */ |
62 | 66 | ||
63 | struct exception_table_entry | 67 | struct exception_table_entry { |
64 | { | ||
65 | unsigned long insn, fixup; | 68 | unsigned long insn, fixup; |
66 | }; | 69 | }; |
67 | 70 | ||
@@ -84,23 +87,36 @@ extern int fixup_exception(struct pt_regs *regs); | |||
84 | * accesses to the same area of user memory). | 87 | * accesses to the same area of user memory). |
85 | */ | 88 | */ |
86 | 89 | ||
87 | #define __get_user_x(size,ret,x,ptr) \ | 90 | #define __get_user_x(size, ret, x, ptr) \ |
88 | asm volatile("call __get_user_" #size \ | 91 | asm volatile("call __get_user_" #size \ |
89 | :"=a" (ret),"=d" (x) \ | 92 | : "=a" (ret),"=d" (x) \ |
90 | :"c" (ptr) \ | 93 | : "c" (ptr) \ |
91 | :"r8") | 94 | : "r8") |
95 | |||
96 | /* Careful: we have to cast the result to the type of the pointer | ||
97 | * for sign reasons */ | ||
92 | 98 | ||
93 | /* Careful: we have to cast the result to the type of the pointer for sign reasons */ | 99 | #define get_user(x, ptr) \ |
94 | #define get_user(x,ptr) \ | 100 | ({ \ |
95 | ({ unsigned long __val_gu; \ | 101 | unsigned long __val_gu; \ |
96 | int __ret_gu; \ | 102 | int __ret_gu; \ |
97 | __chk_user_ptr(ptr); \ | 103 | __chk_user_ptr(ptr); \ |
98 | switch(sizeof (*(ptr))) { \ | 104 | switch (sizeof(*(ptr))) { \ |
99 | case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ | 105 | case 1: \ |
100 | case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ | 106 | __get_user_x(1, __ret_gu, __val_gu, ptr); \ |
101 | case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ | 107 | break; \ |
102 | case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \ | 108 | case 2: \ |
103 | default: __get_user_bad(); break; \ | 109 | __get_user_x(2, __ret_gu, __val_gu, ptr); \ |
110 | break; \ | ||
111 | case 4: \ | ||
112 | __get_user_x(4, __ret_gu, __val_gu, ptr); \ | ||
113 | break; \ | ||
114 | case 8: \ | ||
115 | __get_user_x(8, __ret_gu, __val_gu, ptr); \ | ||
116 | break; \ | ||
117 | default: \ | ||
118 | __get_user_bad(); \ | ||
119 | break; \ | ||
104 | } \ | 120 | } \ |
105 | (x) = (__force typeof(*(ptr)))__val_gu; \ | 121 | (x) = (__force typeof(*(ptr)))__val_gu; \ |
106 | __ret_gu; \ | 122 | __ret_gu; \ |
@@ -112,55 +128,73 @@ extern void __put_user_4(void); | |||
112 | extern void __put_user_8(void); | 128 | extern void __put_user_8(void); |
113 | extern void __put_user_bad(void); | 129 | extern void __put_user_bad(void); |
114 | 130 | ||
115 | #define __put_user_x(size,ret,x,ptr) \ | 131 | #define __put_user_x(size, ret, x, ptr) \ |
116 | asm volatile("call __put_user_" #size \ | 132 | asm volatile("call __put_user_" #size \ |
117 | :"=a" (ret) \ | 133 | :"=a" (ret) \ |
118 | :"c" (ptr),"d" (x) \ | 134 | :"c" (ptr),"d" (x) \ |
119 | :"r8") | 135 | :"r8") |
120 | 136 | ||
121 | #define put_user(x,ptr) \ | 137 | #define put_user(x, ptr) \ |
122 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 138 | __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
123 | 139 | ||
124 | #define __get_user(x,ptr) \ | 140 | #define __get_user(x, ptr) \ |
125 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 141 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
126 | #define __put_user(x,ptr) \ | 142 | #define __put_user(x, ptr) \ |
127 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 143 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
128 | 144 | ||
129 | #define __get_user_unaligned __get_user | 145 | #define __get_user_unaligned __get_user |
130 | #define __put_user_unaligned __put_user | 146 | #define __put_user_unaligned __put_user |
131 | 147 | ||
132 | #define __put_user_nocheck(x,ptr,size) \ | 148 | #define __put_user_nocheck(x, ptr, size) \ |
133 | ({ \ | 149 | ({ \ |
134 | int __pu_err; \ | 150 | int __pu_err; \ |
135 | __put_user_size((x),(ptr),(size),__pu_err); \ | 151 | __put_user_size((x), (ptr), (size), __pu_err); \ |
136 | __pu_err; \ | 152 | __pu_err; \ |
137 | }) | 153 | }) |
138 | 154 | ||
139 | 155 | ||
140 | #define __put_user_check(x,ptr,size) \ | 156 | #define __put_user_check(x, ptr, size) \ |
141 | ({ \ | 157 | ({ \ |
142 | int __pu_err; \ | 158 | int __pu_err; \ |
143 | typeof(*(ptr)) __user *__pu_addr = (ptr); \ | 159 | typeof(*(ptr)) __user *__pu_addr = (ptr); \ |
144 | switch (size) { \ | 160 | switch (size) { \ |
145 | case 1: __put_user_x(1,__pu_err,x,__pu_addr); break; \ | 161 | case 1: \ |
146 | case 2: __put_user_x(2,__pu_err,x,__pu_addr); break; \ | 162 | __put_user_x(1, __pu_err, x, __pu_addr); \ |
147 | case 4: __put_user_x(4,__pu_err,x,__pu_addr); break; \ | 163 | break; \ |
148 | case 8: __put_user_x(8,__pu_err,x,__pu_addr); break; \ | 164 | case 2: \ |
149 | default: __put_user_bad(); \ | 165 | __put_user_x(2, __pu_err, x, __pu_addr); \ |
150 | } \ | 166 | break; \ |
151 | __pu_err; \ | 167 | case 4: \ |
168 | __put_user_x(4, __pu_err, x, __pu_addr); \ | ||
169 | break; \ | ||
170 | case 8: \ | ||
171 | __put_user_x(8, __pu_err, x, __pu_addr); \ | ||
172 | break; \ | ||
173 | default: \ | ||
174 | __put_user_bad(); \ | ||
175 | } \ | ||
176 | __pu_err; \ | ||
152 | }) | 177 | }) |
153 | 178 | ||
154 | #define __put_user_size(x,ptr,size,retval) \ | 179 | #define __put_user_size(x, ptr, size, retval) \ |
155 | do { \ | 180 | do { \ |
156 | retval = 0; \ | 181 | retval = 0; \ |
157 | __chk_user_ptr(ptr); \ | 182 | __chk_user_ptr(ptr); \ |
158 | switch (size) { \ | 183 | switch (size) { \ |
159 | case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\ | 184 | case 1: \ |
160 | case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\ | 185 | __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\ |
161 | case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\ | 186 | break; \ |
162 | case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\ | 187 | case 2: \ |
163 | default: __put_user_bad(); \ | 188 | __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\ |
189 | break; \ | ||
190 | case 4: \ | ||
191 | __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\ | ||
192 | break; \ | ||
193 | case 8: \ | ||
194 | __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \ | ||
195 | break; \ | ||
196 | default: \ | ||
197 | __put_user_bad(); \ | ||
164 | } \ | 198 | } \ |
165 | } while (0) | 199 | } while (0) |
166 | 200 | ||
@@ -174,23 +208,22 @@ struct __large_struct { unsigned long buf[100]; }; | |||
174 | * aliasing issues. | 208 | * aliasing issues. |
175 | */ | 209 | */ |
176 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \ | 210 | #define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \ |
177 | asm volatile( \ | 211 | asm volatile("1: mov"itype" %"rtype"1,%2\n" \ |
178 | "1: mov"itype" %"rtype"1,%2\n" \ | 212 | "2:\n" \ |
179 | "2:\n" \ | 213 | ".section .fixup, \"ax\"\n" \ |
180 | ".section .fixup,\"ax\"\n" \ | 214 | "3: mov %3,%0\n" \ |
181 | "3: mov %3,%0\n" \ | 215 | " jmp 2b\n" \ |
182 | " jmp 2b\n" \ | 216 | ".previous\n" \ |
183 | ".previous\n" \ | 217 | _ASM_EXTABLE(1b, 3b) \ |
184 | _ASM_EXTABLE(1b,3b) \ | 218 | : "=r"(err) \ |
185 | : "=r"(err) \ | 219 | : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err)) |
186 | : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err)) | 220 | |
187 | 221 | ||
188 | 222 | #define __get_user_nocheck(x, ptr, size) \ | |
189 | #define __get_user_nocheck(x,ptr,size) \ | ||
190 | ({ \ | 223 | ({ \ |
191 | int __gu_err; \ | 224 | int __gu_err; \ |
192 | unsigned long __gu_val; \ | 225 | unsigned long __gu_val; \ |
193 | __get_user_size(__gu_val,(ptr),(size),__gu_err); \ | 226 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ |
194 | (x) = (__force typeof(*(ptr)))__gu_val; \ | 227 | (x) = (__force typeof(*(ptr)))__gu_val; \ |
195 | __gu_err; \ | 228 | __gu_err; \ |
196 | }) | 229 | }) |
@@ -201,31 +234,39 @@ extern int __get_user_4(void); | |||
201 | extern int __get_user_8(void); | 234 | extern int __get_user_8(void); |
202 | extern int __get_user_bad(void); | 235 | extern int __get_user_bad(void); |
203 | 236 | ||
204 | #define __get_user_size(x,ptr,size,retval) \ | 237 | #define __get_user_size(x, ptr, size, retval) \ |
205 | do { \ | 238 | do { \ |
206 | retval = 0; \ | 239 | retval = 0; \ |
207 | __chk_user_ptr(ptr); \ | 240 | __chk_user_ptr(ptr); \ |
208 | switch (size) { \ | 241 | switch (size) { \ |
209 | case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\ | 242 | case 1: \ |
210 | case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\ | 243 | __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\ |
211 | case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\ | 244 | break; \ |
212 | case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\ | 245 | case 2: \ |
213 | default: (x) = __get_user_bad(); \ | 246 | __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\ |
247 | break; \ | ||
248 | case 4: \ | ||
249 | __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\ | ||
250 | break; \ | ||
251 | case 8: \ | ||
252 | __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \ | ||
253 | break; \ | ||
254 | default: \ | ||
255 | (x) = __get_user_bad(); \ | ||
214 | } \ | 256 | } \ |
215 | } while (0) | 257 | } while (0) |
216 | 258 | ||
217 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \ | 259 | #define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \ |
218 | asm volatile( \ | 260 | asm volatile("1: mov"itype" %2,%"rtype"1\n" \ |
219 | "1: mov"itype" %2,%"rtype"1\n" \ | 261 | "2:\n" \ |
220 | "2:\n" \ | 262 | ".section .fixup, \"ax\"\n" \ |
221 | ".section .fixup,\"ax\"\n" \ | 263 | "3: mov %3,%0\n" \ |
222 | "3: mov %3,%0\n" \ | 264 | " xor"itype" %"rtype"1,%"rtype"1\n" \ |
223 | " xor"itype" %"rtype"1,%"rtype"1\n" \ | 265 | " jmp 2b\n" \ |
224 | " jmp 2b\n" \ | 266 | ".previous\n" \ |
225 | ".previous\n" \ | 267 | _ASM_EXTABLE(1b, 3b) \ |
226 | _ASM_EXTABLE(1b,3b) \ | 268 | : "=r" (err), ltype (x) \ |
227 | : "=r"(err), ltype (x) \ | 269 | : "m" (__m(addr)), "i"(errno), "0"(err)) |
228 | : "m"(__m(addr)), "i"(errno), "0"(err)) | ||
229 | 270 | ||
230 | /* | 271 | /* |
231 | * Copy To/From Userspace | 272 | * Copy To/From Userspace |
@@ -244,110 +285,142 @@ copy_in_user(void __user *to, const void __user *from, unsigned len); | |||
244 | 285 | ||
245 | static __always_inline __must_check | 286 | static __always_inline __must_check |
246 | int __copy_from_user(void *dst, const void __user *src, unsigned size) | 287 | int __copy_from_user(void *dst, const void __user *src, unsigned size) |
247 | { | 288 | { |
248 | int ret = 0; | 289 | int ret = 0; |
249 | if (!__builtin_constant_p(size)) | 290 | if (!__builtin_constant_p(size)) |
250 | return copy_user_generic(dst,(__force void *)src,size); | 291 | return copy_user_generic(dst, (__force void *)src, size); |
251 | switch (size) { | 292 | switch (size) { |
252 | case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1); | 293 | case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, |
294 | ret, "b", "b", "=q", 1); | ||
253 | return ret; | 295 | return ret; |
254 | case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2); | 296 | case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, |
297 | ret, "w", "w", "=r", 2); | ||
255 | return ret; | 298 | return ret; |
256 | case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4); | 299 | case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, |
300 | ret, "l", "k", "=r", 4); | ||
301 | return ret; | ||
302 | case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, | ||
303 | ret, "q", "", "=r", 8); | ||
257 | return ret; | 304 | return ret; |
258 | case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8); | ||
259 | return ret; | ||
260 | case 10: | 305 | case 10: |
261 | __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); | 306 | __get_user_asm(*(u64 *)dst, (u64 __user *)src, |
262 | if (unlikely(ret)) return ret; | 307 | ret, "q", "", "=r", 16); |
263 | __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2); | 308 | if (unlikely(ret)) |
264 | return ret; | 309 | return ret; |
310 | __get_user_asm(*(u16 *)(8 + (char *)dst), | ||
311 | (u16 __user *)(8 + (char __user *)src), | ||
312 | ret, "w", "w", "=r", 2); | ||
313 | return ret; | ||
265 | case 16: | 314 | case 16: |
266 | __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16); | 315 | __get_user_asm(*(u64 *)dst, (u64 __user *)src, |
267 | if (unlikely(ret)) return ret; | 316 | ret, "q", "", "=r", 16); |
268 | __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8); | 317 | if (unlikely(ret)) |
269 | return ret; | 318 | return ret; |
319 | __get_user_asm(*(u64 *)(8 + (char *)dst), | ||
320 | (u64 __user *)(8 + (char __user *)src), | ||
321 | ret, "q", "", "=r", 8); | ||
322 | return ret; | ||
270 | default: | 323 | default: |
271 | return copy_user_generic(dst,(__force void *)src,size); | 324 | return copy_user_generic(dst, (__force void *)src, size); |
272 | } | 325 | } |
273 | } | 326 | } |
274 | 327 | ||
275 | static __always_inline __must_check | 328 | static __always_inline __must_check |
276 | int __copy_to_user(void __user *dst, const void *src, unsigned size) | 329 | int __copy_to_user(void __user *dst, const void *src, unsigned size) |
277 | { | 330 | { |
278 | int ret = 0; | 331 | int ret = 0; |
279 | if (!__builtin_constant_p(size)) | 332 | if (!__builtin_constant_p(size)) |
280 | return copy_user_generic((__force void *)dst,src,size); | 333 | return copy_user_generic((__force void *)dst, src, size); |
281 | switch (size) { | 334 | switch (size) { |
282 | case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1); | 335 | case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, |
336 | ret, "b", "b", "iq", 1); | ||
283 | return ret; | 337 | return ret; |
284 | case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2); | 338 | case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, |
339 | ret, "w", "w", "ir", 2); | ||
285 | return ret; | 340 | return ret; |
286 | case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4); | 341 | case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, |
342 | ret, "l", "k", "ir", 4); | ||
343 | return ret; | ||
344 | case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, | ||
345 | ret, "q", "", "ir", 8); | ||
287 | return ret; | 346 | return ret; |
288 | case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8); | ||
289 | return ret; | ||
290 | case 10: | 347 | case 10: |
291 | __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10); | 348 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, |
292 | if (unlikely(ret)) return ret; | 349 | ret, "q", "", "ir", 10); |
350 | if (unlikely(ret)) | ||
351 | return ret; | ||
293 | asm("":::"memory"); | 352 | asm("":::"memory"); |
294 | __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2); | 353 | __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, |
295 | return ret; | 354 | ret, "w", "w", "ir", 2); |
355 | return ret; | ||
296 | case 16: | 356 | case 16: |
297 | __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16); | 357 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, |
298 | if (unlikely(ret)) return ret; | 358 | ret, "q", "", "ir", 16); |
359 | if (unlikely(ret)) | ||
360 | return ret; | ||
299 | asm("":::"memory"); | 361 | asm("":::"memory"); |
300 | __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8); | 362 | __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, |
301 | return ret; | 363 | ret, "q", "", "ir", 8); |
364 | return ret; | ||
302 | default: | 365 | default: |
303 | return copy_user_generic((__force void *)dst,src,size); | 366 | return copy_user_generic((__force void *)dst, src, size); |
304 | } | 367 | } |
305 | } | 368 | } |
306 | 369 | ||
307 | static __always_inline __must_check | 370 | static __always_inline __must_check |
308 | int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | 371 | int __copy_in_user(void __user *dst, const void __user *src, unsigned size) |
309 | { | 372 | { |
310 | int ret = 0; | 373 | int ret = 0; |
311 | if (!__builtin_constant_p(size)) | 374 | if (!__builtin_constant_p(size)) |
312 | return copy_user_generic((__force void *)dst,(__force void *)src,size); | 375 | return copy_user_generic((__force void *)dst, |
313 | switch (size) { | 376 | (__force void *)src, size); |
314 | case 1: { | 377 | switch (size) { |
378 | case 1: { | ||
315 | u8 tmp; | 379 | u8 tmp; |
316 | __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1); | 380 | __get_user_asm(tmp, (u8 __user *)src, |
381 | ret, "b", "b", "=q", 1); | ||
317 | if (likely(!ret)) | 382 | if (likely(!ret)) |
318 | __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1); | 383 | __put_user_asm(tmp, (u8 __user *)dst, |
384 | ret, "b", "b", "iq", 1); | ||
319 | return ret; | 385 | return ret; |
320 | } | 386 | } |
321 | case 2: { | 387 | case 2: { |
322 | u16 tmp; | 388 | u16 tmp; |
323 | __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2); | 389 | __get_user_asm(tmp, (u16 __user *)src, |
390 | ret, "w", "w", "=r", 2); | ||
324 | if (likely(!ret)) | 391 | if (likely(!ret)) |
325 | __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2); | 392 | __put_user_asm(tmp, (u16 __user *)dst, |
393 | ret, "w", "w", "ir", 2); | ||
326 | return ret; | 394 | return ret; |
327 | } | 395 | } |
328 | 396 | ||
329 | case 4: { | 397 | case 4: { |
330 | u32 tmp; | 398 | u32 tmp; |
331 | __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4); | 399 | __get_user_asm(tmp, (u32 __user *)src, |
400 | ret, "l", "k", "=r", 4); | ||
332 | if (likely(!ret)) | 401 | if (likely(!ret)) |
333 | __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4); | 402 | __put_user_asm(tmp, (u32 __user *)dst, |
403 | ret, "l", "k", "ir", 4); | ||
334 | return ret; | 404 | return ret; |
335 | } | 405 | } |
336 | case 8: { | 406 | case 8: { |
337 | u64 tmp; | 407 | u64 tmp; |
338 | __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8); | 408 | __get_user_asm(tmp, (u64 __user *)src, |
409 | ret, "q", "", "=r", 8); | ||
339 | if (likely(!ret)) | 410 | if (likely(!ret)) |
340 | __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8); | 411 | __put_user_asm(tmp, (u64 __user *)dst, |
412 | ret, "q", "", "ir", 8); | ||
341 | return ret; | 413 | return ret; |
342 | } | 414 | } |
343 | default: | 415 | default: |
344 | return copy_user_generic((__force void *)dst,(__force void *)src,size); | 416 | return copy_user_generic((__force void *)dst, |
417 | (__force void *)src, size); | ||
345 | } | 418 | } |
346 | } | 419 | } |
347 | 420 | ||
348 | __must_check long | 421 | __must_check long |
349 | strncpy_from_user(char *dst, const char __user *src, long count); | 422 | strncpy_from_user(char *dst, const char __user *src, long count); |
350 | __must_check long | 423 | __must_check long |
351 | __strncpy_from_user(char *dst, const char __user *src, long count); | 424 | __strncpy_from_user(char *dst, const char __user *src, long count); |
352 | __must_check long strnlen_user(const char __user *str, long n); | 425 | __must_check long strnlen_user(const char __user *str, long n); |
353 | __must_check long __strnlen_user(const char __user *str, long n); | 426 | __must_check long __strnlen_user(const char __user *str, long n); |
@@ -355,7 +428,8 @@ __must_check long strlen_user(const char __user *str); | |||
355 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); | 428 | __must_check unsigned long clear_user(void __user *mem, unsigned long len); |
356 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); | 429 | __must_check unsigned long __clear_user(void __user *mem, unsigned long len); |
357 | 430 | ||
358 | __must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size); | 431 | __must_check long __copy_from_user_inatomic(void *dst, const void __user *src, |
432 | unsigned size); | ||
359 | 433 | ||
360 | static __must_check __always_inline int | 434 | static __must_check __always_inline int |
361 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | 435 | __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) |
@@ -364,15 +438,19 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) | |||
364 | } | 438 | } |
365 | 439 | ||
366 | #define ARCH_HAS_NOCACHE_UACCESS 1 | 440 | #define ARCH_HAS_NOCACHE_UACCESS 1 |
367 | extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); | 441 | extern long __copy_user_nocache(void *dst, const void __user *src, |
442 | unsigned size, int zerorest); | ||
368 | 443 | ||
369 | static inline int __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) | 444 | static inline int __copy_from_user_nocache(void *dst, const void __user *src, |
445 | unsigned size) | ||
370 | { | 446 | { |
371 | might_sleep(); | 447 | might_sleep(); |
372 | return __copy_user_nocache(dst, src, size, 1); | 448 | return __copy_user_nocache(dst, src, size, 1); |
373 | } | 449 | } |
374 | 450 | ||
375 | static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) | 451 | static inline int __copy_from_user_inatomic_nocache(void *dst, |
452 | const void __user *src, | ||
453 | unsigned size) | ||
376 | { | 454 | { |
377 | return __copy_user_nocache(dst, src, size, 0); | 455 | return __copy_user_nocache(dst, src, size, 0); |
378 | } | 456 | } |
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h index 913598d4f761..d270ffe72759 100644 --- a/include/asm-x86/unaligned.h +++ b/include/asm-x86/unaligned.h | |||
@@ -32,6 +32,6 @@ | |||
32 | * | 32 | * |
33 | * Note that unaligned accesses can be very expensive on some architectures. | 33 | * Note that unaligned accesses can be very expensive on some architectures. |
34 | */ | 34 | */ |
35 | #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) | 35 | #define put_unaligned(val, ptr) ((void)(*(ptr) = (val))) |
36 | 36 | ||
37 | #endif /* _ASM_X86_UNALIGNED_H */ | 37 | #endif /* _ASM_X86_UNALIGNED_H */ |
diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h index 2a58ed3e51d8..effc7ad8e12f 100644 --- a/include/asm-x86/unistd.h +++ b/include/asm-x86/unistd.h | |||
@@ -1,11 +1,5 @@ | |||
1 | #ifdef __KERNEL__ | 1 | #ifdef __KERNEL__ |
2 | # ifdef CONFIG_X86_32 | 2 | # if defined(CONFIG_X86_32) || defined(__i386__) |
3 | # include "unistd_32.h" | ||
4 | # else | ||
5 | # include "unistd_64.h" | ||
6 | # endif | ||
7 | #else | ||
8 | # ifdef __i386__ | ||
9 | # include "unistd_32.h" | 3 | # include "unistd_32.h" |
10 | # else | 4 | # else |
11 | # include "unistd_64.h" | 5 | # include "unistd_64.h" |
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h index 984123a68f7c..8317d94771d3 100644 --- a/include/asm-x86/unistd_32.h +++ b/include/asm-x86/unistd_32.h | |||
@@ -81,7 +81,7 @@ | |||
81 | #define __NR_sigpending 73 | 81 | #define __NR_sigpending 73 |
82 | #define __NR_sethostname 74 | 82 | #define __NR_sethostname 74 |
83 | #define __NR_setrlimit 75 | 83 | #define __NR_setrlimit 75 |
84 | #define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ | 84 | #define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ |
85 | #define __NR_getrusage 77 | 85 | #define __NR_getrusage 77 |
86 | #define __NR_gettimeofday 78 | 86 | #define __NR_gettimeofday 78 |
87 | #define __NR_settimeofday 79 | 87 | #define __NR_settimeofday 79 |
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h index 3883ceb54ef5..fe26e36d0f51 100644 --- a/include/asm-x86/unistd_64.h +++ b/include/asm-x86/unistd_64.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _ASM_X86_64_UNISTD_H_ | 2 | #define _ASM_X86_64_UNISTD_H_ |
3 | 3 | ||
4 | #ifndef __SYSCALL | 4 | #ifndef __SYSCALL |
5 | #define __SYSCALL(a,b) | 5 | #define __SYSCALL(a, b) |
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | /* | 8 | /* |
diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h index f769872debea..a3d910047879 100644 --- a/include/asm-x86/user32.h +++ b/include/asm-x86/user32.h | |||
@@ -1,7 +1,8 @@ | |||
1 | #ifndef USER32_H | 1 | #ifndef USER32_H |
2 | #define USER32_H 1 | 2 | #define USER32_H 1 |
3 | 3 | ||
4 | /* IA32 compatible user structures for ptrace. These should be used for 32bit coredumps too. */ | 4 | /* IA32 compatible user structures for ptrace. |
5 | * These should be used for 32bit coredumps too. */ | ||
5 | 6 | ||
6 | struct user_i387_ia32_struct { | 7 | struct user_i387_ia32_struct { |
7 | u32 cwd; | 8 | u32 cwd; |
@@ -42,9 +43,9 @@ struct user_regs_struct32 { | |||
42 | }; | 43 | }; |
43 | 44 | ||
44 | struct user32 { | 45 | struct user32 { |
45 | struct user_regs_struct32 regs; /* Where the registers are actually stored */ | 46 | struct user_regs_struct32 regs; /* Where the registers are actually stored */ |
46 | int u_fpvalid; /* True if math co-processor being used. */ | 47 | int u_fpvalid; /* True if math co-processor being used. */ |
47 | /* for this mess. Not yet used. */ | 48 | /* for this mess. Not yet used. */ |
48 | struct user_i387_ia32_struct i387; /* Math Co-processor registers. */ | 49 | struct user_i387_ia32_struct i387; /* Math Co-processor registers. */ |
49 | /* The rest of this junk is to help gdb figure out what goes where */ | 50 | /* The rest of this junk is to help gdb figure out what goes where */ |
50 | __u32 u_tsize; /* Text segment size (pages). */ | 51 | __u32 u_tsize; /* Text segment size (pages). */ |
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h index 6157da6f882c..d6e51edc259d 100644 --- a/include/asm-x86/user_32.h +++ b/include/asm-x86/user_32.h | |||
@@ -100,10 +100,10 @@ struct user_regs_struct { | |||
100 | struct user{ | 100 | struct user{ |
101 | /* We start with the registers, to mimic the way that "memory" is returned | 101 | /* We start with the registers, to mimic the way that "memory" is returned |
102 | from the ptrace(3,...) function. */ | 102 | from the ptrace(3,...) function. */ |
103 | struct user_regs_struct regs; /* Where the registers are actually stored */ | 103 | struct user_regs_struct regs; /* Where the registers are actually stored */ |
104 | /* ptrace does not yet supply these. Someday.... */ | 104 | /* ptrace does not yet supply these. Someday.... */ |
105 | int u_fpvalid; /* True if math co-processor being used. */ | 105 | int u_fpvalid; /* True if math co-processor being used. */ |
106 | /* for this mess. Not yet used. */ | 106 | /* for this mess. Not yet used. */ |
107 | struct user_i387_struct i387; /* Math Co-processor registers. */ | 107 | struct user_i387_struct i387; /* Math Co-processor registers. */ |
108 | /* The rest of this junk is to help gdb figure out what goes where */ | 108 | /* The rest of this junk is to help gdb figure out what goes where */ |
109 | unsigned long int u_tsize; /* Text segment size (pages). */ | 109 | unsigned long int u_tsize; /* Text segment size (pages). */ |
@@ -118,7 +118,7 @@ struct user{ | |||
118 | int reserved; /* No longer used */ | 118 | int reserved; /* No longer used */ |
119 | unsigned long u_ar0; /* Used by gdb to help find the values for */ | 119 | unsigned long u_ar0; /* Used by gdb to help find the values for */ |
120 | /* the registers. */ | 120 | /* the registers. */ |
121 | struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ | 121 | struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */ |
122 | unsigned long magic; /* To uniquely identify a core file */ | 122 | unsigned long magic; /* To uniquely identify a core file */ |
123 | char u_comm[32]; /* User command that was responsible */ | 123 | char u_comm[32]; /* User command that was responsible */ |
124 | int u_debugreg[8]; | 124 | int u_debugreg[8]; |
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h index 963616455609..6037b634c77f 100644 --- a/include/asm-x86/user_64.h +++ b/include/asm-x86/user_64.h | |||
@@ -45,12 +45,13 @@ | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | /* This matches the 64bit FXSAVE format as defined by AMD. It is the same | 47 | /* This matches the 64bit FXSAVE format as defined by AMD. It is the same |
48 | as the 32bit format defined by Intel, except that the selector:offset pairs for | 48 | as the 32bit format defined by Intel, except that the selector:offset pairs |
49 | data and eip are replaced with flat 64bit pointers. */ | 49 | for data and eip are replaced with flat 64bit pointers. */ |
50 | struct user_i387_struct { | 50 | struct user_i387_struct { |
51 | unsigned short cwd; | 51 | unsigned short cwd; |
52 | unsigned short swd; | 52 | unsigned short swd; |
53 | unsigned short twd; /* Note this is not the same as the 32bit/x87/FSAVE twd */ | 53 | unsigned short twd; /* Note this is not the same as |
54 | the 32bit/x87/FSAVE twd */ | ||
54 | unsigned short fop; | 55 | unsigned short fop; |
55 | __u64 rip; | 56 | __u64 rip; |
56 | __u64 rdp; | 57 | __u64 rdp; |
@@ -97,13 +98,14 @@ struct user_regs_struct { | |||
97 | /* When the kernel dumps core, it starts by dumping the user struct - | 98 | /* When the kernel dumps core, it starts by dumping the user struct - |
98 | this will be used by gdb to figure out where the data and stack segments | 99 | this will be used by gdb to figure out where the data and stack segments |
99 | are within the file, and what virtual addresses to use. */ | 100 | are within the file, and what virtual addresses to use. */ |
100 | struct user{ | 101 | |
102 | struct user { | ||
101 | /* We start with the registers, to mimic the way that "memory" is returned | 103 | /* We start with the registers, to mimic the way that "memory" is returned |
102 | from the ptrace(3,...) function. */ | 104 | from the ptrace(3,...) function. */ |
103 | struct user_regs_struct regs; /* Where the registers are actually stored */ | 105 | struct user_regs_struct regs; /* Where the registers are actually stored */ |
104 | /* ptrace does not yet supply these. Someday.... */ | 106 | /* ptrace does not yet supply these. Someday.... */ |
105 | int u_fpvalid; /* True if math co-processor being used. */ | 107 | int u_fpvalid; /* True if math co-processor being used. */ |
106 | /* for this mess. Not yet used. */ | 108 | /* for this mess. Not yet used. */ |
107 | int pad0; | 109 | int pad0; |
108 | struct user_i387_struct i387; /* Math Co-processor registers. */ | 110 | struct user_i387_struct i387; /* Math Co-processor registers. */ |
109 | /* The rest of this junk is to help gdb figure out what goes where */ | 111 | /* The rest of this junk is to help gdb figure out what goes where */ |
@@ -120,7 +122,7 @@ struct user{ | |||
120 | int pad1; | 122 | int pad1; |
121 | unsigned long u_ar0; /* Used by gdb to help find the values for */ | 123 | unsigned long u_ar0; /* Used by gdb to help find the values for */ |
122 | /* the registers. */ | 124 | /* the registers. */ |
123 | struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */ | 125 | struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */ |
124 | unsigned long magic; /* To uniquely identify a core file */ | 126 | unsigned long magic; /* To uniquely identify a core file */ |
125 | char u_comm[32]; /* User command that was responsible */ | 127 | char u_comm[32]; /* User command that was responsible */ |
126 | unsigned long u_debugreg[8]; | 128 | unsigned long u_debugreg[8]; |
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h new file mode 100644 index 000000000000..26b9240d1e23 --- /dev/null +++ b/include/asm-x86/uv/uv_hub.h | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI UV architectural definitions | ||
7 | * | ||
8 | * Copyright (C) 2007 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_X86_UV_HUB_H__ | ||
12 | #define __ASM_X86_UV_HUB_H__ | ||
13 | |||
14 | #include <linux/numa.h> | ||
15 | #include <linux/percpu.h> | ||
16 | #include <asm/types.h> | ||
17 | #include <asm/percpu.h> | ||
18 | |||
19 | |||
20 | /* | ||
21 | * Addressing Terminology | ||
22 | * | ||
23 | * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of | ||
24 | * routers always have low bit of 1, C/MBricks have low bit | ||
25 | * equal to 0. Most addressing macros that target UV hub chips | ||
26 | * right shift the NASID by 1 to exclude the always-zero bit. | ||
27 | * | ||
28 | * SNASID - NASID right shifted by 1 bit. | ||
29 | * | ||
30 | * | ||
31 | * Memory/UV-HUB Processor Socket Address Format: | ||
32 | * +--------+---------------+---------------------+ | ||
33 | * |00..0000| SNASID | NodeOffset | | ||
34 | * +--------+---------------+---------------------+ | ||
35 | * <--- N bits --->|<--------M bits -----> | ||
36 | * | ||
37 | * M number of node offset bits (35 .. 40) | ||
38 | * N number of SNASID bits (0 .. 10) | ||
39 | * | ||
40 | * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64). | ||
41 | * The actual values are configuration dependent and are set at | ||
42 | * boot time | ||
43 | * | ||
44 | * APICID format | ||
45 | * NOTE!!!!!! This is the current format of the APICID. However, code | ||
46 | * should assume that this will change in the future. Use functions | ||
47 | * in this file for all APICID bit manipulations and conversion. | ||
48 | * | ||
49 | * 1111110000000000 | ||
50 | * 5432109876543210 | ||
51 | * nnnnnnnnnnlc0cch | ||
52 | * sssssssssss | ||
53 | * | ||
54 | * n = snasid bits | ||
55 | * l = socket number on board | ||
56 | * c = core | ||
57 | * h = hyperthread | ||
58 | * s = bits that are in the socket CSR | ||
59 | * | ||
60 | * Note: Processor only supports 12 bits in the APICID register. The ACPI | ||
61 | * tables hold all 16 bits. Software needs to be aware of this. | ||
62 | * | ||
63 | * Unless otherwise specified, all references to APICID refer to | ||
64 | * the FULL value contained in ACPI tables, not the subset in the | ||
65 | * processor APICID register. | ||
66 | */ | ||
67 | |||
68 | |||
69 | /* | ||
70 | * Maximum number of bricks in all partitions and in all coherency domains. | ||
71 | * This is the total number of bricks accessible in the numalink fabric. It | ||
72 | * includes all C & M bricks. Routers are NOT included. | ||
73 | * | ||
74 | * This value is also the value of the maximum number of non-router NASIDs | ||
75 | * in the numalink fabric. | ||
76 | * | ||
77 | * NOTE: a brick may be 1 or 2 OS nodes. Don't get these confused. | ||
78 | */ | ||
79 | #define UV_MAX_NUMALINK_BLADES 16384 | ||
80 | |||
81 | /* | ||
82 | * Maximum number of C/Mbricks within a software SSI (hardware may support | ||
83 | * more). | ||
84 | */ | ||
85 | #define UV_MAX_SSI_BLADES 256 | ||
86 | |||
87 | /* | ||
88 | * The largest possible NASID of a C or M brick (+ 2) | ||
89 | */ | ||
90 | #define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_BLADES * 2) | ||
91 | |||
92 | /* | ||
93 | * The following defines attributes of the HUB chip. These attributes are | ||
94 | * frequently referenced and are kept in the per-cpu data areas of each cpu. | ||
95 | * They are kept together in a struct to minimize cache misses. | ||
96 | */ | ||
97 | struct uv_hub_info_s { | ||
98 | unsigned long global_mmr_base; | ||
99 | unsigned short local_nasid; | ||
100 | unsigned short gnode_upper; | ||
101 | unsigned short coherency_domain_number; | ||
102 | unsigned short numa_blade_id; | ||
103 | unsigned char blade_processor_id; | ||
104 | unsigned char m_val; | ||
105 | unsigned char n_val; | ||
106 | }; | ||
107 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | ||
108 | #define uv_hub_info (&__get_cpu_var(__uv_hub_info)) | ||
109 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) | ||
110 | |||
111 | /* | ||
112 | * Local & Global MMR space macros. | ||
113 | * Note: macros are intended to be used ONLY by inline functions | ||
114 | * in this file - not by other kernel code. | ||
115 | */ | ||
116 | #define UV_SNASID(n) ((n) >> 1) | ||
117 | #define UV_NASID(n) ((n) << 1) | ||
118 | |||
119 | #define UV_LOCAL_MMR_BASE 0xf4000000UL | ||
120 | #define UV_GLOBAL_MMR32_BASE 0xf8000000UL | ||
121 | #define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) | ||
122 | |||
123 | #define UV_GLOBAL_MMR32_SNASID_MASK 0x3ff | ||
124 | #define UV_GLOBAL_MMR32_SNASID_SHIFT 15 | ||
125 | #define UV_GLOBAL_MMR64_SNASID_SHIFT 26 | ||
126 | |||
127 | #define UV_GLOBAL_MMR32_NASID_BITS(n) \ | ||
128 | (((UV_SNASID(n) & UV_GLOBAL_MMR32_SNASID_MASK)) << \ | ||
129 | (UV_GLOBAL_MMR32_SNASID_SHIFT)) | ||
130 | |||
131 | #define UV_GLOBAL_MMR64_NASID_BITS(n) \ | ||
132 | ((unsigned long)UV_SNASID(n) << UV_GLOBAL_MMR64_SNASID_SHIFT) | ||
133 | |||
134 | #define UV_APIC_NASID_SHIFT 6 | ||
135 | |||
136 | /* | ||
137 | * Extract a NASID from an APICID (full apicid, not processor subset) | ||
138 | */ | ||
139 | static inline int uv_apicid_to_nasid(int apicid) | ||
140 | { | ||
141 | return (UV_NASID(apicid >> UV_APIC_NASID_SHIFT)); | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Access global MMRs using the low memory MMR32 space. This region supports | ||
146 | * faster MMR access but not all MMRs are accessible in this space. | ||
147 | */ | ||
148 | static inline unsigned long *uv_global_mmr32_address(int nasid, | ||
149 | unsigned long offset) | ||
150 | { | ||
151 | return __va(UV_GLOBAL_MMR32_BASE | | ||
152 | UV_GLOBAL_MMR32_NASID_BITS(nasid) | offset); | ||
153 | } | ||
154 | |||
155 | static inline void uv_write_global_mmr32(int nasid, unsigned long offset, | ||
156 | unsigned long val) | ||
157 | { | ||
158 | *uv_global_mmr32_address(nasid, offset) = val; | ||
159 | } | ||
160 | |||
161 | static inline unsigned long uv_read_global_mmr32(int nasid, | ||
162 | unsigned long offset) | ||
163 | { | ||
164 | return *uv_global_mmr32_address(nasid, offset); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Access Global MMR space using the MMR space located at the top of physical | ||
169 | * memory. | ||
170 | */ | ||
171 | static inline unsigned long *uv_global_mmr64_address(int nasid, | ||
172 | unsigned long offset) | ||
173 | { | ||
174 | return __va(UV_GLOBAL_MMR64_BASE | | ||
175 | UV_GLOBAL_MMR64_NASID_BITS(nasid) | offset); | ||
176 | } | ||
177 | |||
178 | static inline void uv_write_global_mmr64(int nasid, unsigned long offset, | ||
179 | unsigned long val) | ||
180 | { | ||
181 | *uv_global_mmr64_address(nasid, offset) = val; | ||
182 | } | ||
183 | |||
184 | static inline unsigned long uv_read_global_mmr64(int nasid, | ||
185 | unsigned long offset) | ||
186 | { | ||
187 | return *uv_global_mmr64_address(nasid, offset); | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Access node local MMRs. Faster than using global space but only local MMRs | ||
192 | * are accessible. | ||
193 | */ | ||
194 | static inline unsigned long *uv_local_mmr_address(unsigned long offset) | ||
195 | { | ||
196 | return __va(UV_LOCAL_MMR_BASE | offset); | ||
197 | } | ||
198 | |||
199 | static inline unsigned long uv_read_local_mmr(unsigned long offset) | ||
200 | { | ||
201 | return *uv_local_mmr_address(offset); | ||
202 | } | ||
203 | |||
204 | static inline void uv_write_local_mmr(unsigned long offset, unsigned long val) | ||
205 | { | ||
206 | *uv_local_mmr_address(offset) = val; | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Structures and definitions for converting between cpu, node, and blade | ||
211 | * numbers. | ||
212 | */ | ||
213 | struct uv_blade_info { | ||
214 | unsigned short nr_posible_cpus; | ||
215 | unsigned short nr_online_cpus; | ||
216 | unsigned short nasid; | ||
217 | }; | ||
218 | extern struct uv_blade_info *uv_blade_info; | ||
219 | extern short *uv_node_to_blade; | ||
220 | extern short *uv_cpu_to_blade; | ||
221 | extern short uv_possible_blades; | ||
222 | |||
223 | /* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */ | ||
224 | static inline int uv_blade_processor_id(void) | ||
225 | { | ||
226 | return uv_hub_info->blade_processor_id; | ||
227 | } | ||
228 | |||
229 | /* Blade number of current cpu. Numbered 0 .. <#blades -1> */ | ||
230 | static inline int uv_numa_blade_id(void) | ||
231 | { | ||
232 | return uv_hub_info->numa_blade_id; | ||
233 | } | ||
234 | |||
235 | /* Convert a cpu number to the UV blade number */ | ||
236 | static inline int uv_cpu_to_blade_id(int cpu) | ||
237 | { | ||
238 | return uv_cpu_to_blade[cpu]; | ||
239 | } | ||
240 | |||
241 | /* Convert linux node number to the UV blade number */ | ||
242 | static inline int uv_node_to_blade_id(int nid) | ||
243 | { | ||
244 | return uv_node_to_blade[nid]; | ||
245 | } | ||
246 | |||
247 | /* Convert a blade id to the NASID of the blade */ | ||
248 | static inline int uv_blade_to_nasid(int bid) | ||
249 | { | ||
250 | return uv_blade_info[bid].nasid; | ||
251 | } | ||
252 | |||
253 | /* Determine the number of possible cpus on a blade */ | ||
254 | static inline int uv_blade_nr_possible_cpus(int bid) | ||
255 | { | ||
256 | return uv_blade_info[bid].nr_posible_cpus; | ||
257 | } | ||
258 | |||
259 | /* Determine the number of online cpus on a blade */ | ||
260 | static inline int uv_blade_nr_online_cpus(int bid) | ||
261 | { | ||
262 | return uv_blade_info[bid].nr_online_cpus; | ||
263 | } | ||
264 | |||
265 | /* Convert a cpu id to the NASID of the blade containing the cpu */ | ||
266 | static inline int uv_cpu_to_nasid(int cpu) | ||
267 | { | ||
268 | return uv_blade_info[uv_cpu_to_blade_id(cpu)].nasid; | ||
269 | } | ||
270 | |||
271 | /* Convert a node number to the NASID of the blade */ | ||
272 | static inline int uv_node_to_nasid(int nid) | ||
273 | { | ||
274 | return uv_blade_info[uv_node_to_blade_id(nid)].nasid; | ||
275 | } | ||
276 | |||
277 | /* Maximum possible number of blades */ | ||
278 | static inline int uv_num_possible_blades(void) | ||
279 | { | ||
280 | return uv_possible_blades; | ||
281 | } | ||
282 | |||
283 | #endif /* __ASM_X86_UV_HUB_H__ */ | ||
284 | |||
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h new file mode 100644 index 000000000000..3b69fe6b6376 --- /dev/null +++ b/include/asm-x86/uv/uv_mmrs.h | |||
@@ -0,0 +1,373 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * SGI UV MMR definitions | ||
7 | * | ||
8 | * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ASM_X86_UV_MMRS__ | ||
12 | #define __ASM_X86_UV_MMRS__ | ||
13 | |||
14 | /* | ||
15 | * AUTO GENERATED - Do not edit | ||
16 | */ | ||
17 | |||
18 | #define UV_MMR_ENABLE (1UL << 63) | ||
19 | |||
20 | /* ========================================================================= */ | ||
21 | /* UVH_IPI_INT */ | ||
22 | /* ========================================================================= */ | ||
23 | #define UVH_IPI_INT 0x60500UL | ||
24 | #define UVH_IPI_INT_32 0x0360 | ||
25 | |||
26 | #define UVH_IPI_INT_VECTOR_SHFT 0 | ||
27 | #define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL | ||
28 | #define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 | ||
29 | #define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL | ||
30 | #define UVH_IPI_INT_DESTMODE_SHFT 11 | ||
31 | #define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL | ||
32 | #define UVH_IPI_INT_APIC_ID_SHFT 16 | ||
33 | #define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL | ||
34 | #define UVH_IPI_INT_SEND_SHFT 63 | ||
35 | #define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL | ||
36 | |||
37 | union uvh_ipi_int_u { | ||
38 | unsigned long v; | ||
39 | struct uvh_ipi_int_s { | ||
40 | unsigned long vector_ : 8; /* RW */ | ||
41 | unsigned long delivery_mode : 3; /* RW */ | ||
42 | unsigned long destmode : 1; /* RW */ | ||
43 | unsigned long rsvd_12_15 : 4; /* */ | ||
44 | unsigned long apic_id : 32; /* RW */ | ||
45 | unsigned long rsvd_48_62 : 15; /* */ | ||
46 | unsigned long send : 1; /* WP */ | ||
47 | } s; | ||
48 | }; | ||
49 | |||
50 | /* ========================================================================= */ | ||
51 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ | ||
52 | /* ========================================================================= */ | ||
53 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL | ||
54 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x009f0 | ||
55 | |||
56 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 | ||
57 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL | ||
58 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 | ||
59 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL | ||
60 | |||
61 | union uvh_lb_bau_intd_payload_queue_first_u { | ||
62 | unsigned long v; | ||
63 | struct uvh_lb_bau_intd_payload_queue_first_s { | ||
64 | unsigned long rsvd_0_3: 4; /* */ | ||
65 | unsigned long address : 39; /* RW */ | ||
66 | unsigned long rsvd_43_48: 6; /* */ | ||
67 | unsigned long node_id : 14; /* RW */ | ||
68 | unsigned long rsvd_63 : 1; /* */ | ||
69 | } s; | ||
70 | }; | ||
71 | |||
72 | /* ========================================================================= */ | ||
73 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ | ||
74 | /* ========================================================================= */ | ||
75 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL | ||
76 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x009f8 | ||
77 | |||
78 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 | ||
79 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL | ||
80 | |||
81 | union uvh_lb_bau_intd_payload_queue_last_u { | ||
82 | unsigned long v; | ||
83 | struct uvh_lb_bau_intd_payload_queue_last_s { | ||
84 | unsigned long rsvd_0_3: 4; /* */ | ||
85 | unsigned long address : 39; /* RW */ | ||
86 | unsigned long rsvd_43_63: 21; /* */ | ||
87 | } s; | ||
88 | }; | ||
89 | |||
90 | /* ========================================================================= */ | ||
91 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ | ||
92 | /* ========================================================================= */ | ||
93 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL | ||
94 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x00a00 | ||
95 | |||
96 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 | ||
97 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL | ||
98 | |||
99 | union uvh_lb_bau_intd_payload_queue_tail_u { | ||
100 | unsigned long v; | ||
101 | struct uvh_lb_bau_intd_payload_queue_tail_s { | ||
102 | unsigned long rsvd_0_3: 4; /* */ | ||
103 | unsigned long address : 39; /* RW */ | ||
104 | unsigned long rsvd_43_63: 21; /* */ | ||
105 | } s; | ||
106 | }; | ||
107 | |||
108 | /* ========================================================================= */ | ||
109 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ | ||
110 | /* ========================================================================= */ | ||
111 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL | ||
112 | |||
113 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 | ||
114 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL | ||
115 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 | ||
116 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL | ||
117 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2 | ||
118 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL | ||
119 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3 | ||
120 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL | ||
121 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4 | ||
122 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL | ||
123 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5 | ||
124 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL | ||
125 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6 | ||
126 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL | ||
127 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7 | ||
128 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL | ||
129 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8 | ||
130 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL | ||
131 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9 | ||
132 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL | ||
133 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10 | ||
134 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL | ||
135 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11 | ||
136 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL | ||
137 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12 | ||
138 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL | ||
139 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13 | ||
140 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL | ||
141 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14 | ||
142 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL | ||
143 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15 | ||
144 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL | ||
145 | union uvh_lb_bau_intd_software_acknowledge_u { | ||
146 | unsigned long v; | ||
147 | struct uvh_lb_bau_intd_software_acknowledge_s { | ||
148 | unsigned long pending_0 : 1; /* RW, W1C */ | ||
149 | unsigned long pending_1 : 1; /* RW, W1C */ | ||
150 | unsigned long pending_2 : 1; /* RW, W1C */ | ||
151 | unsigned long pending_3 : 1; /* RW, W1C */ | ||
152 | unsigned long pending_4 : 1; /* RW, W1C */ | ||
153 | unsigned long pending_5 : 1; /* RW, W1C */ | ||
154 | unsigned long pending_6 : 1; /* RW, W1C */ | ||
155 | unsigned long pending_7 : 1; /* RW, W1C */ | ||
156 | unsigned long timeout_0 : 1; /* RW, W1C */ | ||
157 | unsigned long timeout_1 : 1; /* RW, W1C */ | ||
158 | unsigned long timeout_2 : 1; /* RW, W1C */ | ||
159 | unsigned long timeout_3 : 1; /* RW, W1C */ | ||
160 | unsigned long timeout_4 : 1; /* RW, W1C */ | ||
161 | unsigned long timeout_5 : 1; /* RW, W1C */ | ||
162 | unsigned long timeout_6 : 1; /* RW, W1C */ | ||
163 | unsigned long timeout_7 : 1; /* RW, W1C */ | ||
164 | unsigned long rsvd_16_63: 48; /* */ | ||
165 | } s; | ||
166 | }; | ||
167 | |||
168 | /* ========================================================================= */ | ||
169 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ | ||
170 | /* ========================================================================= */ | ||
171 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL | ||
172 | |||
173 | /* ========================================================================= */ | ||
174 | /* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ | ||
175 | /* ========================================================================= */ | ||
176 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL | ||
177 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x009d8 | ||
178 | |||
179 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 | ||
180 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL | ||
181 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 | ||
182 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL | ||
183 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63 | ||
184 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL | ||
185 | |||
186 | union uvh_lb_bau_sb_activation_control_u { | ||
187 | unsigned long v; | ||
188 | struct uvh_lb_bau_sb_activation_control_s { | ||
189 | unsigned long index : 6; /* RW */ | ||
190 | unsigned long rsvd_6_61: 56; /* */ | ||
191 | unsigned long push : 1; /* WP */ | ||
192 | unsigned long init : 1; /* WP */ | ||
193 | } s; | ||
194 | }; | ||
195 | |||
196 | /* ========================================================================= */ | ||
197 | /* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ | ||
198 | /* ========================================================================= */ | ||
199 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL | ||
200 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x009e0 | ||
201 | |||
202 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 | ||
203 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL | ||
204 | |||
205 | union uvh_lb_bau_sb_activation_status_0_u { | ||
206 | unsigned long v; | ||
207 | struct uvh_lb_bau_sb_activation_status_0_s { | ||
208 | unsigned long status : 64; /* RW */ | ||
209 | } s; | ||
210 | }; | ||
211 | |||
212 | /* ========================================================================= */ | ||
213 | /* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ | ||
214 | /* ========================================================================= */ | ||
215 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL | ||
216 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x009e8 | ||
217 | |||
218 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 | ||
219 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL | ||
220 | |||
221 | union uvh_lb_bau_sb_activation_status_1_u { | ||
222 | unsigned long v; | ||
223 | struct uvh_lb_bau_sb_activation_status_1_s { | ||
224 | unsigned long status : 64; /* RW */ | ||
225 | } s; | ||
226 | }; | ||
227 | |||
228 | /* ========================================================================= */ | ||
229 | /* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ | ||
230 | /* ========================================================================= */ | ||
231 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL | ||
232 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x009d0 | ||
233 | |||
234 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 | ||
235 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL | ||
236 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 | ||
237 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL | ||
238 | |||
239 | union uvh_lb_bau_sb_descriptor_base_u { | ||
240 | unsigned long v; | ||
241 | struct uvh_lb_bau_sb_descriptor_base_s { | ||
242 | unsigned long rsvd_0_11 : 12; /* */ | ||
243 | unsigned long page_address : 31; /* RW */ | ||
244 | unsigned long rsvd_43_48 : 6; /* */ | ||
245 | unsigned long node_id : 14; /* RW */ | ||
246 | unsigned long rsvd_63 : 1; /* */ | ||
247 | } s; | ||
248 | }; | ||
249 | |||
250 | /* ========================================================================= */ | ||
251 | /* UVH_NODE_ID */ | ||
252 | /* ========================================================================= */ | ||
253 | #define UVH_NODE_ID 0x0UL | ||
254 | |||
255 | #define UVH_NODE_ID_FORCE1_SHFT 0 | ||
256 | #define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL | ||
257 | #define UVH_NODE_ID_MANUFACTURER_SHFT 1 | ||
258 | #define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL | ||
259 | #define UVH_NODE_ID_PART_NUMBER_SHFT 12 | ||
260 | #define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL | ||
261 | #define UVH_NODE_ID_REVISION_SHFT 28 | ||
262 | #define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL | ||
263 | #define UVH_NODE_ID_NODE_ID_SHFT 32 | ||
264 | #define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL | ||
265 | #define UVH_NODE_ID_NODES_PER_BIT_SHFT 48 | ||
266 | #define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL | ||
267 | #define UVH_NODE_ID_NI_PORT_SHFT 56 | ||
268 | #define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL | ||
269 | |||
270 | union uvh_node_id_u { | ||
271 | unsigned long v; | ||
272 | struct uvh_node_id_s { | ||
273 | unsigned long force1 : 1; /* RO */ | ||
274 | unsigned long manufacturer : 11; /* RO */ | ||
275 | unsigned long part_number : 16; /* RO */ | ||
276 | unsigned long revision : 4; /* RO */ | ||
277 | unsigned long node_id : 15; /* RW */ | ||
278 | unsigned long rsvd_47 : 1; /* */ | ||
279 | unsigned long nodes_per_bit : 7; /* RW */ | ||
280 | unsigned long rsvd_55 : 1; /* */ | ||
281 | unsigned long ni_port : 4; /* RO */ | ||
282 | unsigned long rsvd_60_63 : 4; /* */ | ||
283 | } s; | ||
284 | }; | ||
285 | |||
286 | /* ========================================================================= */ | ||
287 | /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ | ||
288 | /* ========================================================================= */ | ||
289 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL | ||
290 | |||
291 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 | ||
292 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL | ||
293 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 46 | ||
294 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0000400000000000UL | ||
295 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 | ||
296 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL | ||
297 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
298 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
299 | |||
300 | union uvh_rh_gam_gru_overlay_config_mmr_u { | ||
301 | unsigned long v; | ||
302 | struct uvh_rh_gam_gru_overlay_config_mmr_s { | ||
303 | unsigned long rsvd_0_27: 28; /* */ | ||
304 | unsigned long base : 18; /* RW */ | ||
305 | unsigned long gr4 : 1; /* RW */ | ||
306 | unsigned long rsvd_47_51: 5; /* */ | ||
307 | unsigned long n_gru : 4; /* RW */ | ||
308 | unsigned long rsvd_56_62: 7; /* */ | ||
309 | unsigned long enable : 1; /* RW */ | ||
310 | } s; | ||
311 | }; | ||
312 | |||
313 | /* ========================================================================= */ | ||
314 | /* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ | ||
315 | /* ========================================================================= */ | ||
316 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL | ||
317 | |||
318 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 | ||
319 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL | ||
320 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 | ||
321 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL | ||
322 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
323 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
324 | |||
325 | union uvh_rh_gam_mmr_overlay_config_mmr_u { | ||
326 | unsigned long v; | ||
327 | struct uvh_rh_gam_mmr_overlay_config_mmr_s { | ||
328 | unsigned long rsvd_0_25: 26; /* */ | ||
329 | unsigned long base : 20; /* RW */ | ||
330 | unsigned long dual_hub : 1; /* RW */ | ||
331 | unsigned long rsvd_47_62: 16; /* */ | ||
332 | unsigned long enable : 1; /* RW */ | ||
333 | } s; | ||
334 | }; | ||
335 | |||
336 | /* ========================================================================= */ | ||
337 | /* UVH_RTC */ | ||
338 | /* ========================================================================= */ | ||
339 | #define UVH_RTC 0x28000UL | ||
340 | |||
341 | #define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 | ||
342 | #define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL | ||
343 | |||
344 | union uvh_rtc_u { | ||
345 | unsigned long v; | ||
346 | struct uvh_rtc_s { | ||
347 | unsigned long real_time_clock : 56; /* RW */ | ||
348 | unsigned long rsvd_56_63 : 8; /* */ | ||
349 | } s; | ||
350 | }; | ||
351 | |||
352 | /* ========================================================================= */ | ||
353 | /* UVH_SI_ADDR_MAP_CONFIG */ | ||
354 | /* ========================================================================= */ | ||
355 | #define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL | ||
356 | |||
357 | #define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0 | ||
358 | #define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL | ||
359 | #define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8 | ||
360 | #define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL | ||
361 | |||
362 | union uvh_si_addr_map_config_u { | ||
363 | unsigned long v; | ||
364 | struct uvh_si_addr_map_config_s { | ||
365 | unsigned long m_skt : 6; /* RW */ | ||
366 | unsigned long rsvd_6_7: 2; /* */ | ||
367 | unsigned long n_skt : 4; /* RW */ | ||
368 | unsigned long rsvd_12_63: 52; /* */ | ||
369 | } s; | ||
370 | }; | ||
371 | |||
372 | |||
373 | #endif /* __ASM_X86_UV_MMRS__ */ | ||
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h index 629bcb6e8e45..86e085e003d2 100644 --- a/include/asm-x86/vdso.h +++ b/include/asm-x86/vdso.h | |||
@@ -8,9 +8,11 @@ extern const char VDSO64_PRELINK[]; | |||
8 | * Given a pointer to the vDSO image, find the pointer to VDSO64_name | 8 | * Given a pointer to the vDSO image, find the pointer to VDSO64_name |
9 | * as that symbol is defined in the vDSO sources or linker script. | 9 | * as that symbol is defined in the vDSO sources or linker script. |
10 | */ | 10 | */ |
11 | #define VDSO64_SYMBOL(base, name) ({ \ | 11 | #define VDSO64_SYMBOL(base, name) \ |
12 | extern const char VDSO64_##name[]; \ | 12 | ({ \ |
13 | (void *) (VDSO64_##name - VDSO64_PRELINK + (unsigned long) (base)); }) | 13 | extern const char VDSO64_##name[]; \ |
14 | (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \ | ||
15 | }) | ||
14 | #endif | 16 | #endif |
15 | 17 | ||
16 | #if defined CONFIG_X86_32 || defined CONFIG_COMPAT | 18 | #if defined CONFIG_X86_32 || defined CONFIG_COMPAT |
@@ -20,9 +22,18 @@ extern const char VDSO32_PRELINK[]; | |||
20 | * Given a pointer to the vDSO image, find the pointer to VDSO32_name | 22 | * Given a pointer to the vDSO image, find the pointer to VDSO32_name |
21 | * as that symbol is defined in the vDSO sources or linker script. | 23 | * as that symbol is defined in the vDSO sources or linker script. |
22 | */ | 24 | */ |
23 | #define VDSO32_SYMBOL(base, name) ({ \ | 25 | #define VDSO32_SYMBOL(base, name) \ |
24 | extern const char VDSO32_##name[]; \ | 26 | ({ \ |
25 | (void *) (VDSO32_##name - VDSO32_PRELINK + (unsigned long) (base)); }) | 27 | extern const char VDSO32_##name[]; \ |
28 | (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ | ||
29 | }) | ||
26 | #endif | 30 | #endif |
27 | 31 | ||
32 | /* | ||
33 | * These symbols are defined with the addresses in the vsyscall page. | ||
34 | * See vsyscall-sigreturn.S. | ||
35 | */ | ||
36 | extern void __user __kernel_sigreturn; | ||
37 | extern void __user __kernel_rt_sigreturn; | ||
38 | |||
28 | #endif /* asm-x86/vdso.h */ | 39 | #endif /* asm-x86/vdso.h */ |
diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h index 0ecf68ac03aa..0ccf804377e6 100644 --- a/include/asm-x86/vga.h +++ b/include/asm-x86/vga.h | |||
@@ -12,9 +12,9 @@ | |||
12 | * access the videoram directly without any black magic. | 12 | * access the videoram directly without any black magic. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x) | 15 | #define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) |
16 | 16 | ||
17 | #define vga_readb(x) (*(x)) | 17 | #define vga_readb(x) (*(x)) |
18 | #define vga_writeb(x,y) (*(y) = (x)) | 18 | #define vga_writeb(x, y) (*(y) = (x)) |
19 | 19 | ||
20 | #endif | 20 | #endif |
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h index c92fe4af52e8..074b357146df 100644 --- a/include/asm-x86/vm86.h +++ b/include/asm-x86/vm86.h | |||
@@ -12,19 +12,13 @@ | |||
12 | * Linus | 12 | * Linus |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define TF_MASK 0x00000100 | 15 | #include <asm/processor-flags.h> |
16 | #define IF_MASK 0x00000200 | 16 | |
17 | #define IOPL_MASK 0x00003000 | ||
18 | #define NT_MASK 0x00004000 | ||
19 | #ifdef CONFIG_VM86 | 17 | #ifdef CONFIG_VM86 |
20 | #define VM_MASK 0x00020000 | 18 | #define X86_VM_MASK X86_EFLAGS_VM |
21 | #else | 19 | #else |
22 | #define VM_MASK 0 /* ignored */ | 20 | #define X86_VM_MASK 0 /* No VM86 support */ |
23 | #endif | 21 | #endif |
24 | #define AC_MASK 0x00040000 | ||
25 | #define VIF_MASK 0x00080000 /* virtual interrupt flag */ | ||
26 | #define VIP_MASK 0x00100000 /* virtual interrupt pending */ | ||
27 | #define ID_MASK 0x00200000 | ||
28 | 22 | ||
29 | #define BIOSSEG 0x0f000 | 23 | #define BIOSSEG 0x0f000 |
30 | 24 | ||
@@ -42,9 +36,11 @@ | |||
42 | #define VM86_ARG(retval) ((retval) >> 8) | 36 | #define VM86_ARG(retval) ((retval) >> 8) |
43 | 37 | ||
44 | #define VM86_SIGNAL 0 /* return due to signal */ | 38 | #define VM86_SIGNAL 0 /* return due to signal */ |
45 | #define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ | 39 | #define VM86_UNKNOWN 1 /* unhandled GP fault |
40 | - IO-instruction or similar */ | ||
46 | #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ | 41 | #define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ |
47 | #define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ | 42 | #define VM86_STI 3 /* sti/popf/iret instruction enabled |
43 | virtual interrupts */ | ||
48 | 44 | ||
49 | /* | 45 | /* |
50 | * Additional return values when invoking new vm86() | 46 | * Additional return values when invoking new vm86() |
@@ -205,7 +201,8 @@ void release_vm86_irqs(struct task_struct *); | |||
205 | #define handle_vm86_fault(a, b) | 201 | #define handle_vm86_fault(a, b) |
206 | #define release_vm86_irqs(a) | 202 | #define release_vm86_irqs(a) |
207 | 203 | ||
208 | static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) { | 204 | static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) |
205 | { | ||
209 | return 0; | 206 | return 0; |
210 | } | 207 | } |
211 | 208 | ||
diff --git a/include/asm-x86/vmi.h b/include/asm-x86/vmi.h index eb8bd892c01e..b7c0dea119fe 100644 --- a/include/asm-x86/vmi.h +++ b/include/asm-x86/vmi.h | |||
@@ -155,9 +155,9 @@ | |||
155 | 155 | ||
156 | #ifndef __ASSEMBLY__ | 156 | #ifndef __ASSEMBLY__ |
157 | struct vmi_relocation_info { | 157 | struct vmi_relocation_info { |
158 | unsigned char *eip; | 158 | unsigned char *eip; |
159 | unsigned char type; | 159 | unsigned char type; |
160 | unsigned char reserved[3]; | 160 | unsigned char reserved[3]; |
161 | }; | 161 | }; |
162 | #endif | 162 | #endif |
163 | 163 | ||
@@ -173,53 +173,53 @@ struct vmi_relocation_info { | |||
173 | #ifndef __ASSEMBLY__ | 173 | #ifndef __ASSEMBLY__ |
174 | 174 | ||
175 | struct vrom_header { | 175 | struct vrom_header { |
176 | u16 rom_signature; // option ROM signature | 176 | u16 rom_signature; /* option ROM signature */ |
177 | u8 rom_length; // ROM length in 512 byte chunks | 177 | u8 rom_length; /* ROM length in 512 byte chunks */ |
178 | u8 rom_entry[4]; // 16-bit code entry point | 178 | u8 rom_entry[4]; /* 16-bit code entry point */ |
179 | u8 rom_pad0; // 4-byte align pad | 179 | u8 rom_pad0; /* 4-byte align pad */ |
180 | u32 vrom_signature; // VROM identification signature | 180 | u32 vrom_signature; /* VROM identification signature */ |
181 | u8 api_version_min;// Minor version of API | 181 | u8 api_version_min;/* Minor version of API */ |
182 | u8 api_version_maj;// Major version of API | 182 | u8 api_version_maj;/* Major version of API */ |
183 | u8 jump_slots; // Number of jump slots | 183 | u8 jump_slots; /* Number of jump slots */ |
184 | u8 reserved1; // Reserved for expansion | 184 | u8 reserved1; /* Reserved for expansion */ |
185 | u32 virtual_top; // Hypervisor virtual address start | 185 | u32 virtual_top; /* Hypervisor virtual address start */ |
186 | u16 reserved2; // Reserved for expansion | 186 | u16 reserved2; /* Reserved for expansion */ |
187 | u16 license_offs; // Offset to License string | 187 | u16 license_offs; /* Offset to License string */ |
188 | u16 pci_header_offs;// Offset to PCI OPROM header | 188 | u16 pci_header_offs;/* Offset to PCI OPROM header */ |
189 | u16 pnp_header_offs;// Offset to PnP OPROM header | 189 | u16 pnp_header_offs;/* Offset to PnP OPROM header */ |
190 | u32 rom_pad3; // PnP reserverd / VMI reserved | 190 | u32 rom_pad3; /* PnP reserverd / VMI reserved */ |
191 | u8 reserved[96]; // Reserved for headers | 191 | u8 reserved[96]; /* Reserved for headers */ |
192 | char vmi_init[8]; // VMI_Init jump point | 192 | char vmi_init[8]; /* VMI_Init jump point */ |
193 | char get_reloc[8]; // VMI_GetRelocationInfo jump point | 193 | char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ |
194 | } __attribute__((packed)); | 194 | } __attribute__((packed)); |
195 | 195 | ||
196 | struct pnp_header { | 196 | struct pnp_header { |
197 | char sig[4]; | 197 | char sig[4]; |
198 | char rev; | 198 | char rev; |
199 | char size; | 199 | char size; |
200 | short next; | 200 | short next; |
201 | short res; | 201 | short res; |
202 | long devID; | 202 | long devID; |
203 | unsigned short manufacturer_offset; | 203 | unsigned short manufacturer_offset; |
204 | unsigned short product_offset; | 204 | unsigned short product_offset; |
205 | } __attribute__((packed)); | 205 | } __attribute__((packed)); |
206 | 206 | ||
207 | struct pci_header { | 207 | struct pci_header { |
208 | char sig[4]; | 208 | char sig[4]; |
209 | short vendorID; | 209 | short vendorID; |
210 | short deviceID; | 210 | short deviceID; |
211 | short vpdData; | 211 | short vpdData; |
212 | short size; | 212 | short size; |
213 | char rev; | 213 | char rev; |
214 | char class; | 214 | char class; |
215 | char subclass; | 215 | char subclass; |
216 | char interface; | 216 | char interface; |
217 | short chunks; | 217 | short chunks; |
218 | char rom_version_min; | 218 | char rom_version_min; |
219 | char rom_version_maj; | 219 | char rom_version_maj; |
220 | char codetype; | 220 | char codetype; |
221 | char lastRom; | 221 | char lastRom; |
222 | short reserved; | 222 | short reserved; |
223 | } __attribute__((packed)); | 223 | } __attribute__((packed)); |
224 | 224 | ||
225 | /* Function prototypes for bootstrapping */ | 225 | /* Function prototypes for bootstrapping */ |
diff --git a/include/asm-x86/voyager.h b/include/asm-x86/voyager.h index 91a9932937ab..9c811d2e6f91 100644 --- a/include/asm-x86/voyager.h +++ b/include/asm-x86/voyager.h | |||
@@ -91,8 +91,7 @@ | |||
91 | #define VOYAGER_WRITE_CONFIG 0x2 | 91 | #define VOYAGER_WRITE_CONFIG 0x2 |
92 | #define VOYAGER_BYPASS 0xff | 92 | #define VOYAGER_BYPASS 0xff |
93 | 93 | ||
94 | typedef struct voyager_asic | 94 | typedef struct voyager_asic { |
95 | { | ||
96 | __u8 asic_addr; /* ASIC address; Level 4 */ | 95 | __u8 asic_addr; /* ASIC address; Level 4 */ |
97 | __u8 asic_type; /* ASIC type */ | 96 | __u8 asic_type; /* ASIC type */ |
98 | __u8 asic_id; /* ASIC id */ | 97 | __u8 asic_id; /* ASIC id */ |
@@ -113,7 +112,7 @@ typedef struct voyager_module { | |||
113 | __u16 largest_reg; /* Largest register in the scan path */ | 112 | __u16 largest_reg; /* Largest register in the scan path */ |
114 | __u16 smallest_reg; /* Smallest register in the scan path */ | 113 | __u16 smallest_reg; /* Smallest register in the scan path */ |
115 | voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ | 114 | voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ |
116 | struct voyager_module *submodule; /* Submodule pointer */ | 115 | struct voyager_module *submodule; /* Submodule pointer */ |
117 | struct voyager_module *next; /* Next module in linked list */ | 116 | struct voyager_module *next; /* Next module in linked list */ |
118 | } voyager_module_t; | 117 | } voyager_module_t; |
119 | 118 | ||
@@ -135,7 +134,7 @@ typedef struct voyager_eeprom_hdr { | |||
135 | __u16 cct_offset; | 134 | __u16 cct_offset; |
136 | __u16 log_length; /* length of err log */ | 135 | __u16 log_length; /* length of err log */ |
137 | __u16 xsum_end; /* offset to end of | 136 | __u16 xsum_end; /* offset to end of |
138 | checksum */ | 137 | checksum */ |
139 | __u8 reserved[4]; | 138 | __u8 reserved[4]; |
140 | __u8 sflag; /* starting sentinal */ | 139 | __u8 sflag; /* starting sentinal */ |
141 | __u8 part_number[13]; /* prom part number */ | 140 | __u8 part_number[13]; /* prom part number */ |
@@ -148,7 +147,8 @@ typedef struct voyager_eeprom_hdr { | |||
148 | 147 | ||
149 | 148 | ||
150 | 149 | ||
151 | #define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) | 150 | #define VOYAGER_EPROM_SIZE_OFFSET \ |
151 | ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) | ||
152 | #define VOYAGER_XSUM_END_OFFSET 0x2a | 152 | #define VOYAGER_XSUM_END_OFFSET 0x2a |
153 | 153 | ||
154 | /* the following three definitions are for internal table layouts | 154 | /* the following three definitions are for internal table layouts |
@@ -199,7 +199,7 @@ typedef struct voyager_asic_data_table { | |||
199 | #define VOYAGER_WCBIC_TOM_L 0x4 | 199 | #define VOYAGER_WCBIC_TOM_L 0x4 |
200 | #define VOYAGER_WCBIC_TOM_H 0x5 | 200 | #define VOYAGER_WCBIC_TOM_H 0x5 |
201 | 201 | ||
202 | /* register defines for Voyager Memory Contol (VMC) | 202 | /* register defines for Voyager Memory Contol (VMC) |
203 | * these are present on L4 machines only */ | 203 | * these are present on L4 machines only */ |
204 | #define VOYAGER_VMC1 0x81 | 204 | #define VOYAGER_VMC1 0x81 |
205 | #define VOYAGER_VMC2 0x91 | 205 | #define VOYAGER_VMC2 0x91 |
@@ -334,7 +334,7 @@ typedef struct { | |||
334 | 334 | ||
335 | struct QuadDescription { | 335 | struct QuadDescription { |
336 | __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields | 336 | __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields |
337 | * will be zero except for slot */ | 337 | * will be zero except for slot */ |
338 | __u8 StructureVersion; | 338 | __u8 StructureVersion; |
339 | __u32 CPI_BaseAddress; | 339 | __u32 CPI_BaseAddress; |
340 | __u32 LARC_BankSize; | 340 | __u32 LARC_BankSize; |
@@ -342,7 +342,7 @@ struct QuadDescription { | |||
342 | __u8 Slot; /* Processor slots 1 - 4 */ | 342 | __u8 Slot; /* Processor slots 1 - 4 */ |
343 | } __attribute__((packed)); | 343 | } __attribute__((packed)); |
344 | 344 | ||
345 | struct ProcBoardInfo { | 345 | struct ProcBoardInfo { |
346 | __u8 Type; | 346 | __u8 Type; |
347 | __u8 StructureVersion; | 347 | __u8 StructureVersion; |
348 | __u8 NumberOfBoards; | 348 | __u8 NumberOfBoards; |
@@ -382,19 +382,30 @@ struct CPU_Info { | |||
382 | * packed in it by our friend the compiler. | 382 | * packed in it by our friend the compiler. |
383 | */ | 383 | */ |
384 | typedef struct { | 384 | typedef struct { |
385 | __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */ | 385 | __u8 Mailbox_SUS; /* Written to by SUS to give |
386 | __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */ | 386 | commands/response to the OS */ |
387 | __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */ | 387 | __u8 Mailbox_OS; /* Written to by the OS to give |
388 | __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */ | 388 | commands/response to SUS */ |
389 | __u32 OS_Flags; /* Flags set by the OS as info for SUS */ | 389 | __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the |
390 | __u32 SUS_Flags; /* Flags set by SUS as info for the OS */ | 390 | interface SUS supports */ |
391 | __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */ | 391 | __u8 OS_MailboxVersion; /* Tells SUS which iteration of the |
392 | interface the OS supports */ | ||
393 | __u32 OS_Flags; /* Flags set by the OS as info for | ||
394 | SUS */ | ||
395 | __u32 SUS_Flags; /* Flags set by SUS as info | ||
396 | for the OS */ | ||
397 | __u32 WatchDogPeriod; /* Watchdog period (in seconds) which | ||
398 | the DP uses to see if the OS | ||
399 | is dead */ | ||
392 | __u32 WatchDogCount; /* Updated by the OS on every tic. */ | 400 | __u32 WatchDogCount; /* Updated by the OS on every tic. */ |
393 | __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */ | 401 | __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS |
394 | MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */ | 402 | where to stuff the SUS error log |
403 | on a dump */ | ||
404 | MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; | ||
405 | /* Storage for MCA POS data */ | ||
395 | /* All new SECOND_PASS_INTERFACE fields added from this point */ | 406 | /* All new SECOND_PASS_INTERFACE fields added from this point */ |
396 | struct ProcBoardInfo *BoardData; | 407 | struct ProcBoardInfo *BoardData; |
397 | struct CPU_Info *CPU_Data; | 408 | struct CPU_Info *CPU_Data; |
398 | /* All new fields must be added from this point */ | 409 | /* All new fields must be added from this point */ |
399 | } Voyager_KernelSUS_Mbox_t; | 410 | } Voyager_KernelSUS_Mbox_t; |
400 | 411 | ||
@@ -478,7 +489,7 @@ struct voyager_SUS { | |||
478 | __u32 SUS_errorlog; | 489 | __u32 SUS_errorlog; |
479 | /* lots of system configuration stuff under here */ | 490 | /* lots of system configuration stuff under here */ |
480 | }; | 491 | }; |
481 | 492 | ||
482 | /* Variables exported by voyager_smp */ | 493 | /* Variables exported by voyager_smp */ |
483 | extern __u32 voyager_extended_vic_processors; | 494 | extern __u32 voyager_extended_vic_processors; |
484 | extern __u32 voyager_allowed_boot_processors; | 495 | extern __u32 voyager_allowed_boot_processors; |
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h index a41ef1bdd424..067b5c1835a3 100644 --- a/include/asm-x86/xor_32.h +++ b/include/asm-x86/xor_32.h | |||
@@ -16,12 +16,12 @@ | |||
16 | * Copyright (C) 1998 Ingo Molnar. | 16 | * Copyright (C) 1998 Ingo Molnar. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n" | 19 | #define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n" |
20 | #define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n" | 20 | #define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n" |
21 | #define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" | 21 | #define XO1(x, y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" |
22 | #define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" | 22 | #define XO2(x, y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" |
23 | #define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" | 23 | #define XO3(x, y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" |
24 | #define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" | 24 | #define XO4(x, y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" |
25 | 25 | ||
26 | #include <asm/i387.h> | 26 | #include <asm/i387.h> |
27 | 27 | ||
@@ -32,24 +32,24 @@ xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | |||
32 | 32 | ||
33 | kernel_fpu_begin(); | 33 | kernel_fpu_begin(); |
34 | 34 | ||
35 | __asm__ __volatile__ ( | 35 | asm volatile( |
36 | #undef BLOCK | 36 | #undef BLOCK |
37 | #define BLOCK(i) \ | 37 | #define BLOCK(i) \ |
38 | LD(i,0) \ | 38 | LD(i, 0) \ |
39 | LD(i+1,1) \ | 39 | LD(i + 1, 1) \ |
40 | LD(i+2,2) \ | 40 | LD(i + 2, 2) \ |
41 | LD(i+3,3) \ | 41 | LD(i + 3, 3) \ |
42 | XO1(i,0) \ | 42 | XO1(i, 0) \ |
43 | ST(i,0) \ | 43 | ST(i, 0) \ |
44 | XO1(i+1,1) \ | 44 | XO1(i+1, 1) \ |
45 | ST(i+1,1) \ | 45 | ST(i+1, 1) \ |
46 | XO1(i+2,2) \ | 46 | XO1(i + 2, 2) \ |
47 | ST(i+2,2) \ | 47 | ST(i + 2, 2) \ |
48 | XO1(i+3,3) \ | 48 | XO1(i + 3, 3) \ |
49 | ST(i+3,3) | 49 | ST(i + 3, 3) |
50 | 50 | ||
51 | " .align 32 ;\n" | 51 | " .align 32 ;\n" |
52 | " 1: ;\n" | 52 | " 1: ;\n" |
53 | 53 | ||
54 | BLOCK(0) | 54 | BLOCK(0) |
55 | BLOCK(4) | 55 | BLOCK(4) |
@@ -76,25 +76,25 @@ xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
76 | 76 | ||
77 | kernel_fpu_begin(); | 77 | kernel_fpu_begin(); |
78 | 78 | ||
79 | __asm__ __volatile__ ( | 79 | asm volatile( |
80 | #undef BLOCK | 80 | #undef BLOCK |
81 | #define BLOCK(i) \ | 81 | #define BLOCK(i) \ |
82 | LD(i,0) \ | 82 | LD(i, 0) \ |
83 | LD(i+1,1) \ | 83 | LD(i + 1, 1) \ |
84 | LD(i+2,2) \ | 84 | LD(i + 2, 2) \ |
85 | LD(i+3,3) \ | 85 | LD(i + 3, 3) \ |
86 | XO1(i,0) \ | 86 | XO1(i, 0) \ |
87 | XO1(i+1,1) \ | 87 | XO1(i + 1, 1) \ |
88 | XO1(i+2,2) \ | 88 | XO1(i + 2, 2) \ |
89 | XO1(i+3,3) \ | 89 | XO1(i + 3, 3) \ |
90 | XO2(i,0) \ | 90 | XO2(i, 0) \ |
91 | ST(i,0) \ | 91 | ST(i, 0) \ |
92 | XO2(i+1,1) \ | 92 | XO2(i + 1, 1) \ |
93 | ST(i+1,1) \ | 93 | ST(i + 1, 1) \ |
94 | XO2(i+2,2) \ | 94 | XO2(i + 2, 2) \ |
95 | ST(i+2,2) \ | 95 | ST(i + 2, 2) \ |
96 | XO2(i+3,3) \ | 96 | XO2(i + 3, 3) \ |
97 | ST(i+3,3) | 97 | ST(i + 3, 3) |
98 | 98 | ||
99 | " .align 32 ;\n" | 99 | " .align 32 ;\n" |
100 | " 1: ;\n" | 100 | " 1: ;\n" |
@@ -125,29 +125,29 @@ xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
125 | 125 | ||
126 | kernel_fpu_begin(); | 126 | kernel_fpu_begin(); |
127 | 127 | ||
128 | __asm__ __volatile__ ( | 128 | asm volatile( |
129 | #undef BLOCK | 129 | #undef BLOCK |
130 | #define BLOCK(i) \ | 130 | #define BLOCK(i) \ |
131 | LD(i,0) \ | 131 | LD(i, 0) \ |
132 | LD(i+1,1) \ | 132 | LD(i + 1, 1) \ |
133 | LD(i+2,2) \ | 133 | LD(i + 2, 2) \ |
134 | LD(i+3,3) \ | 134 | LD(i + 3, 3) \ |
135 | XO1(i,0) \ | 135 | XO1(i, 0) \ |
136 | XO1(i+1,1) \ | 136 | XO1(i + 1, 1) \ |
137 | XO1(i+2,2) \ | 137 | XO1(i + 2, 2) \ |
138 | XO1(i+3,3) \ | 138 | XO1(i + 3, 3) \ |
139 | XO2(i,0) \ | 139 | XO2(i, 0) \ |
140 | XO2(i+1,1) \ | 140 | XO2(i + 1, 1) \ |
141 | XO2(i+2,2) \ | 141 | XO2(i + 2, 2) \ |
142 | XO2(i+3,3) \ | 142 | XO2(i + 3, 3) \ |
143 | XO3(i,0) \ | 143 | XO3(i, 0) \ |
144 | ST(i,0) \ | 144 | ST(i, 0) \ |
145 | XO3(i+1,1) \ | 145 | XO3(i + 1, 1) \ |
146 | ST(i+1,1) \ | 146 | ST(i + 1, 1) \ |
147 | XO3(i+2,2) \ | 147 | XO3(i + 2, 2) \ |
148 | ST(i+2,2) \ | 148 | ST(i + 2, 2) \ |
149 | XO3(i+3,3) \ | 149 | XO3(i + 3, 3) \ |
150 | ST(i+3,3) | 150 | ST(i + 3, 3) |
151 | 151 | ||
152 | " .align 32 ;\n" | 152 | " .align 32 ;\n" |
153 | " 1: ;\n" | 153 | " 1: ;\n" |
@@ -186,35 +186,35 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
186 | because we modify p4 and p5 there, but we can't mark them | 186 | because we modify p4 and p5 there, but we can't mark them |
187 | as read/write, otherwise we'd overflow the 10-asm-operands | 187 | as read/write, otherwise we'd overflow the 10-asm-operands |
188 | limit of GCC < 3.1. */ | 188 | limit of GCC < 3.1. */ |
189 | __asm__ ("" : "+r" (p4), "+r" (p5)); | 189 | asm("" : "+r" (p4), "+r" (p5)); |
190 | 190 | ||
191 | __asm__ __volatile__ ( | 191 | asm volatile( |
192 | #undef BLOCK | 192 | #undef BLOCK |
193 | #define BLOCK(i) \ | 193 | #define BLOCK(i) \ |
194 | LD(i,0) \ | 194 | LD(i, 0) \ |
195 | LD(i+1,1) \ | 195 | LD(i + 1, 1) \ |
196 | LD(i+2,2) \ | 196 | LD(i + 2, 2) \ |
197 | LD(i+3,3) \ | 197 | LD(i + 3, 3) \ |
198 | XO1(i,0) \ | 198 | XO1(i, 0) \ |
199 | XO1(i+1,1) \ | 199 | XO1(i + 1, 1) \ |
200 | XO1(i+2,2) \ | 200 | XO1(i + 2, 2) \ |
201 | XO1(i+3,3) \ | 201 | XO1(i + 3, 3) \ |
202 | XO2(i,0) \ | 202 | XO2(i, 0) \ |
203 | XO2(i+1,1) \ | 203 | XO2(i + 1, 1) \ |
204 | XO2(i+2,2) \ | 204 | XO2(i + 2, 2) \ |
205 | XO2(i+3,3) \ | 205 | XO2(i + 3, 3) \ |
206 | XO3(i,0) \ | 206 | XO3(i, 0) \ |
207 | XO3(i+1,1) \ | 207 | XO3(i + 1, 1) \ |
208 | XO3(i+2,2) \ | 208 | XO3(i + 2, 2) \ |
209 | XO3(i+3,3) \ | 209 | XO3(i + 3, 3) \ |
210 | XO4(i,0) \ | 210 | XO4(i, 0) \ |
211 | ST(i,0) \ | 211 | ST(i, 0) \ |
212 | XO4(i+1,1) \ | 212 | XO4(i + 1, 1) \ |
213 | ST(i+1,1) \ | 213 | ST(i + 1, 1) \ |
214 | XO4(i+2,2) \ | 214 | XO4(i + 2, 2) \ |
215 | ST(i+2,2) \ | 215 | ST(i + 2, 2) \ |
216 | XO4(i+3,3) \ | 216 | XO4(i + 3, 3) \ |
217 | ST(i+3,3) | 217 | ST(i + 3, 3) |
218 | 218 | ||
219 | " .align 32 ;\n" | 219 | " .align 32 ;\n" |
220 | " 1: ;\n" | 220 | " 1: ;\n" |
@@ -233,13 +233,13 @@ xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
233 | " jnz 1b ;\n" | 233 | " jnz 1b ;\n" |
234 | : "+r" (lines), | 234 | : "+r" (lines), |
235 | "+r" (p1), "+r" (p2), "+r" (p3) | 235 | "+r" (p1), "+r" (p2), "+r" (p3) |
236 | : "r" (p4), "r" (p5) | 236 | : "r" (p4), "r" (p5) |
237 | : "memory"); | 237 | : "memory"); |
238 | 238 | ||
239 | /* p4 and p5 were modified, and now the variables are dead. | 239 | /* p4 and p5 were modified, and now the variables are dead. |
240 | Clobber them just to be sure nobody does something stupid | 240 | Clobber them just to be sure nobody does something stupid |
241 | like assuming they have some legal value. */ | 241 | like assuming they have some legal value. */ |
242 | __asm__ ("" : "=r" (p4), "=r" (p5)); | 242 | asm("" : "=r" (p4), "=r" (p5)); |
243 | 243 | ||
244 | kernel_fpu_end(); | 244 | kernel_fpu_end(); |
245 | } | 245 | } |
@@ -259,7 +259,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | |||
259 | 259 | ||
260 | kernel_fpu_begin(); | 260 | kernel_fpu_begin(); |
261 | 261 | ||
262 | __asm__ __volatile__ ( | 262 | asm volatile( |
263 | " .align 32 ;\n" | 263 | " .align 32 ;\n" |
264 | " 1: ;\n" | 264 | " 1: ;\n" |
265 | " movq (%1), %%mm0 ;\n" | 265 | " movq (%1), %%mm0 ;\n" |
@@ -286,7 +286,7 @@ xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | |||
286 | " pxor 56(%2), %%mm7 ;\n" | 286 | " pxor 56(%2), %%mm7 ;\n" |
287 | " movq %%mm6, 48(%1) ;\n" | 287 | " movq %%mm6, 48(%1) ;\n" |
288 | " movq %%mm7, 56(%1) ;\n" | 288 | " movq %%mm7, 56(%1) ;\n" |
289 | 289 | ||
290 | " addl $64, %1 ;\n" | 290 | " addl $64, %1 ;\n" |
291 | " addl $64, %2 ;\n" | 291 | " addl $64, %2 ;\n" |
292 | " decl %0 ;\n" | 292 | " decl %0 ;\n" |
@@ -307,7 +307,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
307 | 307 | ||
308 | kernel_fpu_begin(); | 308 | kernel_fpu_begin(); |
309 | 309 | ||
310 | __asm__ __volatile__ ( | 310 | asm volatile( |
311 | " .align 32,0x90 ;\n" | 311 | " .align 32,0x90 ;\n" |
312 | " 1: ;\n" | 312 | " 1: ;\n" |
313 | " movq (%1), %%mm0 ;\n" | 313 | " movq (%1), %%mm0 ;\n" |
@@ -342,7 +342,7 @@ xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
342 | " pxor 56(%3), %%mm7 ;\n" | 342 | " pxor 56(%3), %%mm7 ;\n" |
343 | " movq %%mm6, 48(%1) ;\n" | 343 | " movq %%mm6, 48(%1) ;\n" |
344 | " movq %%mm7, 56(%1) ;\n" | 344 | " movq %%mm7, 56(%1) ;\n" |
345 | 345 | ||
346 | " addl $64, %1 ;\n" | 346 | " addl $64, %1 ;\n" |
347 | " addl $64, %2 ;\n" | 347 | " addl $64, %2 ;\n" |
348 | " addl $64, %3 ;\n" | 348 | " addl $64, %3 ;\n" |
@@ -364,7 +364,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
364 | 364 | ||
365 | kernel_fpu_begin(); | 365 | kernel_fpu_begin(); |
366 | 366 | ||
367 | __asm__ __volatile__ ( | 367 | asm volatile( |
368 | " .align 32,0x90 ;\n" | 368 | " .align 32,0x90 ;\n" |
369 | " 1: ;\n" | 369 | " 1: ;\n" |
370 | " movq (%1), %%mm0 ;\n" | 370 | " movq (%1), %%mm0 ;\n" |
@@ -407,7 +407,7 @@ xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
407 | " pxor 56(%4), %%mm7 ;\n" | 407 | " pxor 56(%4), %%mm7 ;\n" |
408 | " movq %%mm6, 48(%1) ;\n" | 408 | " movq %%mm6, 48(%1) ;\n" |
409 | " movq %%mm7, 56(%1) ;\n" | 409 | " movq %%mm7, 56(%1) ;\n" |
410 | 410 | ||
411 | " addl $64, %1 ;\n" | 411 | " addl $64, %1 ;\n" |
412 | " addl $64, %2 ;\n" | 412 | " addl $64, %2 ;\n" |
413 | " addl $64, %3 ;\n" | 413 | " addl $64, %3 ;\n" |
@@ -436,9 +436,9 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
436 | because we modify p4 and p5 there, but we can't mark them | 436 | because we modify p4 and p5 there, but we can't mark them |
437 | as read/write, otherwise we'd overflow the 10-asm-operands | 437 | as read/write, otherwise we'd overflow the 10-asm-operands |
438 | limit of GCC < 3.1. */ | 438 | limit of GCC < 3.1. */ |
439 | __asm__ ("" : "+r" (p4), "+r" (p5)); | 439 | asm("" : "+r" (p4), "+r" (p5)); |
440 | 440 | ||
441 | __asm__ __volatile__ ( | 441 | asm volatile( |
442 | " .align 32,0x90 ;\n" | 442 | " .align 32,0x90 ;\n" |
443 | " 1: ;\n" | 443 | " 1: ;\n" |
444 | " movq (%1), %%mm0 ;\n" | 444 | " movq (%1), %%mm0 ;\n" |
@@ -489,7 +489,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
489 | " pxor 56(%5), %%mm7 ;\n" | 489 | " pxor 56(%5), %%mm7 ;\n" |
490 | " movq %%mm6, 48(%1) ;\n" | 490 | " movq %%mm6, 48(%1) ;\n" |
491 | " movq %%mm7, 56(%1) ;\n" | 491 | " movq %%mm7, 56(%1) ;\n" |
492 | 492 | ||
493 | " addl $64, %1 ;\n" | 493 | " addl $64, %1 ;\n" |
494 | " addl $64, %2 ;\n" | 494 | " addl $64, %2 ;\n" |
495 | " addl $64, %3 ;\n" | 495 | " addl $64, %3 ;\n" |
@@ -505,7 +505,7 @@ xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
505 | /* p4 and p5 were modified, and now the variables are dead. | 505 | /* p4 and p5 were modified, and now the variables are dead. |
506 | Clobber them just to be sure nobody does something stupid | 506 | Clobber them just to be sure nobody does something stupid |
507 | like assuming they have some legal value. */ | 507 | like assuming they have some legal value. */ |
508 | __asm__ ("" : "=r" (p4), "=r" (p5)); | 508 | asm("" : "=r" (p4), "=r" (p5)); |
509 | 509 | ||
510 | kernel_fpu_end(); | 510 | kernel_fpu_end(); |
511 | } | 511 | } |
@@ -531,11 +531,12 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
531 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | 531 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) |
532 | */ | 532 | */ |
533 | 533 | ||
534 | #define XMMS_SAVE do { \ | 534 | #define XMMS_SAVE \ |
535 | do { \ | ||
535 | preempt_disable(); \ | 536 | preempt_disable(); \ |
536 | cr0 = read_cr0(); \ | 537 | cr0 = read_cr0(); \ |
537 | clts(); \ | 538 | clts(); \ |
538 | __asm__ __volatile__ ( \ | 539 | asm volatile( \ |
539 | "movups %%xmm0,(%0) ;\n\t" \ | 540 | "movups %%xmm0,(%0) ;\n\t" \ |
540 | "movups %%xmm1,0x10(%0) ;\n\t" \ | 541 | "movups %%xmm1,0x10(%0) ;\n\t" \ |
541 | "movups %%xmm2,0x20(%0) ;\n\t" \ | 542 | "movups %%xmm2,0x20(%0) ;\n\t" \ |
@@ -543,10 +544,11 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
543 | : \ | 544 | : \ |
544 | : "r" (xmm_save) \ | 545 | : "r" (xmm_save) \ |
545 | : "memory"); \ | 546 | : "memory"); \ |
546 | } while(0) | 547 | } while (0) |
547 | 548 | ||
548 | #define XMMS_RESTORE do { \ | 549 | #define XMMS_RESTORE \ |
549 | __asm__ __volatile__ ( \ | 550 | do { \ |
551 | asm volatile( \ | ||
550 | "sfence ;\n\t" \ | 552 | "sfence ;\n\t" \ |
551 | "movups (%0),%%xmm0 ;\n\t" \ | 553 | "movups (%0),%%xmm0 ;\n\t" \ |
552 | "movups 0x10(%0),%%xmm1 ;\n\t" \ | 554 | "movups 0x10(%0),%%xmm1 ;\n\t" \ |
@@ -557,76 +559,76 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
557 | : "memory"); \ | 559 | : "memory"); \ |
558 | write_cr0(cr0); \ | 560 | write_cr0(cr0); \ |
559 | preempt_enable(); \ | 561 | preempt_enable(); \ |
560 | } while(0) | 562 | } while (0) |
561 | 563 | ||
562 | #define ALIGN16 __attribute__((aligned(16))) | 564 | #define ALIGN16 __attribute__((aligned(16))) |
563 | 565 | ||
564 | #define OFFS(x) "16*("#x")" | 566 | #define OFFS(x) "16*("#x")" |
565 | #define PF_OFFS(x) "256+16*("#x")" | 567 | #define PF_OFFS(x) "256+16*("#x")" |
566 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" | 568 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" |
567 | #define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" | 569 | #define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" |
568 | #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" | 570 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" |
569 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" | 571 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" |
570 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" | 572 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" |
571 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" | 573 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" |
572 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" | 574 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" |
573 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" | 575 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" |
574 | #define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" | 576 | #define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" |
575 | #define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" | 577 | #define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" |
576 | #define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" | 578 | #define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" |
577 | #define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" | 579 | #define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" |
578 | #define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" | 580 | #define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" |
579 | 581 | ||
580 | 582 | ||
581 | static void | 583 | static void |
582 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | 584 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) |
583 | { | 585 | { |
584 | unsigned long lines = bytes >> 8; | 586 | unsigned long lines = bytes >> 8; |
585 | char xmm_save[16*4] ALIGN16; | 587 | char xmm_save[16*4] ALIGN16; |
586 | int cr0; | 588 | int cr0; |
587 | 589 | ||
588 | XMMS_SAVE; | 590 | XMMS_SAVE; |
589 | 591 | ||
590 | __asm__ __volatile__ ( | 592 | asm volatile( |
591 | #undef BLOCK | 593 | #undef BLOCK |
592 | #define BLOCK(i) \ | 594 | #define BLOCK(i) \ |
593 | LD(i,0) \ | 595 | LD(i, 0) \ |
594 | LD(i+1,1) \ | 596 | LD(i + 1, 1) \ |
595 | PF1(i) \ | 597 | PF1(i) \ |
596 | PF1(i+2) \ | 598 | PF1(i + 2) \ |
597 | LD(i+2,2) \ | 599 | LD(i + 2, 2) \ |
598 | LD(i+3,3) \ | 600 | LD(i + 3, 3) \ |
599 | PF0(i+4) \ | 601 | PF0(i + 4) \ |
600 | PF0(i+6) \ | 602 | PF0(i + 6) \ |
601 | XO1(i,0) \ | 603 | XO1(i, 0) \ |
602 | XO1(i+1,1) \ | 604 | XO1(i + 1, 1) \ |
603 | XO1(i+2,2) \ | 605 | XO1(i + 2, 2) \ |
604 | XO1(i+3,3) \ | 606 | XO1(i + 3, 3) \ |
605 | ST(i,0) \ | 607 | ST(i, 0) \ |
606 | ST(i+1,1) \ | 608 | ST(i + 1, 1) \ |
607 | ST(i+2,2) \ | 609 | ST(i + 2, 2) \ |
608 | ST(i+3,3) \ | 610 | ST(i + 3, 3) \ |
609 | 611 | ||
610 | 612 | ||
611 | PF0(0) | 613 | PF0(0) |
612 | PF0(2) | 614 | PF0(2) |
613 | 615 | ||
614 | " .align 32 ;\n" | 616 | " .align 32 ;\n" |
615 | " 1: ;\n" | 617 | " 1: ;\n" |
616 | 618 | ||
617 | BLOCK(0) | 619 | BLOCK(0) |
618 | BLOCK(4) | 620 | BLOCK(4) |
619 | BLOCK(8) | 621 | BLOCK(8) |
620 | BLOCK(12) | 622 | BLOCK(12) |
621 | 623 | ||
622 | " addl $256, %1 ;\n" | 624 | " addl $256, %1 ;\n" |
623 | " addl $256, %2 ;\n" | 625 | " addl $256, %2 ;\n" |
624 | " decl %0 ;\n" | 626 | " decl %0 ;\n" |
625 | " jnz 1b ;\n" | 627 | " jnz 1b ;\n" |
626 | : "+r" (lines), | 628 | : "+r" (lines), |
627 | "+r" (p1), "+r" (p2) | 629 | "+r" (p1), "+r" (p2) |
628 | : | 630 | : |
629 | : "memory"); | 631 | : "memory"); |
630 | 632 | ||
631 | XMMS_RESTORE; | 633 | XMMS_RESTORE; |
632 | } | 634 | } |
@@ -635,59 +637,59 @@ static void | |||
635 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | 637 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, |
636 | unsigned long *p3) | 638 | unsigned long *p3) |
637 | { | 639 | { |
638 | unsigned long lines = bytes >> 8; | 640 | unsigned long lines = bytes >> 8; |
639 | char xmm_save[16*4] ALIGN16; | 641 | char xmm_save[16*4] ALIGN16; |
640 | int cr0; | 642 | int cr0; |
641 | 643 | ||
642 | XMMS_SAVE; | 644 | XMMS_SAVE; |
643 | 645 | ||
644 | __asm__ __volatile__ ( | 646 | asm volatile( |
645 | #undef BLOCK | 647 | #undef BLOCK |
646 | #define BLOCK(i) \ | 648 | #define BLOCK(i) \ |
647 | PF1(i) \ | 649 | PF1(i) \ |
648 | PF1(i+2) \ | 650 | PF1(i + 2) \ |
649 | LD(i,0) \ | 651 | LD(i,0) \ |
650 | LD(i+1,1) \ | 652 | LD(i + 1, 1) \ |
651 | LD(i+2,2) \ | 653 | LD(i + 2, 2) \ |
652 | LD(i+3,3) \ | 654 | LD(i + 3, 3) \ |
653 | PF2(i) \ | 655 | PF2(i) \ |
654 | PF2(i+2) \ | 656 | PF2(i + 2) \ |
655 | PF0(i+4) \ | 657 | PF0(i + 4) \ |
656 | PF0(i+6) \ | 658 | PF0(i + 6) \ |
657 | XO1(i,0) \ | 659 | XO1(i,0) \ |
658 | XO1(i+1,1) \ | 660 | XO1(i + 1, 1) \ |
659 | XO1(i+2,2) \ | 661 | XO1(i + 2, 2) \ |
660 | XO1(i+3,3) \ | 662 | XO1(i + 3, 3) \ |
661 | XO2(i,0) \ | 663 | XO2(i,0) \ |
662 | XO2(i+1,1) \ | 664 | XO2(i + 1, 1) \ |
663 | XO2(i+2,2) \ | 665 | XO2(i + 2, 2) \ |
664 | XO2(i+3,3) \ | 666 | XO2(i + 3, 3) \ |
665 | ST(i,0) \ | 667 | ST(i,0) \ |
666 | ST(i+1,1) \ | 668 | ST(i + 1, 1) \ |
667 | ST(i+2,2) \ | 669 | ST(i + 2, 2) \ |
668 | ST(i+3,3) \ | 670 | ST(i + 3, 3) \ |
669 | 671 | ||
670 | 672 | ||
671 | PF0(0) | 673 | PF0(0) |
672 | PF0(2) | 674 | PF0(2) |
673 | 675 | ||
674 | " .align 32 ;\n" | 676 | " .align 32 ;\n" |
675 | " 1: ;\n" | 677 | " 1: ;\n" |
676 | 678 | ||
677 | BLOCK(0) | 679 | BLOCK(0) |
678 | BLOCK(4) | 680 | BLOCK(4) |
679 | BLOCK(8) | 681 | BLOCK(8) |
680 | BLOCK(12) | 682 | BLOCK(12) |
681 | 683 | ||
682 | " addl $256, %1 ;\n" | 684 | " addl $256, %1 ;\n" |
683 | " addl $256, %2 ;\n" | 685 | " addl $256, %2 ;\n" |
684 | " addl $256, %3 ;\n" | 686 | " addl $256, %3 ;\n" |
685 | " decl %0 ;\n" | 687 | " decl %0 ;\n" |
686 | " jnz 1b ;\n" | 688 | " jnz 1b ;\n" |
687 | : "+r" (lines), | 689 | : "+r" (lines), |
688 | "+r" (p1), "+r"(p2), "+r"(p3) | 690 | "+r" (p1), "+r"(p2), "+r"(p3) |
689 | : | 691 | : |
690 | : "memory" ); | 692 | : "memory" ); |
691 | 693 | ||
692 | XMMS_RESTORE; | 694 | XMMS_RESTORE; |
693 | } | 695 | } |
@@ -696,66 +698,66 @@ static void | |||
696 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | 698 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, |
697 | unsigned long *p3, unsigned long *p4) | 699 | unsigned long *p3, unsigned long *p4) |
698 | { | 700 | { |
699 | unsigned long lines = bytes >> 8; | 701 | unsigned long lines = bytes >> 8; |
700 | char xmm_save[16*4] ALIGN16; | 702 | char xmm_save[16*4] ALIGN16; |
701 | int cr0; | 703 | int cr0; |
702 | 704 | ||
703 | XMMS_SAVE; | 705 | XMMS_SAVE; |
704 | 706 | ||
705 | __asm__ __volatile__ ( | 707 | asm volatile( |
706 | #undef BLOCK | 708 | #undef BLOCK |
707 | #define BLOCK(i) \ | 709 | #define BLOCK(i) \ |
708 | PF1(i) \ | 710 | PF1(i) \ |
709 | PF1(i+2) \ | 711 | PF1(i + 2) \ |
710 | LD(i,0) \ | 712 | LD(i,0) \ |
711 | LD(i+1,1) \ | 713 | LD(i + 1, 1) \ |
712 | LD(i+2,2) \ | 714 | LD(i + 2, 2) \ |
713 | LD(i+3,3) \ | 715 | LD(i + 3, 3) \ |
714 | PF2(i) \ | 716 | PF2(i) \ |
715 | PF2(i+2) \ | 717 | PF2(i + 2) \ |
716 | XO1(i,0) \ | 718 | XO1(i,0) \ |
717 | XO1(i+1,1) \ | 719 | XO1(i + 1, 1) \ |
718 | XO1(i+2,2) \ | 720 | XO1(i + 2, 2) \ |
719 | XO1(i+3,3) \ | 721 | XO1(i + 3, 3) \ |
720 | PF3(i) \ | 722 | PF3(i) \ |
721 | PF3(i+2) \ | 723 | PF3(i + 2) \ |
722 | PF0(i+4) \ | 724 | PF0(i + 4) \ |
723 | PF0(i+6) \ | 725 | PF0(i + 6) \ |
724 | XO2(i,0) \ | 726 | XO2(i,0) \ |
725 | XO2(i+1,1) \ | 727 | XO2(i + 1, 1) \ |
726 | XO2(i+2,2) \ | 728 | XO2(i + 2, 2) \ |
727 | XO2(i+3,3) \ | 729 | XO2(i + 3, 3) \ |
728 | XO3(i,0) \ | 730 | XO3(i,0) \ |
729 | XO3(i+1,1) \ | 731 | XO3(i + 1, 1) \ |
730 | XO3(i+2,2) \ | 732 | XO3(i + 2, 2) \ |
731 | XO3(i+3,3) \ | 733 | XO3(i + 3, 3) \ |
732 | ST(i,0) \ | 734 | ST(i,0) \ |
733 | ST(i+1,1) \ | 735 | ST(i + 1, 1) \ |
734 | ST(i+2,2) \ | 736 | ST(i + 2, 2) \ |
735 | ST(i+3,3) \ | 737 | ST(i + 3, 3) \ |
736 | 738 | ||
737 | 739 | ||
738 | PF0(0) | 740 | PF0(0) |
739 | PF0(2) | 741 | PF0(2) |
740 | 742 | ||
741 | " .align 32 ;\n" | 743 | " .align 32 ;\n" |
742 | " 1: ;\n" | 744 | " 1: ;\n" |
743 | 745 | ||
744 | BLOCK(0) | 746 | BLOCK(0) |
745 | BLOCK(4) | 747 | BLOCK(4) |
746 | BLOCK(8) | 748 | BLOCK(8) |
747 | BLOCK(12) | 749 | BLOCK(12) |
748 | 750 | ||
749 | " addl $256, %1 ;\n" | 751 | " addl $256, %1 ;\n" |
750 | " addl $256, %2 ;\n" | 752 | " addl $256, %2 ;\n" |
751 | " addl $256, %3 ;\n" | 753 | " addl $256, %3 ;\n" |
752 | " addl $256, %4 ;\n" | 754 | " addl $256, %4 ;\n" |
753 | " decl %0 ;\n" | 755 | " decl %0 ;\n" |
754 | " jnz 1b ;\n" | 756 | " jnz 1b ;\n" |
755 | : "+r" (lines), | 757 | : "+r" (lines), |
756 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) | 758 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) |
757 | : | 759 | : |
758 | : "memory" ); | 760 | : "memory" ); |
759 | 761 | ||
760 | XMMS_RESTORE; | 762 | XMMS_RESTORE; |
761 | } | 763 | } |
@@ -764,7 +766,7 @@ static void | |||
764 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | 766 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, |
765 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | 767 | unsigned long *p3, unsigned long *p4, unsigned long *p5) |
766 | { | 768 | { |
767 | unsigned long lines = bytes >> 8; | 769 | unsigned long lines = bytes >> 8; |
768 | char xmm_save[16*4] ALIGN16; | 770 | char xmm_save[16*4] ALIGN16; |
769 | int cr0; | 771 | int cr0; |
770 | 772 | ||
@@ -776,65 +778,65 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
776 | because we modify p4 and p5 there, but we can't mark them | 778 | because we modify p4 and p5 there, but we can't mark them |
777 | as read/write, otherwise we'd overflow the 10-asm-operands | 779 | as read/write, otherwise we'd overflow the 10-asm-operands |
778 | limit of GCC < 3.1. */ | 780 | limit of GCC < 3.1. */ |
779 | __asm__ ("" : "+r" (p4), "+r" (p5)); | 781 | asm("" : "+r" (p4), "+r" (p5)); |
780 | 782 | ||
781 | __asm__ __volatile__ ( | 783 | asm volatile( |
782 | #undef BLOCK | 784 | #undef BLOCK |
783 | #define BLOCK(i) \ | 785 | #define BLOCK(i) \ |
784 | PF1(i) \ | 786 | PF1(i) \ |
785 | PF1(i+2) \ | 787 | PF1(i + 2) \ |
786 | LD(i,0) \ | 788 | LD(i,0) \ |
787 | LD(i+1,1) \ | 789 | LD(i + 1, 1) \ |
788 | LD(i+2,2) \ | 790 | LD(i + 2, 2) \ |
789 | LD(i+3,3) \ | 791 | LD(i + 3, 3) \ |
790 | PF2(i) \ | 792 | PF2(i) \ |
791 | PF2(i+2) \ | 793 | PF2(i + 2) \ |
792 | XO1(i,0) \ | 794 | XO1(i,0) \ |
793 | XO1(i+1,1) \ | 795 | XO1(i + 1, 1) \ |
794 | XO1(i+2,2) \ | 796 | XO1(i + 2, 2) \ |
795 | XO1(i+3,3) \ | 797 | XO1(i + 3, 3) \ |
796 | PF3(i) \ | 798 | PF3(i) \ |
797 | PF3(i+2) \ | 799 | PF3(i + 2) \ |
798 | XO2(i,0) \ | 800 | XO2(i,0) \ |
799 | XO2(i+1,1) \ | 801 | XO2(i + 1, 1) \ |
800 | XO2(i+2,2) \ | 802 | XO2(i + 2, 2) \ |
801 | XO2(i+3,3) \ | 803 | XO2(i + 3, 3) \ |
802 | PF4(i) \ | 804 | PF4(i) \ |
803 | PF4(i+2) \ | 805 | PF4(i + 2) \ |
804 | PF0(i+4) \ | 806 | PF0(i + 4) \ |
805 | PF0(i+6) \ | 807 | PF0(i + 6) \ |
806 | XO3(i,0) \ | 808 | XO3(i,0) \ |
807 | XO3(i+1,1) \ | 809 | XO3(i + 1, 1) \ |
808 | XO3(i+2,2) \ | 810 | XO3(i + 2, 2) \ |
809 | XO3(i+3,3) \ | 811 | XO3(i + 3, 3) \ |
810 | XO4(i,0) \ | 812 | XO4(i,0) \ |
811 | XO4(i+1,1) \ | 813 | XO4(i + 1, 1) \ |
812 | XO4(i+2,2) \ | 814 | XO4(i + 2, 2) \ |
813 | XO4(i+3,3) \ | 815 | XO4(i + 3, 3) \ |
814 | ST(i,0) \ | 816 | ST(i,0) \ |
815 | ST(i+1,1) \ | 817 | ST(i + 1, 1) \ |
816 | ST(i+2,2) \ | 818 | ST(i + 2, 2) \ |
817 | ST(i+3,3) \ | 819 | ST(i + 3, 3) \ |
818 | 820 | ||
819 | 821 | ||
820 | PF0(0) | 822 | PF0(0) |
821 | PF0(2) | 823 | PF0(2) |
822 | 824 | ||
823 | " .align 32 ;\n" | 825 | " .align 32 ;\n" |
824 | " 1: ;\n" | 826 | " 1: ;\n" |
825 | 827 | ||
826 | BLOCK(0) | 828 | BLOCK(0) |
827 | BLOCK(4) | 829 | BLOCK(4) |
828 | BLOCK(8) | 830 | BLOCK(8) |
829 | BLOCK(12) | 831 | BLOCK(12) |
830 | 832 | ||
831 | " addl $256, %1 ;\n" | 833 | " addl $256, %1 ;\n" |
832 | " addl $256, %2 ;\n" | 834 | " addl $256, %2 ;\n" |
833 | " addl $256, %3 ;\n" | 835 | " addl $256, %3 ;\n" |
834 | " addl $256, %4 ;\n" | 836 | " addl $256, %4 ;\n" |
835 | " addl $256, %5 ;\n" | 837 | " addl $256, %5 ;\n" |
836 | " decl %0 ;\n" | 838 | " decl %0 ;\n" |
837 | " jnz 1b ;\n" | 839 | " jnz 1b ;\n" |
838 | : "+r" (lines), | 840 | : "+r" (lines), |
839 | "+r" (p1), "+r" (p2), "+r" (p3) | 841 | "+r" (p1), "+r" (p2), "+r" (p3) |
840 | : "r" (p4), "r" (p5) | 842 | : "r" (p4), "r" (p5) |
@@ -843,17 +845,17 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
843 | /* p4 and p5 were modified, and now the variables are dead. | 845 | /* p4 and p5 were modified, and now the variables are dead. |
844 | Clobber them just to be sure nobody does something stupid | 846 | Clobber them just to be sure nobody does something stupid |
845 | like assuming they have some legal value. */ | 847 | like assuming they have some legal value. */ |
846 | __asm__ ("" : "=r" (p4), "=r" (p5)); | 848 | asm("" : "=r" (p4), "=r" (p5)); |
847 | 849 | ||
848 | XMMS_RESTORE; | 850 | XMMS_RESTORE; |
849 | } | 851 | } |
850 | 852 | ||
851 | static struct xor_block_template xor_block_pIII_sse = { | 853 | static struct xor_block_template xor_block_pIII_sse = { |
852 | .name = "pIII_sse", | 854 | .name = "pIII_sse", |
853 | .do_2 = xor_sse_2, | 855 | .do_2 = xor_sse_2, |
854 | .do_3 = xor_sse_3, | 856 | .do_3 = xor_sse_3, |
855 | .do_4 = xor_sse_4, | 857 | .do_4 = xor_sse_4, |
856 | .do_5 = xor_sse_5, | 858 | .do_5 = xor_sse_5, |
857 | }; | 859 | }; |
858 | 860 | ||
859 | /* Also try the generic routines. */ | 861 | /* Also try the generic routines. */ |
@@ -861,21 +863,21 @@ static struct xor_block_template xor_block_pIII_sse = { | |||
861 | 863 | ||
862 | #undef XOR_TRY_TEMPLATES | 864 | #undef XOR_TRY_TEMPLATES |
863 | #define XOR_TRY_TEMPLATES \ | 865 | #define XOR_TRY_TEMPLATES \ |
864 | do { \ | 866 | do { \ |
865 | xor_speed(&xor_block_8regs); \ | 867 | xor_speed(&xor_block_8regs); \ |
866 | xor_speed(&xor_block_8regs_p); \ | 868 | xor_speed(&xor_block_8regs_p); \ |
867 | xor_speed(&xor_block_32regs); \ | 869 | xor_speed(&xor_block_32regs); \ |
868 | xor_speed(&xor_block_32regs_p); \ | 870 | xor_speed(&xor_block_32regs_p); \ |
869 | if (cpu_has_xmm) \ | 871 | if (cpu_has_xmm) \ |
870 | xor_speed(&xor_block_pIII_sse); \ | 872 | xor_speed(&xor_block_pIII_sse); \ |
871 | if (cpu_has_mmx) { \ | 873 | if (cpu_has_mmx) { \ |
872 | xor_speed(&xor_block_pII_mmx); \ | 874 | xor_speed(&xor_block_pII_mmx); \ |
873 | xor_speed(&xor_block_p5_mmx); \ | 875 | xor_speed(&xor_block_p5_mmx); \ |
874 | } \ | 876 | } \ |
875 | } while (0) | 877 | } while (0) |
876 | 878 | ||
877 | /* We force the use of the SSE xor block because it can write around L2. | 879 | /* We force the use of the SSE xor block because it can write around L2. |
878 | We may also be able to load into the L1 only depending on how the cpu | 880 | We may also be able to load into the L1 only depending on how the cpu |
879 | deals with a load to a line that is being prefetched. */ | 881 | deals with a load to a line that is being prefetched. */ |
880 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | 882 | #define XOR_SELECT_TEMPLATE(FASTEST) \ |
881 | (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) | 883 | (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) |
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h index 1eee7fcb2420..24957e39ac8a 100644 --- a/include/asm-x86/xor_64.h +++ b/include/asm-x86/xor_64.h | |||
@@ -24,20 +24,23 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * x86-64 changes / gcc fixes from Andi Kleen. | 27 | * x86-64 changes / gcc fixes from Andi Kleen. |
28 | * Copyright 2002 Andi Kleen, SuSE Labs. | 28 | * Copyright 2002 Andi Kleen, SuSE Labs. |
29 | * | 29 | * |
30 | * This hasn't been optimized for the hammer yet, but there are likely | 30 | * This hasn't been optimized for the hammer yet, but there are likely |
31 | * no advantages to be gotten from x86-64 here anyways. | 31 | * no advantages to be gotten from x86-64 here anyways. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; | 34 | typedef struct { |
35 | unsigned long a, b; | ||
36 | } __attribute__((aligned(16))) xmm_store_t; | ||
35 | 37 | ||
36 | /* Doesn't use gcc to save the XMM registers, because there is no easy way to | 38 | /* Doesn't use gcc to save the XMM registers, because there is no easy way to |
37 | tell it to do a clts before the register saving. */ | 39 | tell it to do a clts before the register saving. */ |
38 | #define XMMS_SAVE do { \ | 40 | #define XMMS_SAVE \ |
41 | do { \ | ||
39 | preempt_disable(); \ | 42 | preempt_disable(); \ |
40 | asm volatile ( \ | 43 | asm volatile( \ |
41 | "movq %%cr0,%0 ;\n\t" \ | 44 | "movq %%cr0,%0 ;\n\t" \ |
42 | "clts ;\n\t" \ | 45 | "clts ;\n\t" \ |
43 | "movups %%xmm0,(%1) ;\n\t" \ | 46 | "movups %%xmm0,(%1) ;\n\t" \ |
@@ -47,10 +50,11 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; | |||
47 | : "=&r" (cr0) \ | 50 | : "=&r" (cr0) \ |
48 | : "r" (xmm_save) \ | 51 | : "r" (xmm_save) \ |
49 | : "memory"); \ | 52 | : "memory"); \ |
50 | } while(0) | 53 | } while (0) |
51 | 54 | ||
52 | #define XMMS_RESTORE do { \ | 55 | #define XMMS_RESTORE \ |
53 | asm volatile ( \ | 56 | do { \ |
57 | asm volatile( \ | ||
54 | "sfence ;\n\t" \ | 58 | "sfence ;\n\t" \ |
55 | "movups (%1),%%xmm0 ;\n\t" \ | 59 | "movups (%1),%%xmm0 ;\n\t" \ |
56 | "movups 0x10(%1),%%xmm1 ;\n\t" \ | 60 | "movups 0x10(%1),%%xmm1 ;\n\t" \ |
@@ -61,72 +65,72 @@ typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t; | |||
61 | : "r" (cr0), "r" (xmm_save) \ | 65 | : "r" (cr0), "r" (xmm_save) \ |
62 | : "memory"); \ | 66 | : "memory"); \ |
63 | preempt_enable(); \ | 67 | preempt_enable(); \ |
64 | } while(0) | 68 | } while (0) |
65 | 69 | ||
66 | #define OFFS(x) "16*("#x")" | 70 | #define OFFS(x) "16*("#x")" |
67 | #define PF_OFFS(x) "256+16*("#x")" | 71 | #define PF_OFFS(x) "256+16*("#x")" |
68 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" | 72 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" |
69 | #define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" | 73 | #define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" |
70 | #define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" | 74 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" |
71 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" | 75 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" |
72 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" | 76 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" |
73 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" | 77 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" |
74 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" | 78 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" |
75 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" | 79 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" |
76 | #define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" | 80 | #define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" |
77 | #define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" | 81 | #define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" |
78 | #define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" | 82 | #define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" |
79 | #define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" | 83 | #define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" |
80 | #define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" | 84 | #define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" |
81 | 85 | ||
82 | 86 | ||
83 | static void | 87 | static void |
84 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | 88 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) |
85 | { | 89 | { |
86 | unsigned int lines = bytes >> 8; | 90 | unsigned int lines = bytes >> 8; |
87 | unsigned long cr0; | 91 | unsigned long cr0; |
88 | xmm_store_t xmm_save[4]; | 92 | xmm_store_t xmm_save[4]; |
89 | 93 | ||
90 | XMMS_SAVE; | 94 | XMMS_SAVE; |
91 | 95 | ||
92 | asm volatile ( | 96 | asm volatile( |
93 | #undef BLOCK | 97 | #undef BLOCK |
94 | #define BLOCK(i) \ | 98 | #define BLOCK(i) \ |
95 | LD(i,0) \ | 99 | LD(i, 0) \ |
96 | LD(i+1,1) \ | 100 | LD(i + 1, 1) \ |
97 | PF1(i) \ | 101 | PF1(i) \ |
98 | PF1(i+2) \ | 102 | PF1(i + 2) \ |
99 | LD(i+2,2) \ | 103 | LD(i + 2, 2) \ |
100 | LD(i+3,3) \ | 104 | LD(i + 3, 3) \ |
101 | PF0(i+4) \ | 105 | PF0(i + 4) \ |
102 | PF0(i+6) \ | 106 | PF0(i + 6) \ |
103 | XO1(i,0) \ | 107 | XO1(i, 0) \ |
104 | XO1(i+1,1) \ | 108 | XO1(i + 1, 1) \ |
105 | XO1(i+2,2) \ | 109 | XO1(i + 2, 2) \ |
106 | XO1(i+3,3) \ | 110 | XO1(i + 3, 3) \ |
107 | ST(i,0) \ | 111 | ST(i, 0) \ |
108 | ST(i+1,1) \ | 112 | ST(i + 1, 1) \ |
109 | ST(i+2,2) \ | 113 | ST(i + 2, 2) \ |
110 | ST(i+3,3) \ | 114 | ST(i + 3, 3) \ |
111 | 115 | ||
112 | 116 | ||
113 | PF0(0) | 117 | PF0(0) |
114 | PF0(2) | 118 | PF0(2) |
115 | 119 | ||
116 | " .align 32 ;\n" | 120 | " .align 32 ;\n" |
117 | " 1: ;\n" | 121 | " 1: ;\n" |
118 | 122 | ||
119 | BLOCK(0) | 123 | BLOCK(0) |
120 | BLOCK(4) | 124 | BLOCK(4) |
121 | BLOCK(8) | 125 | BLOCK(8) |
122 | BLOCK(12) | 126 | BLOCK(12) |
123 | 127 | ||
124 | " addq %[inc], %[p1] ;\n" | 128 | " addq %[inc], %[p1] ;\n" |
125 | " addq %[inc], %[p2] ;\n" | 129 | " addq %[inc], %[p2] ;\n" |
126 | " decl %[cnt] ; jnz 1b" | 130 | " decl %[cnt] ; jnz 1b" |
127 | : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) | 131 | : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) |
128 | : [inc] "r" (256UL) | 132 | : [inc] "r" (256UL) |
129 | : "memory"); | 133 | : "memory"); |
130 | 134 | ||
131 | XMMS_RESTORE; | 135 | XMMS_RESTORE; |
132 | } | 136 | } |
@@ -141,52 +145,52 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
141 | 145 | ||
142 | XMMS_SAVE; | 146 | XMMS_SAVE; |
143 | 147 | ||
144 | __asm__ __volatile__ ( | 148 | asm volatile( |
145 | #undef BLOCK | 149 | #undef BLOCK |
146 | #define BLOCK(i) \ | 150 | #define BLOCK(i) \ |
147 | PF1(i) \ | 151 | PF1(i) \ |
148 | PF1(i+2) \ | 152 | PF1(i + 2) \ |
149 | LD(i,0) \ | 153 | LD(i, 0) \ |
150 | LD(i+1,1) \ | 154 | LD(i + 1, 1) \ |
151 | LD(i+2,2) \ | 155 | LD(i + 2, 2) \ |
152 | LD(i+3,3) \ | 156 | LD(i + 3, 3) \ |
153 | PF2(i) \ | 157 | PF2(i) \ |
154 | PF2(i+2) \ | 158 | PF2(i + 2) \ |
155 | PF0(i+4) \ | 159 | PF0(i + 4) \ |
156 | PF0(i+6) \ | 160 | PF0(i + 6) \ |
157 | XO1(i,0) \ | 161 | XO1(i, 0) \ |
158 | XO1(i+1,1) \ | 162 | XO1(i + 1, 1) \ |
159 | XO1(i+2,2) \ | 163 | XO1(i + 2, 2) \ |
160 | XO1(i+3,3) \ | 164 | XO1(i + 3, 3) \ |
161 | XO2(i,0) \ | 165 | XO2(i, 0) \ |
162 | XO2(i+1,1) \ | 166 | XO2(i + 1, 1) \ |
163 | XO2(i+2,2) \ | 167 | XO2(i + 2, 2) \ |
164 | XO2(i+3,3) \ | 168 | XO2(i + 3, 3) \ |
165 | ST(i,0) \ | 169 | ST(i, 0) \ |
166 | ST(i+1,1) \ | 170 | ST(i + 1, 1) \ |
167 | ST(i+2,2) \ | 171 | ST(i + 2, 2) \ |
168 | ST(i+3,3) \ | 172 | ST(i + 3, 3) \ |
169 | 173 | ||
170 | 174 | ||
171 | PF0(0) | 175 | PF0(0) |
172 | PF0(2) | 176 | PF0(2) |
173 | 177 | ||
174 | " .align 32 ;\n" | 178 | " .align 32 ;\n" |
175 | " 1: ;\n" | 179 | " 1: ;\n" |
176 | 180 | ||
177 | BLOCK(0) | 181 | BLOCK(0) |
178 | BLOCK(4) | 182 | BLOCK(4) |
179 | BLOCK(8) | 183 | BLOCK(8) |
180 | BLOCK(12) | 184 | BLOCK(12) |
181 | 185 | ||
182 | " addq %[inc], %[p1] ;\n" | 186 | " addq %[inc], %[p1] ;\n" |
183 | " addq %[inc], %[p2] ;\n" | 187 | " addq %[inc], %[p2] ;\n" |
184 | " addq %[inc], %[p3] ;\n" | 188 | " addq %[inc], %[p3] ;\n" |
185 | " decl %[cnt] ; jnz 1b" | 189 | " decl %[cnt] ; jnz 1b" |
186 | : [cnt] "+r" (lines), | 190 | : [cnt] "+r" (lines), |
187 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | 191 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) |
188 | : [inc] "r" (256UL) | 192 | : [inc] "r" (256UL) |
189 | : "memory"); | 193 | : "memory"); |
190 | XMMS_RESTORE; | 194 | XMMS_RESTORE; |
191 | } | 195 | } |
192 | 196 | ||
@@ -195,64 +199,64 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
195 | unsigned long *p3, unsigned long *p4) | 199 | unsigned long *p3, unsigned long *p4) |
196 | { | 200 | { |
197 | unsigned int lines = bytes >> 8; | 201 | unsigned int lines = bytes >> 8; |
198 | xmm_store_t xmm_save[4]; | 202 | xmm_store_t xmm_save[4]; |
199 | unsigned long cr0; | 203 | unsigned long cr0; |
200 | 204 | ||
201 | XMMS_SAVE; | 205 | XMMS_SAVE; |
202 | 206 | ||
203 | __asm__ __volatile__ ( | 207 | asm volatile( |
204 | #undef BLOCK | 208 | #undef BLOCK |
205 | #define BLOCK(i) \ | 209 | #define BLOCK(i) \ |
206 | PF1(i) \ | 210 | PF1(i) \ |
207 | PF1(i+2) \ | 211 | PF1(i + 2) \ |
208 | LD(i,0) \ | 212 | LD(i, 0) \ |
209 | LD(i+1,1) \ | 213 | LD(i + 1, 1) \ |
210 | LD(i+2,2) \ | 214 | LD(i + 2, 2) \ |
211 | LD(i+3,3) \ | 215 | LD(i + 3, 3) \ |
212 | PF2(i) \ | 216 | PF2(i) \ |
213 | PF2(i+2) \ | 217 | PF2(i + 2) \ |
214 | XO1(i,0) \ | 218 | XO1(i, 0) \ |
215 | XO1(i+1,1) \ | 219 | XO1(i + 1, 1) \ |
216 | XO1(i+2,2) \ | 220 | XO1(i + 2, 2) \ |
217 | XO1(i+3,3) \ | 221 | XO1(i + 3, 3) \ |
218 | PF3(i) \ | 222 | PF3(i) \ |
219 | PF3(i+2) \ | 223 | PF3(i + 2) \ |
220 | PF0(i+4) \ | 224 | PF0(i + 4) \ |
221 | PF0(i+6) \ | 225 | PF0(i + 6) \ |
222 | XO2(i,0) \ | 226 | XO2(i, 0) \ |
223 | XO2(i+1,1) \ | 227 | XO2(i + 1, 1) \ |
224 | XO2(i+2,2) \ | 228 | XO2(i + 2, 2) \ |
225 | XO2(i+3,3) \ | 229 | XO2(i + 3, 3) \ |
226 | XO3(i,0) \ | 230 | XO3(i, 0) \ |
227 | XO3(i+1,1) \ | 231 | XO3(i + 1, 1) \ |
228 | XO3(i+2,2) \ | 232 | XO3(i + 2, 2) \ |
229 | XO3(i+3,3) \ | 233 | XO3(i + 3, 3) \ |
230 | ST(i,0) \ | 234 | ST(i, 0) \ |
231 | ST(i+1,1) \ | 235 | ST(i + 1, 1) \ |
232 | ST(i+2,2) \ | 236 | ST(i + 2, 2) \ |
233 | ST(i+3,3) \ | 237 | ST(i + 3, 3) \ |
234 | 238 | ||
235 | 239 | ||
236 | PF0(0) | 240 | PF0(0) |
237 | PF0(2) | 241 | PF0(2) |
238 | 242 | ||
239 | " .align 32 ;\n" | 243 | " .align 32 ;\n" |
240 | " 1: ;\n" | 244 | " 1: ;\n" |
241 | 245 | ||
242 | BLOCK(0) | 246 | BLOCK(0) |
243 | BLOCK(4) | 247 | BLOCK(4) |
244 | BLOCK(8) | 248 | BLOCK(8) |
245 | BLOCK(12) | 249 | BLOCK(12) |
246 | 250 | ||
247 | " addq %[inc], %[p1] ;\n" | 251 | " addq %[inc], %[p1] ;\n" |
248 | " addq %[inc], %[p2] ;\n" | 252 | " addq %[inc], %[p2] ;\n" |
249 | " addq %[inc], %[p3] ;\n" | 253 | " addq %[inc], %[p3] ;\n" |
250 | " addq %[inc], %[p4] ;\n" | 254 | " addq %[inc], %[p4] ;\n" |
251 | " decl %[cnt] ; jnz 1b" | 255 | " decl %[cnt] ; jnz 1b" |
252 | : [cnt] "+c" (lines), | 256 | : [cnt] "+c" (lines), |
253 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) | 257 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) |
254 | : [inc] "r" (256UL) | 258 | : [inc] "r" (256UL) |
255 | : "memory" ); | 259 | : "memory" ); |
256 | 260 | ||
257 | XMMS_RESTORE; | 261 | XMMS_RESTORE; |
258 | } | 262 | } |
@@ -261,70 +265,70 @@ static void | |||
261 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | 265 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, |
262 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | 266 | unsigned long *p3, unsigned long *p4, unsigned long *p5) |
263 | { | 267 | { |
264 | unsigned int lines = bytes >> 8; | 268 | unsigned int lines = bytes >> 8; |
265 | xmm_store_t xmm_save[4]; | 269 | xmm_store_t xmm_save[4]; |
266 | unsigned long cr0; | 270 | unsigned long cr0; |
267 | 271 | ||
268 | XMMS_SAVE; | 272 | XMMS_SAVE; |
269 | 273 | ||
270 | __asm__ __volatile__ ( | 274 | asm volatile( |
271 | #undef BLOCK | 275 | #undef BLOCK |
272 | #define BLOCK(i) \ | 276 | #define BLOCK(i) \ |
273 | PF1(i) \ | 277 | PF1(i) \ |
274 | PF1(i+2) \ | 278 | PF1(i + 2) \ |
275 | LD(i,0) \ | 279 | LD(i, 0) \ |
276 | LD(i+1,1) \ | 280 | LD(i + 1, 1) \ |
277 | LD(i+2,2) \ | 281 | LD(i + 2, 2) \ |
278 | LD(i+3,3) \ | 282 | LD(i + 3, 3) \ |
279 | PF2(i) \ | 283 | PF2(i) \ |
280 | PF2(i+2) \ | 284 | PF2(i + 2) \ |
281 | XO1(i,0) \ | 285 | XO1(i, 0) \ |
282 | XO1(i+1,1) \ | 286 | XO1(i + 1, 1) \ |
283 | XO1(i+2,2) \ | 287 | XO1(i + 2, 2) \ |
284 | XO1(i+3,3) \ | 288 | XO1(i + 3, 3) \ |
285 | PF3(i) \ | 289 | PF3(i) \ |
286 | PF3(i+2) \ | 290 | PF3(i + 2) \ |
287 | XO2(i,0) \ | 291 | XO2(i, 0) \ |
288 | XO2(i+1,1) \ | 292 | XO2(i + 1, 1) \ |
289 | XO2(i+2,2) \ | 293 | XO2(i + 2, 2) \ |
290 | XO2(i+3,3) \ | 294 | XO2(i + 3, 3) \ |
291 | PF4(i) \ | 295 | PF4(i) \ |
292 | PF4(i+2) \ | 296 | PF4(i + 2) \ |
293 | PF0(i+4) \ | 297 | PF0(i + 4) \ |
294 | PF0(i+6) \ | 298 | PF0(i + 6) \ |
295 | XO3(i,0) \ | 299 | XO3(i, 0) \ |
296 | XO3(i+1,1) \ | 300 | XO3(i + 1, 1) \ |
297 | XO3(i+2,2) \ | 301 | XO3(i + 2, 2) \ |
298 | XO3(i+3,3) \ | 302 | XO3(i + 3, 3) \ |
299 | XO4(i,0) \ | 303 | XO4(i, 0) \ |
300 | XO4(i+1,1) \ | 304 | XO4(i + 1, 1) \ |
301 | XO4(i+2,2) \ | 305 | XO4(i + 2, 2) \ |
302 | XO4(i+3,3) \ | 306 | XO4(i + 3, 3) \ |
303 | ST(i,0) \ | 307 | ST(i, 0) \ |
304 | ST(i+1,1) \ | 308 | ST(i + 1, 1) \ |
305 | ST(i+2,2) \ | 309 | ST(i + 2, 2) \ |
306 | ST(i+3,3) \ | 310 | ST(i + 3, 3) \ |
307 | 311 | ||
308 | 312 | ||
309 | PF0(0) | 313 | PF0(0) |
310 | PF0(2) | 314 | PF0(2) |
311 | 315 | ||
312 | " .align 32 ;\n" | 316 | " .align 32 ;\n" |
313 | " 1: ;\n" | 317 | " 1: ;\n" |
314 | 318 | ||
315 | BLOCK(0) | 319 | BLOCK(0) |
316 | BLOCK(4) | 320 | BLOCK(4) |
317 | BLOCK(8) | 321 | BLOCK(8) |
318 | BLOCK(12) | 322 | BLOCK(12) |
319 | 323 | ||
320 | " addq %[inc], %[p1] ;\n" | 324 | " addq %[inc], %[p1] ;\n" |
321 | " addq %[inc], %[p2] ;\n" | 325 | " addq %[inc], %[p2] ;\n" |
322 | " addq %[inc], %[p3] ;\n" | 326 | " addq %[inc], %[p3] ;\n" |
323 | " addq %[inc], %[p4] ;\n" | 327 | " addq %[inc], %[p4] ;\n" |
324 | " addq %[inc], %[p5] ;\n" | 328 | " addq %[inc], %[p5] ;\n" |
325 | " decl %[cnt] ; jnz 1b" | 329 | " decl %[cnt] ; jnz 1b" |
326 | : [cnt] "+c" (lines), | 330 | : [cnt] "+c" (lines), |
327 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), | 331 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), |
328 | [p5] "+r" (p5) | 332 | [p5] "+r" (p5) |
329 | : [inc] "r" (256UL) | 333 | : [inc] "r" (256UL) |
330 | : "memory"); | 334 | : "memory"); |
@@ -333,18 +337,18 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | |||
333 | } | 337 | } |
334 | 338 | ||
335 | static struct xor_block_template xor_block_sse = { | 339 | static struct xor_block_template xor_block_sse = { |
336 | .name = "generic_sse", | 340 | .name = "generic_sse", |
337 | .do_2 = xor_sse_2, | 341 | .do_2 = xor_sse_2, |
338 | .do_3 = xor_sse_3, | 342 | .do_3 = xor_sse_3, |
339 | .do_4 = xor_sse_4, | 343 | .do_4 = xor_sse_4, |
340 | .do_5 = xor_sse_5, | 344 | .do_5 = xor_sse_5, |
341 | }; | 345 | }; |
342 | 346 | ||
343 | #undef XOR_TRY_TEMPLATES | 347 | #undef XOR_TRY_TEMPLATES |
344 | #define XOR_TRY_TEMPLATES \ | 348 | #define XOR_TRY_TEMPLATES \ |
345 | do { \ | 349 | do { \ |
346 | xor_speed(&xor_block_sse); \ | 350 | xor_speed(&xor_block_sse); \ |
347 | } while (0) | 351 | } while (0) |
348 | 352 | ||
349 | /* We force the use of the SSE xor block because it can write around L2. | 353 | /* We force the use of the SSE xor block because it can write around L2. |
350 | We may also be able to load into the L1 only depending on how the cpu | 354 | We may also be able to load into the L1 only depending on how the cpu |
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h index 3e04167cd9dc..d9b2034ed1d2 100644 --- a/include/asm-xtensa/semaphore.h +++ b/include/asm-xtensa/semaphore.h | |||
@@ -1,99 +1 @@ | |||
1 | /* | #include <linux/semaphore.h> | |
2 | * linux/include/asm-xtensa/semaphore.h | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 2001 - 2005 Tensilica Inc. | ||
9 | */ | ||
10 | |||
11 | #ifndef _XTENSA_SEMAPHORE_H | ||
12 | #define _XTENSA_SEMAPHORE_H | ||
13 | |||
14 | #include <asm/atomic.h> | ||
15 | #include <asm/system.h> | ||
16 | #include <linux/wait.h> | ||
17 | #include <linux/rwsem.h> | ||
18 | |||
19 | struct semaphore { | ||
20 | atomic_t count; | ||
21 | int sleepers; | ||
22 | wait_queue_head_t wait; | ||
23 | }; | ||
24 | |||
25 | #define __SEMAPHORE_INITIALIZER(name,n) \ | ||
26 | { \ | ||
27 | .count = ATOMIC_INIT(n), \ | ||
28 | .sleepers = 0, \ | ||
29 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
30 | } | ||
31 | |||
32 | #define __DECLARE_SEMAPHORE_GENERIC(name,count) \ | ||
33 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
34 | |||
35 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) | ||
36 | |||
37 | static inline void sema_init (struct semaphore *sem, int val) | ||
38 | { | ||
39 | atomic_set(&sem->count, val); | ||
40 | sem->sleepers = 0; | ||
41 | init_waitqueue_head(&sem->wait); | ||
42 | } | ||
43 | |||
44 | static inline void init_MUTEX (struct semaphore *sem) | ||
45 | { | ||
46 | sema_init(sem, 1); | ||
47 | } | ||
48 | |||
49 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
50 | { | ||
51 | sema_init(sem, 0); | ||
52 | } | ||
53 | |||
54 | asmlinkage void __down(struct semaphore * sem); | ||
55 | asmlinkage int __down_interruptible(struct semaphore * sem); | ||
56 | asmlinkage int __down_trylock(struct semaphore * sem); | ||
57 | asmlinkage void __up(struct semaphore * sem); | ||
58 | |||
59 | extern spinlock_t semaphore_wake_lock; | ||
60 | |||
61 | static inline void down(struct semaphore * sem) | ||
62 | { | ||
63 | might_sleep(); | ||
64 | |||
65 | if (atomic_sub_return(1, &sem->count) < 0) | ||
66 | __down(sem); | ||
67 | } | ||
68 | |||
69 | static inline int down_interruptible(struct semaphore * sem) | ||
70 | { | ||
71 | int ret = 0; | ||
72 | |||
73 | might_sleep(); | ||
74 | |||
75 | if (atomic_sub_return(1, &sem->count) < 0) | ||
76 | ret = __down_interruptible(sem); | ||
77 | return ret; | ||
78 | } | ||
79 | |||
80 | static inline int down_trylock(struct semaphore * sem) | ||
81 | { | ||
82 | int ret = 0; | ||
83 | |||
84 | if (atomic_sub_return(1, &sem->count) < 0) | ||
85 | ret = __down_trylock(sem); | ||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Note! This is subtle. We jump to wake people up only if | ||
91 | * the semaphore was negative (== somebody was waiting on it). | ||
92 | */ | ||
93 | static inline void up(struct semaphore * sem) | ||
94 | { | ||
95 | if (atomic_add_return(1, &sem->count) <= 0) | ||
96 | __up(sem); | ||
97 | } | ||
98 | |||
99 | #endif /* _XTENSA_SEMAPHORE_H */ | ||
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 9cdd12a9e843..b3d9ccde0c27 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
@@ -86,6 +86,7 @@ header-y += if_plip.h | |||
86 | header-y += if_ppp.h | 86 | header-y += if_ppp.h |
87 | header-y += if_slip.h | 87 | header-y += if_slip.h |
88 | header-y += if_strip.h | 88 | header-y += if_strip.h |
89 | header-y += if_tun.h | ||
89 | header-y += if_tunnel.h | 90 | header-y += if_tunnel.h |
90 | header-y += in6.h | 91 | header-y += in6.h |
91 | header-y += in_route.h | 92 | header-y += in_route.h |
@@ -229,9 +230,7 @@ unifdef-y += if_link.h | |||
229 | unifdef-y += if_pppol2tp.h | 230 | unifdef-y += if_pppol2tp.h |
230 | unifdef-y += if_pppox.h | 231 | unifdef-y += if_pppox.h |
231 | unifdef-y += if_tr.h | 232 | unifdef-y += if_tr.h |
232 | unifdef-y += if_tun.h | ||
233 | unifdef-y += if_vlan.h | 233 | unifdef-y += if_vlan.h |
234 | unifdef-y += if_wanpipe.h | ||
235 | unifdef-y += igmp.h | 234 | unifdef-y += igmp.h |
236 | unifdef-y += inet_diag.h | 235 | unifdef-y += inet_diag.h |
237 | unifdef-y += in.h | 236 | unifdef-y += in.h |
@@ -261,6 +260,7 @@ unifdef-y += mempolicy.h | |||
261 | unifdef-y += mii.h | 260 | unifdef-y += mii.h |
262 | unifdef-y += mman.h | 261 | unifdef-y += mman.h |
263 | unifdef-y += mroute.h | 262 | unifdef-y += mroute.h |
263 | unifdef-y += mroute6.h | ||
264 | unifdef-y += msdos_fs.h | 264 | unifdef-y += msdos_fs.h |
265 | unifdef-y += msg.h | 265 | unifdef-y += msg.h |
266 | unifdef-y += nbd.h | 266 | unifdef-y += nbd.h |
@@ -289,6 +289,7 @@ unifdef-y += parport.h | |||
289 | unifdef-y += patchkey.h | 289 | unifdef-y += patchkey.h |
290 | unifdef-y += pci.h | 290 | unifdef-y += pci.h |
291 | unifdef-y += personality.h | 291 | unifdef-y += personality.h |
292 | unifdef-y += pim.h | ||
292 | unifdef-y += pktcdvd.h | 293 | unifdef-y += pktcdvd.h |
293 | unifdef-y += pmu.h | 294 | unifdef-y += pmu.h |
294 | unifdef-y += poll.h | 295 | unifdef-y += poll.h |
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index fde675872c56..a1916078fd08 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h | |||
@@ -283,8 +283,8 @@ struct arcnet_local { | |||
283 | int next_buf, first_free_buf; | 283 | int next_buf, first_free_buf; |
284 | 284 | ||
285 | /* network "reconfiguration" handling */ | 285 | /* network "reconfiguration" handling */ |
286 | time_t first_recon, /* time of "first" RECON message to count */ | 286 | unsigned long first_recon; /* time of "first" RECON message to count */ |
287 | last_recon; /* time of most recent RECON */ | 287 | unsigned long last_recon; /* time of most recent RECON */ |
288 | int num_recons; /* number of RECONs between first and last. */ | 288 | int num_recons; /* number of RECONs between first and last. */ |
289 | bool network_down; /* do we think the network is down? */ | 289 | bool network_down; /* do we think the network is down? */ |
290 | 290 | ||
diff --git a/include/linux/atalk.h b/include/linux/atalk.h index ced8a1ed080c..e9ebac2e2ecc 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h | |||
@@ -85,8 +85,6 @@ static inline struct atalk_sock *at_sk(struct sock *sk) | |||
85 | return (struct atalk_sock *)sk; | 85 | return (struct atalk_sock *)sk; |
86 | } | 86 | } |
87 | 87 | ||
88 | #include <asm/byteorder.h> | ||
89 | |||
90 | struct ddpehdr { | 88 | struct ddpehdr { |
91 | __be16 deh_len_hops; /* lower 10 bits are length, next 4 - hops */ | 89 | __be16 deh_len_hops; /* lower 10 bits are length, next 4 - hops */ |
92 | __be16 deh_sum; | 90 | __be16 deh_sum; |
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h index f5582332af04..574b201b99d8 100644 --- a/include/linux/attribute_container.h +++ b/include/linux/attribute_container.h | |||
@@ -37,7 +37,7 @@ attribute_container_set_no_classdevs(struct attribute_container *atc) | |||
37 | } | 37 | } |
38 | 38 | ||
39 | int attribute_container_register(struct attribute_container *cont); | 39 | int attribute_container_register(struct attribute_container *cont); |
40 | int attribute_container_unregister(struct attribute_container *cont); | 40 | int __must_check attribute_container_unregister(struct attribute_container *cont); |
41 | void attribute_container_create_device(struct device *dev, | 41 | void attribute_container_create_device(struct device *dev, |
42 | int (*fn)(struct attribute_container *, | 42 | int (*fn)(struct attribute_container *, |
43 | struct device *, | 43 | struct device *, |
diff --git a/include/linux/audit.h b/include/linux/audit.h index 2af9ec025015..4ccb048cae1d 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -353,6 +353,33 @@ struct netlink_skb_parms; | |||
353 | struct linux_binprm; | 353 | struct linux_binprm; |
354 | struct mq_attr; | 354 | struct mq_attr; |
355 | struct mqstat; | 355 | struct mqstat; |
356 | struct audit_watch; | ||
357 | struct audit_tree; | ||
358 | |||
359 | struct audit_krule { | ||
360 | int vers_ops; | ||
361 | u32 flags; | ||
362 | u32 listnr; | ||
363 | u32 action; | ||
364 | u32 mask[AUDIT_BITMASK_SIZE]; | ||
365 | u32 buflen; /* for data alloc on list rules */ | ||
366 | u32 field_count; | ||
367 | char *filterkey; /* ties events to rules */ | ||
368 | struct audit_field *fields; | ||
369 | struct audit_field *arch_f; /* quick access to arch field */ | ||
370 | struct audit_field *inode_f; /* quick access to an inode field */ | ||
371 | struct audit_watch *watch; /* associated watch */ | ||
372 | struct audit_tree *tree; /* associated watched tree */ | ||
373 | struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ | ||
374 | }; | ||
375 | |||
376 | struct audit_field { | ||
377 | u32 type; | ||
378 | u32 val; | ||
379 | u32 op; | ||
380 | char *lsm_str; | ||
381 | void *lsm_rule; | ||
382 | }; | ||
356 | 383 | ||
357 | #define AUDITSC_INVALID 0 | 384 | #define AUDITSC_INVALID 0 |
358 | #define AUDITSC_SUCCESS 1 | 385 | #define AUDITSC_SUCCESS 1 |
@@ -536,6 +563,8 @@ extern void audit_log_d_path(struct audit_buffer *ab, | |||
536 | const char *prefix, | 563 | const char *prefix, |
537 | struct path *path); | 564 | struct path *path); |
538 | extern void audit_log_lost(const char *message); | 565 | extern void audit_log_lost(const char *message); |
566 | extern int audit_update_lsm_rules(void); | ||
567 | |||
539 | /* Private API (for audit.c only) */ | 568 | /* Private API (for audit.c only) */ |
540 | extern int audit_filter_user(struct netlink_skb_parms *cb, int type); | 569 | extern int audit_filter_user(struct netlink_skb_parms *cb, int type); |
541 | extern int audit_filter_type(int type); | 570 | extern int audit_filter_type(int type); |
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 85778a4b1209..35094479ca55 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
@@ -216,6 +216,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c, | |||
216 | /* used to install a new clocksource */ | 216 | /* used to install a new clocksource */ |
217 | extern int clocksource_register(struct clocksource*); | 217 | extern int clocksource_register(struct clocksource*); |
218 | extern void clocksource_unregister(struct clocksource*); | 218 | extern void clocksource_unregister(struct clocksource*); |
219 | extern void clocksource_touch_watchdog(void); | ||
219 | extern struct clocksource* clocksource_get_next(void); | 220 | extern struct clocksource* clocksource_get_next(void); |
220 | extern void clocksource_change_rating(struct clocksource *cs, int rating); | 221 | extern void clocksource_change_rating(struct clocksource *cs, int rating); |
221 | extern void clocksource_resume(void); | 222 | extern void clocksource_resume(void); |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 34d440698293..b4d84ed6187d 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -95,12 +95,17 @@ enum dma_transaction_type { | |||
95 | #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) | 95 | #define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) |
96 | 96 | ||
97 | /** | 97 | /** |
98 | * enum dma_prep_flags - DMA flags to augment operation preparation | 98 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, |
99 | * control completion, and communicate status. | ||
99 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of | 100 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of |
100 | * this transaction | 101 | * this transaction |
102 | * @DMA_CTRL_ACK - the descriptor cannot be reused until the client | ||
103 | * acknowledges receipt, i.e. has has a chance to establish any | ||
104 | * dependency chains | ||
101 | */ | 105 | */ |
102 | enum dma_prep_flags { | 106 | enum dma_ctrl_flags { |
103 | DMA_PREP_INTERRUPT = (1 << 0), | 107 | DMA_PREP_INTERRUPT = (1 << 0), |
108 | DMA_CTRL_ACK = (1 << 1), | ||
104 | }; | 109 | }; |
105 | 110 | ||
106 | /** | 111 | /** |
@@ -211,8 +216,8 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param); | |||
211 | * ---dma generic offload fields--- | 216 | * ---dma generic offload fields--- |
212 | * @cookie: tracking cookie for this transaction, set to -EBUSY if | 217 | * @cookie: tracking cookie for this transaction, set to -EBUSY if |
213 | * this tx is sitting on a dependency list | 218 | * this tx is sitting on a dependency list |
214 | * @ack: the descriptor can not be reused until the client acknowledges | 219 | * @flags: flags to augment operation preparation, control completion, and |
215 | * receipt, i.e. has has a chance to establish any dependency chains | 220 | * communicate status |
216 | * @phys: physical address of the descriptor | 221 | * @phys: physical address of the descriptor |
217 | * @tx_list: driver common field for operations that require multiple | 222 | * @tx_list: driver common field for operations that require multiple |
218 | * descriptors | 223 | * descriptors |
@@ -221,23 +226,20 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param); | |||
221 | * @callback: routine to call after this operation is complete | 226 | * @callback: routine to call after this operation is complete |
222 | * @callback_param: general parameter to pass to the callback routine | 227 | * @callback_param: general parameter to pass to the callback routine |
223 | * ---async_tx api specific fields--- | 228 | * ---async_tx api specific fields--- |
224 | * @depend_list: at completion this list of transactions are submitted | 229 | * @next: at completion submit this descriptor |
225 | * @depend_node: allow this transaction to be executed after another | ||
226 | * transaction has completed, possibly on another channel | ||
227 | * @parent: pointer to the next level up in the dependency chain | 230 | * @parent: pointer to the next level up in the dependency chain |
228 | * @lock: protect the dependency list | 231 | * @lock: protect the parent and next pointers |
229 | */ | 232 | */ |
230 | struct dma_async_tx_descriptor { | 233 | struct dma_async_tx_descriptor { |
231 | dma_cookie_t cookie; | 234 | dma_cookie_t cookie; |
232 | int ack; | 235 | enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ |
233 | dma_addr_t phys; | 236 | dma_addr_t phys; |
234 | struct list_head tx_list; | 237 | struct list_head tx_list; |
235 | struct dma_chan *chan; | 238 | struct dma_chan *chan; |
236 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); | 239 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); |
237 | dma_async_tx_callback callback; | 240 | dma_async_tx_callback callback; |
238 | void *callback_param; | 241 | void *callback_param; |
239 | struct list_head depend_list; | 242 | struct dma_async_tx_descriptor *next; |
240 | struct list_head depend_node; | ||
241 | struct dma_async_tx_descriptor *parent; | 243 | struct dma_async_tx_descriptor *parent; |
242 | spinlock_t lock; | 244 | spinlock_t lock; |
243 | }; | 245 | }; |
@@ -261,7 +263,6 @@ struct dma_async_tx_descriptor { | |||
261 | * @device_prep_dma_zero_sum: prepares a zero_sum operation | 263 | * @device_prep_dma_zero_sum: prepares a zero_sum operation |
262 | * @device_prep_dma_memset: prepares a memset operation | 264 | * @device_prep_dma_memset: prepares a memset operation |
263 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation | 265 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation |
264 | * @device_dependency_added: async_tx notifies the channel about new deps | ||
265 | * @device_issue_pending: push pending transactions to hardware | 266 | * @device_issue_pending: push pending transactions to hardware |
266 | */ | 267 | */ |
267 | struct dma_device { | 268 | struct dma_device { |
@@ -294,9 +295,8 @@ struct dma_device { | |||
294 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | 295 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, |
295 | unsigned long flags); | 296 | unsigned long flags); |
296 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 297 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
297 | struct dma_chan *chan); | 298 | struct dma_chan *chan, unsigned long flags); |
298 | 299 | ||
299 | void (*device_dependency_added)(struct dma_chan *chan); | ||
300 | enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, | 300 | enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, |
301 | dma_cookie_t cookie, dma_cookie_t *last, | 301 | dma_cookie_t cookie, dma_cookie_t *last, |
302 | dma_cookie_t *used); | 302 | dma_cookie_t *used); |
@@ -321,7 +321,13 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |||
321 | static inline void | 321 | static inline void |
322 | async_tx_ack(struct dma_async_tx_descriptor *tx) | 322 | async_tx_ack(struct dma_async_tx_descriptor *tx) |
323 | { | 323 | { |
324 | tx->ack = 1; | 324 | tx->flags |= DMA_CTRL_ACK; |
325 | } | ||
326 | |||
327 | static inline int | ||
328 | async_tx_test_ack(struct dma_async_tx_descriptor *tx) | ||
329 | { | ||
330 | return tx->flags & DMA_CTRL_ACK; | ||
325 | } | 331 | } |
326 | 332 | ||
327 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) | 333 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) |
diff --git a/include/linux/filter.h b/include/linux/filter.h index ddfa0372a3b7..b6ea9aa9e853 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -37,21 +37,6 @@ struct sock_fprog /* Required for SO_ATTACH_FILTER. */ | |||
37 | struct sock_filter __user *filter; | 37 | struct sock_filter __user *filter; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | #ifdef __KERNEL__ | ||
41 | struct sk_filter | ||
42 | { | ||
43 | atomic_t refcnt; | ||
44 | unsigned int len; /* Number of filter blocks */ | ||
45 | struct rcu_head rcu; | ||
46 | struct sock_filter insns[0]; | ||
47 | }; | ||
48 | |||
49 | static inline unsigned int sk_filter_len(struct sk_filter *fp) | ||
50 | { | ||
51 | return fp->len*sizeof(struct sock_filter) + sizeof(*fp); | ||
52 | } | ||
53 | #endif | ||
54 | |||
55 | /* | 40 | /* |
56 | * Instruction classes | 41 | * Instruction classes |
57 | */ | 42 | */ |
@@ -136,15 +121,31 @@ static inline unsigned int sk_filter_len(struct sk_filter *fp) | |||
136 | #define SKF_AD_PROTOCOL 0 | 121 | #define SKF_AD_PROTOCOL 0 |
137 | #define SKF_AD_PKTTYPE 4 | 122 | #define SKF_AD_PKTTYPE 4 |
138 | #define SKF_AD_IFINDEX 8 | 123 | #define SKF_AD_IFINDEX 8 |
139 | #define SKF_AD_MAX 12 | 124 | #define SKF_AD_NLATTR 12 |
125 | #define SKF_AD_MAX 16 | ||
140 | #define SKF_NET_OFF (-0x100000) | 126 | #define SKF_NET_OFF (-0x100000) |
141 | #define SKF_LL_OFF (-0x200000) | 127 | #define SKF_LL_OFF (-0x200000) |
142 | 128 | ||
143 | #ifdef __KERNEL__ | 129 | #ifdef __KERNEL__ |
130 | struct sk_filter | ||
131 | { | ||
132 | atomic_t refcnt; | ||
133 | unsigned int len; /* Number of filter blocks */ | ||
134 | struct rcu_head rcu; | ||
135 | struct sock_filter insns[0]; | ||
136 | }; | ||
137 | |||
138 | static inline unsigned int sk_filter_len(const struct sk_filter *fp) | ||
139 | { | ||
140 | return fp->len * sizeof(struct sock_filter) + sizeof(*fp); | ||
141 | } | ||
142 | |||
144 | struct sk_buff; | 143 | struct sk_buff; |
145 | struct sock; | 144 | struct sock; |
146 | 145 | ||
147 | extern unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen); | 146 | extern int sk_filter(struct sock *sk, struct sk_buff *skb); |
147 | extern unsigned int sk_run_filter(struct sk_buff *skb, | ||
148 | struct sock_filter *filter, int flen); | ||
148 | extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); | 149 | extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); |
149 | extern int sk_detach_filter(struct sock *sk); | 150 | extern int sk_detach_filter(struct sock *sk); |
150 | extern int sk_chk_filter(struct sock_filter *filter, int flen); | 151 | extern int sk_chk_filter(struct sock_filter *filter, int flen); |
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 1831b196c70a..2cad5c67397e 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
@@ -50,7 +50,7 @@ struct gianfar_platform_data { | |||
50 | u32 device_flags; | 50 | u32 device_flags; |
51 | /* board specific information */ | 51 | /* board specific information */ |
52 | u32 board_flags; | 52 | u32 board_flags; |
53 | u32 bus_id; | 53 | char bus_id[MII_BUS_ID_SIZE]; |
54 | u32 phy_id; | 54 | u32 phy_id; |
55 | u8 mac_addr[6]; | 55 | u8 mac_addr[6]; |
56 | phy_interface_t interface; | 56 | phy_interface_t interface; |
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h index e38e75967e74..c37e9241fae7 100644 --- a/include/linux/hdreg.h +++ b/include/linux/hdreg.h | |||
@@ -422,9 +422,11 @@ struct hd_geometry { | |||
422 | #define HDIO_SET_NOWERR 0x0325 /* change ignore-write-error flag */ | 422 | #define HDIO_SET_NOWERR 0x0325 /* change ignore-write-error flag */ |
423 | #define HDIO_SET_DMA 0x0326 /* change use-dma flag */ | 423 | #define HDIO_SET_DMA 0x0326 /* change use-dma flag */ |
424 | #define HDIO_SET_PIO_MODE 0x0327 /* reconfig interface to new speed */ | 424 | #define HDIO_SET_PIO_MODE 0x0327 /* reconfig interface to new speed */ |
425 | #ifndef __KERNEL__ | ||
425 | #define HDIO_SCAN_HWIF 0x0328 /* register and (re)scan interface */ | 426 | #define HDIO_SCAN_HWIF 0x0328 /* register and (re)scan interface */ |
426 | #define HDIO_SET_NICE 0x0329 /* set nice flags */ | ||
427 | #define HDIO_UNREGISTER_HWIF 0x032a /* unregister interface */ | 427 | #define HDIO_UNREGISTER_HWIF 0x032a /* unregister interface */ |
428 | #endif | ||
429 | #define HDIO_SET_NICE 0x0329 /* set nice flags */ | ||
428 | #define HDIO_SET_WCACHE 0x032b /* change write cache enable-disable */ | 430 | #define HDIO_SET_WCACHE 0x032b /* change write cache enable-disable */ |
429 | #define HDIO_SET_ACOUSTIC 0x032c /* change acoustic behavior */ | 431 | #define HDIO_SET_ACOUSTIC 0x032c /* change acoustic behavior */ |
430 | #define HDIO_SET_BUSSTATE 0x032d /* set the bus state of the hwif */ | 432 | #define HDIO_SET_BUSSTATE 0x032d /* set the bus state of the hwif */ |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 1ad56a7b2f74..56f3236da829 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -173,7 +173,6 @@ struct hrtimer_clock_base { | |||
173 | * struct hrtimer_cpu_base - the per cpu clock bases | 173 | * struct hrtimer_cpu_base - the per cpu clock bases |
174 | * @lock: lock protecting the base and associated clock bases | 174 | * @lock: lock protecting the base and associated clock bases |
175 | * and timers | 175 | * and timers |
176 | * @lock_key: the lock_class_key for use with lockdep | ||
177 | * @clock_base: array of clock bases for this cpu | 176 | * @clock_base: array of clock bases for this cpu |
178 | * @curr_timer: the timer which is executing a callback right now | 177 | * @curr_timer: the timer which is executing a callback right now |
179 | * @expires_next: absolute time of the next event which was scheduled | 178 | * @expires_next: absolute time of the next event which was scheduled |
@@ -189,7 +188,6 @@ struct hrtimer_clock_base { | |||
189 | */ | 188 | */ |
190 | struct hrtimer_cpu_base { | 189 | struct hrtimer_cpu_base { |
191 | spinlock_t lock; | 190 | spinlock_t lock; |
192 | struct lock_class_key lock_key; | ||
193 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 191 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
194 | struct list_head cb_pending; | 192 | struct list_head cb_pending; |
195 | #ifdef CONFIG_HIGH_RES_TIMERS | 193 | #ifdef CONFIG_HIGH_RES_TIMERS |
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 7c5e9817e998..03067443198a 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h | |||
@@ -176,12 +176,21 @@ extern void icmpv6_send(struct sk_buff *skb, | |||
176 | __u32 info, | 176 | __u32 info, |
177 | struct net_device *dev); | 177 | struct net_device *dev); |
178 | 178 | ||
179 | extern int icmpv6_init(struct net_proto_family *ops); | 179 | extern int icmpv6_init(void); |
180 | extern int icmpv6_err_convert(int type, int code, | 180 | extern int icmpv6_err_convert(int type, int code, |
181 | int *err); | 181 | int *err); |
182 | extern void icmpv6_cleanup(void); | 182 | extern void icmpv6_cleanup(void); |
183 | extern void icmpv6_param_prob(struct sk_buff *skb, | 183 | extern void icmpv6_param_prob(struct sk_buff *skb, |
184 | int code, int pos); | 184 | int code, int pos); |
185 | |||
186 | struct flowi; | ||
187 | struct in6_addr; | ||
188 | extern void icmpv6_flow_init(struct sock *sk, | ||
189 | struct flowi *fl, | ||
190 | u8 type, | ||
191 | const struct in6_addr *saddr, | ||
192 | const struct in6_addr *daddr, | ||
193 | int oif); | ||
185 | #endif | 194 | #endif |
186 | 195 | ||
187 | #endif | 196 | #endif |
diff --git a/include/linux/ide.h b/include/linux/ide.h index bc26b2f27359..6c39482fd1a1 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -82,24 +82,10 @@ typedef unsigned char byte; /* used everywhere */ | |||
82 | 82 | ||
83 | #define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET | 83 | #define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET |
84 | #define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET | 84 | #define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET |
85 | 85 | #define IDE_ALTSTATUS_OFFSET IDE_CONTROL_OFFSET | |
86 | #define IDE_DATA_REG (HWIF(drive)->io_ports[IDE_DATA_OFFSET]) | 86 | #define IDE_IREASON_OFFSET IDE_NSECTOR_OFFSET |
87 | #define IDE_ERROR_REG (HWIF(drive)->io_ports[IDE_ERROR_OFFSET]) | 87 | #define IDE_BCOUNTL_OFFSET IDE_LCYL_OFFSET |
88 | #define IDE_NSECTOR_REG (HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET]) | 88 | #define IDE_BCOUNTH_OFFSET IDE_HCYL_OFFSET |
89 | #define IDE_SECTOR_REG (HWIF(drive)->io_ports[IDE_SECTOR_OFFSET]) | ||
90 | #define IDE_LCYL_REG (HWIF(drive)->io_ports[IDE_LCYL_OFFSET]) | ||
91 | #define IDE_HCYL_REG (HWIF(drive)->io_ports[IDE_HCYL_OFFSET]) | ||
92 | #define IDE_SELECT_REG (HWIF(drive)->io_ports[IDE_SELECT_OFFSET]) | ||
93 | #define IDE_STATUS_REG (HWIF(drive)->io_ports[IDE_STATUS_OFFSET]) | ||
94 | #define IDE_CONTROL_REG (HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]) | ||
95 | #define IDE_IRQ_REG (HWIF(drive)->io_ports[IDE_IRQ_OFFSET]) | ||
96 | |||
97 | #define IDE_FEATURE_REG IDE_ERROR_REG | ||
98 | #define IDE_COMMAND_REG IDE_STATUS_REG | ||
99 | #define IDE_ALTSTATUS_REG IDE_CONTROL_REG | ||
100 | #define IDE_IREASON_REG IDE_NSECTOR_REG | ||
101 | #define IDE_BCOUNTL_REG IDE_LCYL_REG | ||
102 | #define IDE_BCOUNTH_REG IDE_HCYL_REG | ||
103 | 89 | ||
104 | #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) | 90 | #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) |
105 | #define BAD_R_STAT (BUSY_STAT | ERR_STAT) | 91 | #define BAD_R_STAT (BUSY_STAT | ERR_STAT) |
@@ -169,7 +155,7 @@ enum { ide_unknown, ide_generic, ide_pci, | |||
169 | ide_rz1000, ide_trm290, | 155 | ide_rz1000, ide_trm290, |
170 | ide_cmd646, ide_cy82c693, ide_4drives, | 156 | ide_cmd646, ide_cy82c693, ide_4drives, |
171 | ide_pmac, ide_etrax100, ide_acorn, | 157 | ide_pmac, ide_etrax100, ide_acorn, |
172 | ide_au1xxx, ide_palm3710, ide_forced | 158 | ide_au1xxx, ide_palm3710 |
173 | }; | 159 | }; |
174 | 160 | ||
175 | typedef u8 hwif_chipset_t; | 161 | typedef u8 hwif_chipset_t; |
@@ -186,14 +172,9 @@ typedef struct hw_regs_s { | |||
186 | } hw_regs_t; | 172 | } hw_regs_t; |
187 | 173 | ||
188 | struct hwif_s * ide_find_port(unsigned long); | 174 | struct hwif_s * ide_find_port(unsigned long); |
189 | struct hwif_s *ide_deprecated_find_port(unsigned long); | ||
190 | void ide_init_port_data(struct hwif_s *, unsigned int); | 175 | void ide_init_port_data(struct hwif_s *, unsigned int); |
191 | void ide_init_port_hw(struct hwif_s *, hw_regs_t *); | 176 | void ide_init_port_hw(struct hwif_s *, hw_regs_t *); |
192 | 177 | ||
193 | struct ide_drive_s; | ||
194 | int ide_register_hw(hw_regs_t *, void (*)(struct ide_drive_s *), | ||
195 | struct hwif_s **); | ||
196 | |||
197 | static inline void ide_std_init_ports(hw_regs_t *hw, | 178 | static inline void ide_std_init_ports(hw_regs_t *hw, |
198 | unsigned long io_addr, | 179 | unsigned long io_addr, |
199 | unsigned long ctl_addr) | 180 | unsigned long ctl_addr) |
@@ -213,45 +194,6 @@ static inline void ide_std_init_ports(hw_regs_t *hw, | |||
213 | #define MAX_HWIFS CONFIG_IDE_MAX_HWIFS | 194 | #define MAX_HWIFS CONFIG_IDE_MAX_HWIFS |
214 | #endif | 195 | #endif |
215 | 196 | ||
216 | /* needed on alpha, x86/x86_64, ia64, mips, ppc32 and sh */ | ||
217 | #ifndef IDE_ARCH_OBSOLETE_DEFAULTS | ||
218 | # define ide_default_io_base(index) (0) | ||
219 | # define ide_default_irq(base) (0) | ||
220 | # define ide_init_default_irq(base) (0) | ||
221 | #endif | ||
222 | |||
223 | #ifdef CONFIG_IDE_ARCH_OBSOLETE_INIT | ||
224 | static inline void ide_init_hwif_ports(hw_regs_t *hw, | ||
225 | unsigned long io_addr, | ||
226 | unsigned long ctl_addr, | ||
227 | int *irq) | ||
228 | { | ||
229 | if (!ctl_addr) | ||
230 | ide_std_init_ports(hw, io_addr, ide_default_io_ctl(io_addr)); | ||
231 | else | ||
232 | ide_std_init_ports(hw, io_addr, ctl_addr); | ||
233 | |||
234 | if (irq) | ||
235 | *irq = 0; | ||
236 | |||
237 | hw->io_ports[IDE_IRQ_OFFSET] = 0; | ||
238 | |||
239 | #ifdef CONFIG_PPC32 | ||
240 | if (ppc_ide_md.ide_init_hwif) | ||
241 | ppc_ide_md.ide_init_hwif(hw, io_addr, ctl_addr, irq); | ||
242 | #endif | ||
243 | } | ||
244 | #else | ||
245 | static inline void ide_init_hwif_ports(hw_regs_t *hw, | ||
246 | unsigned long io_addr, | ||
247 | unsigned long ctl_addr, | ||
248 | int *irq) | ||
249 | { | ||
250 | if (io_addr || ctl_addr) | ||
251 | printk(KERN_WARNING "%s: must not be called\n", __FUNCTION__); | ||
252 | } | ||
253 | #endif /* CONFIG_IDE_ARCH_OBSOLETE_INIT */ | ||
254 | |||
255 | /* Currently only m68k, apus and m8xx need it */ | 197 | /* Currently only m68k, apus and m8xx need it */ |
256 | #ifndef IDE_ARCH_ACK_INTR | 198 | #ifndef IDE_ARCH_ACK_INTR |
257 | # define ide_ack_intr(hwif) (1) | 199 | # define ide_ack_intr(hwif) (1) |
@@ -406,7 +348,7 @@ typedef struct ide_drive_s { | |||
406 | u8 wcache; /* status of write cache */ | 348 | u8 wcache; /* status of write cache */ |
407 | u8 acoustic; /* acoustic management */ | 349 | u8 acoustic; /* acoustic management */ |
408 | u8 media; /* disk, cdrom, tape, floppy, ... */ | 350 | u8 media; /* disk, cdrom, tape, floppy, ... */ |
409 | u8 ctl; /* "normal" value for IDE_CONTROL_REG */ | 351 | u8 ctl; /* "normal" value for Control register */ |
410 | u8 ready_stat; /* min status value for drive ready */ | 352 | u8 ready_stat; /* min status value for drive ready */ |
411 | u8 mult_count; /* current multiple sector setting */ | 353 | u8 mult_count; /* current multiple sector setting */ |
412 | u8 mult_req; /* requested multiple sector setting */ | 354 | u8 mult_req; /* requested multiple sector setting */ |
@@ -507,8 +449,6 @@ typedef struct hwif_s { | |||
507 | void (*maskproc)(ide_drive_t *, int); | 449 | void (*maskproc)(ide_drive_t *, int); |
508 | /* check host's drive quirk list */ | 450 | /* check host's drive quirk list */ |
509 | void (*quirkproc)(ide_drive_t *); | 451 | void (*quirkproc)(ide_drive_t *); |
510 | /* driver soft-power interface */ | ||
511 | int (*busproc)(ide_drive_t *, int); | ||
512 | #endif | 452 | #endif |
513 | u8 (*mdma_filter)(ide_drive_t *); | 453 | u8 (*mdma_filter)(ide_drive_t *); |
514 | u8 (*udma_filter)(ide_drive_t *); | 454 | u8 (*udma_filter)(ide_drive_t *); |
@@ -578,7 +518,6 @@ typedef struct hwif_s { | |||
578 | 518 | ||
579 | unsigned noprobe : 1; /* don't probe for this interface */ | 519 | unsigned noprobe : 1; /* don't probe for this interface */ |
580 | unsigned present : 1; /* this interface exists */ | 520 | unsigned present : 1; /* this interface exists */ |
581 | unsigned hold : 1; /* this interface is always present */ | ||
582 | unsigned serialized : 1; /* serialized all channel operation */ | 521 | unsigned serialized : 1; /* serialized all channel operation */ |
583 | unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ | 522 | unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ |
584 | unsigned reset : 1; /* reset after probe */ | 523 | unsigned reset : 1; /* reset after probe */ |
@@ -586,7 +525,9 @@ typedef struct hwif_s { | |||
586 | unsigned mmio : 1; /* host uses MMIO */ | 525 | unsigned mmio : 1; /* host uses MMIO */ |
587 | unsigned straight8 : 1; /* Alan's straight 8 check */ | 526 | unsigned straight8 : 1; /* Alan's straight 8 check */ |
588 | 527 | ||
589 | struct device gendev; | 528 | struct device gendev; |
529 | struct device *portdev; | ||
530 | |||
590 | struct completion gendev_rel_comp; /* To deal with device release() */ | 531 | struct completion gendev_rel_comp; /* To deal with device release() */ |
591 | 532 | ||
592 | void *hwif_data; /* extra hwif data */ | 533 | void *hwif_data; /* extra hwif data */ |
@@ -647,6 +588,68 @@ int set_io_32bit(ide_drive_t *, int); | |||
647 | int set_pio_mode(ide_drive_t *, int); | 588 | int set_pio_mode(ide_drive_t *, int); |
648 | int set_using_dma(ide_drive_t *, int); | 589 | int set_using_dma(ide_drive_t *, int); |
649 | 590 | ||
591 | /* ATAPI packet command flags */ | ||
592 | enum { | ||
593 | /* set when an error is considered normal - no retry (ide-tape) */ | ||
594 | PC_FLAG_ABORT = (1 << 0), | ||
595 | PC_FLAG_SUPPRESS_ERROR = (1 << 1), | ||
596 | PC_FLAG_WAIT_FOR_DSC = (1 << 2), | ||
597 | PC_FLAG_DMA_OK = (1 << 3), | ||
598 | PC_FLAG_DMA_RECOMMENDED = (1 << 4), | ||
599 | PC_FLAG_DMA_IN_PROGRESS = (1 << 5), | ||
600 | PC_FLAG_DMA_ERROR = (1 << 6), | ||
601 | PC_FLAG_WRITING = (1 << 7), | ||
602 | /* command timed out */ | ||
603 | PC_FLAG_TIMEDOUT = (1 << 8), | ||
604 | }; | ||
605 | |||
606 | struct ide_atapi_pc { | ||
607 | /* actual packet bytes */ | ||
608 | u8 c[12]; | ||
609 | /* incremented on each retry */ | ||
610 | int retries; | ||
611 | int error; | ||
612 | |||
613 | /* bytes to transfer */ | ||
614 | int req_xfer; | ||
615 | /* bytes actually transferred */ | ||
616 | int xferred; | ||
617 | |||
618 | /* data buffer */ | ||
619 | u8 *buf; | ||
620 | /* current buffer position */ | ||
621 | u8 *cur_pos; | ||
622 | int buf_size; | ||
623 | /* missing/available data on the current buffer */ | ||
624 | int b_count; | ||
625 | |||
626 | /* the corresponding request */ | ||
627 | struct request *rq; | ||
628 | |||
629 | unsigned long flags; | ||
630 | |||
631 | /* | ||
632 | * those are more or less driver-specific and some of them are subject | ||
633 | * to change/removal later. | ||
634 | */ | ||
635 | u8 pc_buf[256]; | ||
636 | void (*idefloppy_callback) (ide_drive_t *); | ||
637 | ide_startstop_t (*idetape_callback) (ide_drive_t *); | ||
638 | |||
639 | /* idetape only */ | ||
640 | struct idetape_bh *bh; | ||
641 | char *b_data; | ||
642 | |||
643 | /* idescsi only for now */ | ||
644 | struct scatterlist *sg; | ||
645 | unsigned int sg_cnt; | ||
646 | |||
647 | struct scsi_cmnd *scsi_cmd; | ||
648 | void (*done) (struct scsi_cmnd *); | ||
649 | |||
650 | unsigned long timeout; | ||
651 | }; | ||
652 | |||
650 | #ifdef CONFIG_IDE_PROC_FS | 653 | #ifdef CONFIG_IDE_PROC_FS |
651 | /* | 654 | /* |
652 | * configurable drive settings | 655 | * configurable drive settings |
@@ -691,6 +694,7 @@ void proc_ide_create(void); | |||
691 | void proc_ide_destroy(void); | 694 | void proc_ide_destroy(void); |
692 | void ide_proc_register_port(ide_hwif_t *); | 695 | void ide_proc_register_port(ide_hwif_t *); |
693 | void ide_proc_port_register_devices(ide_hwif_t *); | 696 | void ide_proc_port_register_devices(ide_hwif_t *); |
697 | void ide_proc_unregister_device(ide_drive_t *); | ||
694 | void ide_proc_unregister_port(ide_hwif_t *); | 698 | void ide_proc_unregister_port(ide_hwif_t *); |
695 | void ide_proc_register_driver(ide_drive_t *, ide_driver_t *); | 699 | void ide_proc_register_driver(ide_drive_t *, ide_driver_t *); |
696 | void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *); | 700 | void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *); |
@@ -724,6 +728,7 @@ static inline void proc_ide_create(void) { ; } | |||
724 | static inline void proc_ide_destroy(void) { ; } | 728 | static inline void proc_ide_destroy(void) { ; } |
725 | static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; } | 729 | static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; } |
726 | static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; } | 730 | static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; } |
731 | static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; } | ||
727 | static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; } | 732 | static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; } |
728 | static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } | 733 | static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } |
729 | static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } | 734 | static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } |
@@ -990,7 +995,6 @@ extern void do_ide_request(struct request_queue *); | |||
990 | void ide_init_disk(struct gendisk *, ide_drive_t *); | 995 | void ide_init_disk(struct gendisk *, ide_drive_t *); |
991 | 996 | ||
992 | #ifdef CONFIG_IDEPCI_PCIBUS_ORDER | 997 | #ifdef CONFIG_IDEPCI_PCIBUS_ORDER |
993 | extern int ide_scan_direction; | ||
994 | extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name); | 998 | extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name); |
995 | #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME) | 999 | #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME) |
996 | #else | 1000 | #else |
@@ -1195,7 +1199,7 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} | |||
1195 | void ide_remove_port_from_hwgroup(ide_hwif_t *); | 1199 | void ide_remove_port_from_hwgroup(ide_hwif_t *); |
1196 | extern int ide_hwif_request_regions(ide_hwif_t *hwif); | 1200 | extern int ide_hwif_request_regions(ide_hwif_t *hwif); |
1197 | extern void ide_hwif_release_regions(ide_hwif_t* hwif); | 1201 | extern void ide_hwif_release_regions(ide_hwif_t* hwif); |
1198 | void ide_unregister(unsigned int, int, int); | 1202 | void ide_unregister(unsigned int); |
1199 | 1203 | ||
1200 | void ide_register_region(struct gendisk *); | 1204 | void ide_register_region(struct gendisk *); |
1201 | void ide_unregister_region(struct gendisk *); | 1205 | void ide_unregister_region(struct gendisk *); |
@@ -1204,6 +1208,8 @@ void ide_undecoded_slave(ide_drive_t *); | |||
1204 | 1208 | ||
1205 | int ide_device_add_all(u8 *idx, const struct ide_port_info *); | 1209 | int ide_device_add_all(u8 *idx, const struct ide_port_info *); |
1206 | int ide_device_add(u8 idx[4], const struct ide_port_info *); | 1210 | int ide_device_add(u8 idx[4], const struct ide_port_info *); |
1211 | void ide_port_unregister_devices(ide_hwif_t *); | ||
1212 | void ide_port_scan(ide_hwif_t *); | ||
1207 | 1213 | ||
1208 | static inline void *ide_get_hwifdata (ide_hwif_t * hwif) | 1214 | static inline void *ide_get_hwifdata (ide_hwif_t * hwif) |
1209 | { | 1215 | { |
@@ -1279,6 +1285,7 @@ extern struct mutex ide_cfg_mtx; | |||
1279 | #define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) | 1285 | #define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) |
1280 | 1286 | ||
1281 | extern struct bus_type ide_bus_type; | 1287 | extern struct bus_type ide_bus_type; |
1288 | extern struct class *ide_port_class; | ||
1282 | 1289 | ||
1283 | /* check if CACHE FLUSH (EXT) command is supported (bits defined in ATA-6) */ | 1290 | /* check if CACHE FLUSH (EXT) command is supported (bits defined in ATA-6) */ |
1284 | #define ide_id_has_flush_cache(id) ((id)->cfs_enable_2 & 0x3000) | 1291 | #define ide_id_has_flush_cache(id) ((id)->cfs_enable_2 & 0x3000) |
@@ -1307,7 +1314,10 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive) | |||
1307 | 1314 | ||
1308 | static inline void ide_set_irq(ide_drive_t *drive, int on) | 1315 | static inline void ide_set_irq(ide_drive_t *drive, int on) |
1309 | { | 1316 | { |
1310 | drive->hwif->OUTB(drive->ctl | (on ? 0 : 2), IDE_CONTROL_REG); | 1317 | ide_hwif_t *hwif = drive->hwif; |
1318 | |||
1319 | hwif->OUTB(drive->ctl | (on ? 0 : 2), | ||
1320 | hwif->io_ports[IDE_CONTROL_OFFSET]); | ||
1311 | } | 1321 | } |
1312 | 1322 | ||
1313 | static inline u8 ide_read_status(ide_drive_t *drive) | 1323 | static inline u8 ide_read_status(ide_drive_t *drive) |
@@ -1331,4 +1341,26 @@ static inline u8 ide_read_error(ide_drive_t *drive) | |||
1331 | return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]); | 1341 | return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]); |
1332 | } | 1342 | } |
1333 | 1343 | ||
1344 | /* | ||
1345 | * Too bad. The drive wants to send us data which we are not ready to accept. | ||
1346 | * Just throw it away. | ||
1347 | */ | ||
1348 | static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount) | ||
1349 | { | ||
1350 | ide_hwif_t *hwif = drive->hwif; | ||
1351 | |||
1352 | /* FIXME: use ->atapi_input_bytes */ | ||
1353 | while (bcount--) | ||
1354 | (void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]); | ||
1355 | } | ||
1356 | |||
1357 | static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount) | ||
1358 | { | ||
1359 | ide_hwif_t *hwif = drive->hwif; | ||
1360 | |||
1361 | /* FIXME: use ->atapi_output_bytes */ | ||
1362 | while (bcount--) | ||
1363 | hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]); | ||
1364 | } | ||
1365 | |||
1334 | #endif /* _IDE_H */ | 1366 | #endif /* _IDE_H */ |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index f577c8f1c66d..f27d11ab418b 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -97,6 +97,7 @@ | |||
97 | #define IEEE80211_MAX_FRAME_LEN 2352 | 97 | #define IEEE80211_MAX_FRAME_LEN 2352 |
98 | 98 | ||
99 | #define IEEE80211_MAX_SSID_LEN 32 | 99 | #define IEEE80211_MAX_SSID_LEN 32 |
100 | #define IEEE80211_MAX_MESH_ID_LEN 32 | ||
100 | 101 | ||
101 | struct ieee80211_hdr { | 102 | struct ieee80211_hdr { |
102 | __le16 frame_control; | 103 | __le16 frame_control; |
@@ -109,6 +110,16 @@ struct ieee80211_hdr { | |||
109 | } __attribute__ ((packed)); | 110 | } __attribute__ ((packed)); |
110 | 111 | ||
111 | 112 | ||
113 | struct ieee80211s_hdr { | ||
114 | u8 flags; | ||
115 | u8 ttl; | ||
116 | u8 seqnum[3]; | ||
117 | u8 eaddr1[6]; | ||
118 | u8 eaddr2[6]; | ||
119 | u8 eaddr3[6]; | ||
120 | } __attribute__ ((packed)); | ||
121 | |||
122 | |||
112 | struct ieee80211_mgmt { | 123 | struct ieee80211_mgmt { |
113 | __le16 frame_control; | 124 | __le16 frame_control; |
114 | __le16 duration; | 125 | __le16 duration; |
@@ -206,6 +217,23 @@ struct ieee80211_mgmt { | |||
206 | __le16 params; | 217 | __le16 params; |
207 | __le16 reason_code; | 218 | __le16 reason_code; |
208 | } __attribute__((packed)) delba; | 219 | } __attribute__((packed)) delba; |
220 | struct{ | ||
221 | u8 action_code; | ||
222 | /* capab_info for open and confirm, | ||
223 | * reason for close | ||
224 | */ | ||
225 | __le16 aux; | ||
226 | /* Followed in plink_confirm by status | ||
227 | * code, AID and supported rates, | ||
228 | * and directly by supported rates in | ||
229 | * plink_open and plink_close | ||
230 | */ | ||
231 | u8 variable[0]; | ||
232 | } __attribute__((packed)) plink_action; | ||
233 | struct{ | ||
234 | u8 action_code; | ||
235 | u8 variable[0]; | ||
236 | } __attribute__((packed)) mesh_action; | ||
209 | } u; | 237 | } u; |
210 | } __attribute__ ((packed)) action; | 238 | } __attribute__ ((packed)) action; |
211 | } u; | 239 | } u; |
@@ -437,6 +465,13 @@ enum ieee80211_eid { | |||
437 | WLAN_EID_TS_DELAY = 43, | 465 | WLAN_EID_TS_DELAY = 43, |
438 | WLAN_EID_TCLAS_PROCESSING = 44, | 466 | WLAN_EID_TCLAS_PROCESSING = 44, |
439 | WLAN_EID_QOS_CAPA = 46, | 467 | WLAN_EID_QOS_CAPA = 46, |
468 | /* 802.11s */ | ||
469 | WLAN_EID_MESH_CONFIG = 36, /* Pending IEEE 802.11 ANA approval */ | ||
470 | WLAN_EID_MESH_ID = 37, /* Pending IEEE 802.11 ANA approval */ | ||
471 | WLAN_EID_PEER_LINK = 40, /* Pending IEEE 802.11 ANA approval */ | ||
472 | WLAN_EID_PREQ = 53, /* Pending IEEE 802.11 ANA approval */ | ||
473 | WLAN_EID_PREP = 54, /* Pending IEEE 802.11 ANA approval */ | ||
474 | WLAN_EID_PERR = 55, /* Pending IEEE 802.11 ANA approval */ | ||
440 | /* 802.11h */ | 475 | /* 802.11h */ |
441 | WLAN_EID_PWR_CONSTRAINT = 32, | 476 | WLAN_EID_PWR_CONSTRAINT = 32, |
442 | WLAN_EID_PWR_CAPABILITY = 33, | 477 | WLAN_EID_PWR_CAPABILITY = 33, |
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 296e8e86e91d..4d3401812e6c 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h | |||
@@ -156,6 +156,12 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb) | |||
156 | { | 156 | { |
157 | return (struct arphdr *)skb_network_header(skb); | 157 | return (struct arphdr *)skb_network_header(skb); |
158 | } | 158 | } |
159 | |||
160 | static inline int arp_hdr_len(struct net_device *dev) | ||
161 | { | ||
162 | /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ | ||
163 | return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; | ||
164 | } | ||
159 | #endif | 165 | #endif |
160 | 166 | ||
161 | #endif /* _LINUX_IF_ARP_H */ | 167 | #endif /* _LINUX_IF_ARP_H */ |
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 72f1c5f47be3..8c71fe2fb1f5 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h | |||
@@ -18,47 +18,8 @@ | |||
18 | #ifndef __IF_TUN_H | 18 | #ifndef __IF_TUN_H |
19 | #define __IF_TUN_H | 19 | #define __IF_TUN_H |
20 | 20 | ||
21 | /* Uncomment to enable debugging */ | ||
22 | /* #define TUN_DEBUG 1 */ | ||
23 | |||
24 | #include <linux/types.h> | 21 | #include <linux/types.h> |
25 | 22 | ||
26 | #ifdef __KERNEL__ | ||
27 | |||
28 | #ifdef TUN_DEBUG | ||
29 | #define DBG if(tun->debug)printk | ||
30 | #define DBG1 if(debug==2)printk | ||
31 | #else | ||
32 | #define DBG( a... ) | ||
33 | #define DBG1( a... ) | ||
34 | #endif | ||
35 | |||
36 | struct tun_struct { | ||
37 | struct list_head list; | ||
38 | unsigned long flags; | ||
39 | int attached; | ||
40 | uid_t owner; | ||
41 | gid_t group; | ||
42 | |||
43 | wait_queue_head_t read_wait; | ||
44 | struct sk_buff_head readq; | ||
45 | |||
46 | struct net_device *dev; | ||
47 | |||
48 | struct fasync_struct *fasync; | ||
49 | |||
50 | unsigned long if_flags; | ||
51 | u8 dev_addr[ETH_ALEN]; | ||
52 | u32 chr_filter[2]; | ||
53 | u32 net_filter[2]; | ||
54 | |||
55 | #ifdef TUN_DEBUG | ||
56 | int debug; | ||
57 | #endif | ||
58 | }; | ||
59 | |||
60 | #endif /* __KERNEL__ */ | ||
61 | |||
62 | /* Read queue size */ | 23 | /* Read queue size */ |
63 | #define TUN_READQ_SIZE 500 | 24 | #define TUN_READQ_SIZE 500 |
64 | 25 | ||
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index 228eb4eb3129..f1fbe9c930d7 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h | |||
@@ -7,6 +7,10 @@ | |||
7 | #define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1) | 7 | #define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1) |
8 | #define SIOCDELTUNNEL (SIOCDEVPRIVATE + 2) | 8 | #define SIOCDELTUNNEL (SIOCDEVPRIVATE + 2) |
9 | #define SIOCCHGTUNNEL (SIOCDEVPRIVATE + 3) | 9 | #define SIOCCHGTUNNEL (SIOCDEVPRIVATE + 3) |
10 | #define SIOCGETPRL (SIOCDEVPRIVATE + 4) | ||
11 | #define SIOCADDPRL (SIOCDEVPRIVATE + 5) | ||
12 | #define SIOCDELPRL (SIOCDEVPRIVATE + 6) | ||
13 | #define SIOCCHGPRL (SIOCDEVPRIVATE + 7) | ||
10 | 14 | ||
11 | #define GRE_CSUM __constant_htons(0x8000) | 15 | #define GRE_CSUM __constant_htons(0x8000) |
12 | #define GRE_ROUTING __constant_htons(0x4000) | 16 | #define GRE_ROUTING __constant_htons(0x4000) |
@@ -17,9 +21,6 @@ | |||
17 | #define GRE_FLAGS __constant_htons(0x00F8) | 21 | #define GRE_FLAGS __constant_htons(0x00F8) |
18 | #define GRE_VERSION __constant_htons(0x0007) | 22 | #define GRE_VERSION __constant_htons(0x0007) |
19 | 23 | ||
20 | /* i_flags values for SIT mode */ | ||
21 | #define SIT_ISATAP 0x0001 | ||
22 | |||
23 | struct ip_tunnel_parm | 24 | struct ip_tunnel_parm |
24 | { | 25 | { |
25 | char name[IFNAMSIZ]; | 26 | char name[IFNAMSIZ]; |
@@ -31,4 +32,19 @@ struct ip_tunnel_parm | |||
31 | struct iphdr iph; | 32 | struct iphdr iph; |
32 | }; | 33 | }; |
33 | 34 | ||
35 | /* SIT-mode i_flags */ | ||
36 | #define SIT_ISATAP 0x0001 | ||
37 | |||
38 | struct ip_tunnel_prl { | ||
39 | __be32 addr; | ||
40 | __u16 flags; | ||
41 | __u16 __reserved; | ||
42 | __u32 datalen; | ||
43 | __u32 __reserved2; | ||
44 | void __user *data; | ||
45 | }; | ||
46 | |||
47 | /* PRL flags */ | ||
48 | #define PRL_DEFAULT 0x0001 | ||
49 | |||
34 | #endif /* _IF_TUNNEL_H_ */ | 50 | #endif /* _IF_TUNNEL_H_ */ |
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 79504b22a932..15ace02b7b24 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
@@ -81,7 +81,9 @@ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); | |||
81 | #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS) | 81 | #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS) |
82 | 82 | ||
83 | struct vlan_group { | 83 | struct vlan_group { |
84 | int real_dev_ifindex; /* The ifindex of the ethernet(like) device the vlan is attached to. */ | 84 | struct net_device *real_dev; /* The ethernet(like) device |
85 | * the vlan is attached to. | ||
86 | */ | ||
85 | unsigned int nr_vlans; | 87 | unsigned int nr_vlans; |
86 | struct hlist_node hlist; /* linked list */ | 88 | struct hlist_node hlist; /* linked list */ |
87 | struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; | 89 | struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; |
@@ -93,7 +95,7 @@ static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, | |||
93 | { | 95 | { |
94 | struct net_device **array; | 96 | struct net_device **array; |
95 | array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; | 97 | array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; |
96 | return array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN]; | 98 | return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL; |
97 | } | 99 | } |
98 | 100 | ||
99 | static inline void vlan_group_set_device(struct vlan_group *vg, | 101 | static inline void vlan_group_set_device(struct vlan_group *vg, |
diff --git a/include/linux/if_wanpipe.h b/include/linux/if_wanpipe.h deleted file mode 100644 index e594ca6069e5..000000000000 --- a/include/linux/if_wanpipe.h +++ /dev/null | |||
@@ -1,124 +0,0 @@ | |||
1 | /***************************************************************************** | ||
2 | * if_wanpipe.h Header file for the Sangoma AF_WANPIPE Socket | ||
3 | * | ||
4 | * Author: Nenad Corbic | ||
5 | * | ||
6 | * Copyright: (c) 2000 Sangoma Technologies Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | * ============================================================================ | ||
13 | * | ||
14 | * Jan 28, 2000 Nenad Corbic Initial Version | ||
15 | * | ||
16 | *****************************************************************************/ | ||
17 | |||
18 | #ifndef __LINUX_IF_WAN_PACKET_H | ||
19 | #define __LINUX_IF_WAN_PACKET_H | ||
20 | |||
21 | struct wan_sockaddr_ll | ||
22 | { | ||
23 | unsigned short sll_family; | ||
24 | unsigned short sll_protocol; | ||
25 | int sll_ifindex; | ||
26 | unsigned short sll_hatype; | ||
27 | unsigned char sll_pkttype; | ||
28 | unsigned char sll_halen; | ||
29 | unsigned char sll_addr[8]; | ||
30 | unsigned char sll_device[14]; | ||
31 | unsigned char sll_card[14]; | ||
32 | }; | ||
33 | |||
34 | typedef struct | ||
35 | { | ||
36 | unsigned char free; | ||
37 | unsigned char state_sk; | ||
38 | int rcvbuf; | ||
39 | int sndbuf; | ||
40 | int rmem; | ||
41 | int wmem; | ||
42 | int sk_count; | ||
43 | unsigned char bound; | ||
44 | char name[14]; | ||
45 | unsigned char d_state; | ||
46 | unsigned char svc; | ||
47 | unsigned short lcn; | ||
48 | unsigned char mbox; | ||
49 | unsigned char cmd_busy; | ||
50 | unsigned char command; | ||
51 | unsigned poll; | ||
52 | unsigned poll_cnt; | ||
53 | int rblock; | ||
54 | } wan_debug_hdr_t; | ||
55 | |||
56 | #define MAX_NUM_DEBUG 10 | ||
57 | #define X25_PROT 0x16 | ||
58 | #define PVC_PROT 0x17 | ||
59 | |||
60 | typedef struct | ||
61 | { | ||
62 | wan_debug_hdr_t debug[MAX_NUM_DEBUG]; | ||
63 | }wan_debug_t; | ||
64 | |||
65 | #define SIOC_WANPIPE_GET_CALL_DATA (SIOCPROTOPRIVATE + 0) | ||
66 | #define SIOC_WANPIPE_SET_CALL_DATA (SIOCPROTOPRIVATE + 1) | ||
67 | #define SIOC_WANPIPE_ACCEPT_CALL (SIOCPROTOPRIVATE + 2) | ||
68 | #define SIOC_WANPIPE_CLEAR_CALL (SIOCPROTOPRIVATE + 3) | ||
69 | #define SIOC_WANPIPE_RESET_CALL (SIOCPROTOPRIVATE + 4) | ||
70 | #define SIOC_WANPIPE_DEBUG (SIOCPROTOPRIVATE + 5) | ||
71 | #define SIOC_WANPIPE_SET_NONBLOCK (SIOCPROTOPRIVATE + 6) | ||
72 | #define SIOC_WANPIPE_CHECK_TX (SIOCPROTOPRIVATE + 7) | ||
73 | #define SIOC_WANPIPE_SOCK_STATE (SIOCPROTOPRIVATE + 8) | ||
74 | |||
75 | /* Packet types */ | ||
76 | |||
77 | #define WAN_PACKET_HOST 0 /* To us */ | ||
78 | #define WAN_PACKET_BROADCAST 1 /* To all */ | ||
79 | #define WAN_PACKET_MULTICAST 2 /* To group */ | ||
80 | #define WAN_PACKET_OTHERHOST 3 /* To someone else */ | ||
81 | #define WAN_PACKET_OUTGOING 4 /* Outgoing of any type */ | ||
82 | /* These ones are invisible by user level */ | ||
83 | #define WAN_PACKET_LOOPBACK 5 /* MC/BRD frame looped back */ | ||
84 | #define WAN_PACKET_FASTROUTE 6 /* Fastrouted frame */ | ||
85 | |||
86 | |||
87 | /* X25 specific */ | ||
88 | #define WAN_PACKET_DATA 7 | ||
89 | #define WAN_PACKET_CMD 8 | ||
90 | #define WAN_PACKET_ASYNC 9 | ||
91 | #define WAN_PACKET_ERR 10 | ||
92 | |||
93 | /* Packet socket options */ | ||
94 | |||
95 | #define WAN_PACKET_ADD_MEMBERSHIP 1 | ||
96 | #define WAN_PACKET_DROP_MEMBERSHIP 2 | ||
97 | |||
98 | #define WAN_PACKET_MR_MULTICAST 0 | ||
99 | #define WAN_PACKET_MR_PROMISC 1 | ||
100 | #define WAN_PACKET_MR_ALLMULTI 2 | ||
101 | |||
102 | #ifdef __KERNEL__ | ||
103 | |||
104 | /* Private wanpipe socket structures. */ | ||
105 | struct wanpipe_opt | ||
106 | { | ||
107 | void *mbox; /* Mail box */ | ||
108 | void *card; /* Card bouded to */ | ||
109 | struct net_device *dev; /* Bounded device */ | ||
110 | unsigned short lcn; /* Binded LCN */ | ||
111 | unsigned char svc; /* 0=pvc, 1=svc */ | ||
112 | unsigned char timer; /* flag for delayed transmit*/ | ||
113 | struct timer_list tx_timer; | ||
114 | unsigned poll_cnt; | ||
115 | unsigned char force; /* Used to force sock release */ | ||
116 | atomic_t packet_sent; | ||
117 | unsigned short num; | ||
118 | }; | ||
119 | |||
120 | #define wp_sk(__sk) ((struct wanpipe_opt *)(__sk)->sk_protinfo) | ||
121 | |||
122 | #endif | ||
123 | |||
124 | #endif | ||
diff --git a/include/linux/igmp.h b/include/linux/igmp.h index f510e7e382a8..f5a1a0db2e8e 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h | |||
@@ -80,27 +80,6 @@ struct igmpv3_query { | |||
80 | __be32 srcs[0]; | 80 | __be32 srcs[0]; |
81 | }; | 81 | }; |
82 | 82 | ||
83 | #ifdef __KERNEL__ | ||
84 | #include <linux/skbuff.h> | ||
85 | |||
86 | static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) | ||
87 | { | ||
88 | return (struct igmphdr *)skb_transport_header(skb); | ||
89 | } | ||
90 | |||
91 | static inline struct igmpv3_report * | ||
92 | igmpv3_report_hdr(const struct sk_buff *skb) | ||
93 | { | ||
94 | return (struct igmpv3_report *)skb_transport_header(skb); | ||
95 | } | ||
96 | |||
97 | static inline struct igmpv3_query * | ||
98 | igmpv3_query_hdr(const struct sk_buff *skb) | ||
99 | { | ||
100 | return (struct igmpv3_query *)skb_transport_header(skb); | ||
101 | } | ||
102 | #endif | ||
103 | |||
104 | #define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */ | 83 | #define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* From RFC1112 */ |
105 | #define IGMP_HOST_MEMBERSHIP_REPORT 0x12 /* Ditto */ | 84 | #define IGMP_HOST_MEMBERSHIP_REPORT 0x12 /* Ditto */ |
106 | #define IGMP_DVMRP 0x13 /* DVMRP routing */ | 85 | #define IGMP_DVMRP 0x13 /* DVMRP routing */ |
@@ -151,6 +130,23 @@ static inline struct igmpv3_query * | |||
151 | #include <linux/timer.h> | 130 | #include <linux/timer.h> |
152 | #include <linux/in.h> | 131 | #include <linux/in.h> |
153 | 132 | ||
133 | static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb) | ||
134 | { | ||
135 | return (struct igmphdr *)skb_transport_header(skb); | ||
136 | } | ||
137 | |||
138 | static inline struct igmpv3_report * | ||
139 | igmpv3_report_hdr(const struct sk_buff *skb) | ||
140 | { | ||
141 | return (struct igmpv3_report *)skb_transport_header(skb); | ||
142 | } | ||
143 | |||
144 | static inline struct igmpv3_query * | ||
145 | igmpv3_query_hdr(const struct sk_buff *skb) | ||
146 | { | ||
147 | return (struct igmpv3_query *)skb_transport_header(skb); | ||
148 | } | ||
149 | |||
154 | extern int sysctl_igmp_max_memberships; | 150 | extern int sysctl_igmp_max_memberships; |
155 | extern int sysctl_igmp_max_msf; | 151 | extern int sysctl_igmp_max_msf; |
156 | 152 | ||
diff --git a/include/linux/in6.h b/include/linux/in6.h index 2a61c82af115..bc492048c349 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h | |||
@@ -48,6 +48,14 @@ extern const struct in6_addr in6addr_any; | |||
48 | #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } } | 48 | #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } } |
49 | extern const struct in6_addr in6addr_loopback; | 49 | extern const struct in6_addr in6addr_loopback; |
50 | #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } | 50 | #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } |
51 | #ifdef __KERNEL__ | ||
52 | extern const struct in6_addr in6addr_linklocal_allnodes; | ||
53 | #define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ | ||
54 | { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } | ||
55 | extern const struct in6_addr in6addr_linklocal_allrouters; | ||
56 | #define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \ | ||
57 | { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } } | ||
58 | #endif | ||
51 | 59 | ||
52 | struct sockaddr_in6 { | 60 | struct sockaddr_in6 { |
53 | unsigned short int sin6_family; /* AF_INET6 */ | 61 | unsigned short int sin6_family; /* AF_INET6 */ |
@@ -249,4 +257,30 @@ struct in6_flowlabel_req | |||
249 | * IP6T_SO_GET_REVISION_TARGET 69 | 257 | * IP6T_SO_GET_REVISION_TARGET 69 |
250 | */ | 258 | */ |
251 | 259 | ||
260 | /* RFC5014: Source address selection */ | ||
261 | #define IPV6_ADDR_PREFERENCES 72 | ||
262 | |||
263 | #define IPV6_PREFER_SRC_TMP 0x0001 | ||
264 | #define IPV6_PREFER_SRC_PUBLIC 0x0002 | ||
265 | #define IPV6_PREFER_SRC_PUBTMP_DEFAULT 0x0100 | ||
266 | #define IPV6_PREFER_SRC_COA 0x0004 | ||
267 | #define IPV6_PREFER_SRC_HOME 0x0400 | ||
268 | #define IPV6_PREFER_SRC_CGA 0x0008 | ||
269 | #define IPV6_PREFER_SRC_NONCGA 0x0800 | ||
270 | |||
271 | /* | ||
272 | * Multicast Routing: | ||
273 | * see include/linux/mroute6.h. | ||
274 | * | ||
275 | * MRT6_INIT 200 | ||
276 | * MRT6_DONE 201 | ||
277 | * MRT6_ADD_MIF 202 | ||
278 | * MRT6_DEL_MIF 203 | ||
279 | * MRT6_ADD_MFC 204 | ||
280 | * MRT6_DEL_MFC 205 | ||
281 | * MRT6_VERSION 206 | ||
282 | * MRT6_ASSERT 207 | ||
283 | * MRT6_PIM 208 | ||
284 | * (reserved) 209 | ||
285 | */ | ||
252 | #endif | 286 | #endif |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index fc4e3db649e8..7009b0cdd06f 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -70,13 +70,13 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) | |||
70 | ipv4_devconf_set((in_dev), NET_IPV4_CONF_ ## attr, (val)) | 70 | ipv4_devconf_set((in_dev), NET_IPV4_CONF_ ## attr, (val)) |
71 | 71 | ||
72 | #define IN_DEV_ANDCONF(in_dev, attr) \ | 72 | #define IN_DEV_ANDCONF(in_dev, attr) \ |
73 | (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr) && \ | 73 | (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \ |
74 | IN_DEV_CONF_GET((in_dev), attr)) | 74 | IN_DEV_CONF_GET((in_dev), attr)) |
75 | #define IN_DEV_ORCONF(in_dev, attr) \ | 75 | #define IN_DEV_ORCONF(in_dev, attr) \ |
76 | (IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr) || \ | 76 | (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) || \ |
77 | IN_DEV_CONF_GET((in_dev), attr)) | 77 | IN_DEV_CONF_GET((in_dev), attr)) |
78 | #define IN_DEV_MAXCONF(in_dev, attr) \ | 78 | #define IN_DEV_MAXCONF(in_dev, attr) \ |
79 | (max(IPV4_DEVCONF_ALL(in_dev->dev->nd_net, attr), \ | 79 | (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \ |
80 | IN_DEV_CONF_GET((in_dev), attr))) | 80 | IN_DEV_CONF_GET((in_dev), attr))) |
81 | 81 | ||
82 | #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) | 82 | #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) |
@@ -129,7 +129,7 @@ extern int unregister_inetaddr_notifier(struct notifier_block *nb); | |||
129 | 129 | ||
130 | extern struct net_device *ip_dev_find(struct net *net, __be32 addr); | 130 | extern struct net_device *ip_dev_find(struct net *net, __be32 addr); |
131 | extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); | 131 | extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); |
132 | extern int devinet_ioctl(unsigned int cmd, void __user *); | 132 | extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); |
133 | extern void devinet_init(void); | 133 | extern void devinet_init(void); |
134 | extern struct in_device *inetdev_by_index(struct net *, int); | 134 | extern struct in_device *inetdev_by_index(struct net *, int); |
135 | extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); | 135 | extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f8ab4ce70564..b5fef13148bd 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -102,6 +102,25 @@ extern void disable_irq_nosync(unsigned int irq); | |||
102 | extern void disable_irq(unsigned int irq); | 102 | extern void disable_irq(unsigned int irq); |
103 | extern void enable_irq(unsigned int irq); | 103 | extern void enable_irq(unsigned int irq); |
104 | 104 | ||
105 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | ||
106 | |||
107 | extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); | ||
108 | extern int irq_can_set_affinity(unsigned int irq); | ||
109 | |||
110 | #else /* CONFIG_SMP */ | ||
111 | |||
112 | static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | ||
113 | { | ||
114 | return -EINVAL; | ||
115 | } | ||
116 | |||
117 | static inline int irq_can_set_affinity(unsigned int irq) | ||
118 | { | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | #endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */ | ||
123 | |||
105 | #ifdef CONFIG_GENERIC_HARDIRQS | 124 | #ifdef CONFIG_GENERIC_HARDIRQS |
106 | /* | 125 | /* |
107 | * Special lockdep variants of irq disabling/enabling. | 126 | * Special lockdep variants of irq disabling/enabling. |
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index cac4b364cd40..2b7a1187cb29 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
@@ -91,8 +91,10 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) | |||
91 | * if ref count is zero, don't allow sharing (ioc is going away, it's | 91 | * if ref count is zero, don't allow sharing (ioc is going away, it's |
92 | * a race). | 92 | * a race). |
93 | */ | 93 | */ |
94 | if (ioc && atomic_inc_not_zero(&ioc->refcount)) | 94 | if (ioc && atomic_inc_not_zero(&ioc->refcount)) { |
95 | atomic_inc(&ioc->nr_tasks); | ||
95 | return ioc; | 96 | return ioc; |
97 | } | ||
96 | 98 | ||
97 | return NULL; | 99 | return NULL; |
98 | } | 100 | } |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 4aaefc349a4b..10b666b61add 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -53,7 +53,7 @@ struct ipv6_opt_hdr { | |||
53 | /* | 53 | /* |
54 | * TLV encoded option data follows. | 54 | * TLV encoded option data follows. |
55 | */ | 55 | */ |
56 | }; | 56 | } __attribute__ ((packed)); /* required for some archs */ |
57 | 57 | ||
58 | #define ipv6_destopt_hdr ipv6_opt_hdr | 58 | #define ipv6_destopt_hdr ipv6_opt_hdr |
59 | #define ipv6_hopopt_hdr ipv6_opt_hdr | 59 | #define ipv6_hopopt_hdr ipv6_opt_hdr |
@@ -160,6 +160,9 @@ struct ipv6_devconf { | |||
160 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 160 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
161 | __s32 optimistic_dad; | 161 | __s32 optimistic_dad; |
162 | #endif | 162 | #endif |
163 | #ifdef CONFIG_IPV6_MROUTE | ||
164 | __s32 mc_forwarding; | ||
165 | #endif | ||
163 | void *sysctl; | 166 | void *sysctl; |
164 | }; | 167 | }; |
165 | 168 | ||
@@ -190,6 +193,7 @@ enum { | |||
190 | DEVCONF_PROXY_NDP, | 193 | DEVCONF_PROXY_NDP, |
191 | DEVCONF_OPTIMISTIC_DAD, | 194 | DEVCONF_OPTIMISTIC_DAD, |
192 | DEVCONF_ACCEPT_SOURCE_ROUTE, | 195 | DEVCONF_ACCEPT_SOURCE_ROUTE, |
196 | DEVCONF_MC_FORWARDING, | ||
193 | DEVCONF_MAX | 197 | DEVCONF_MAX |
194 | }; | 198 | }; |
195 | 199 | ||
@@ -230,6 +234,7 @@ struct inet6_skb_parm { | |||
230 | #endif | 234 | #endif |
231 | 235 | ||
232 | #define IP6SKB_XFRM_TRANSFORMED 1 | 236 | #define IP6SKB_XFRM_TRANSFORMED 1 |
237 | #define IP6SKB_FORWARDED 2 | ||
233 | }; | 238 | }; |
234 | 239 | ||
235 | #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) | 240 | #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) |
@@ -274,8 +279,29 @@ struct ipv6_pinfo { | |||
274 | 279 | ||
275 | __be32 flow_label; | 280 | __be32 flow_label; |
276 | __u32 frag_size; | 281 | __u32 frag_size; |
277 | __s16 hop_limit; | 282 | |
278 | __s16 mcast_hops; | 283 | /* |
284 | * Packed in 16bits. | ||
285 | * Omit one shift by by putting the signed field at MSB. | ||
286 | */ | ||
287 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
288 | __s16 hop_limit:9; | ||
289 | __u16 __unused_1:7; | ||
290 | #else | ||
291 | __u16 __unused_1:7; | ||
292 | __s16 hop_limit:9; | ||
293 | #endif | ||
294 | |||
295 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
296 | /* Packed in 16bits. */ | ||
297 | __s16 mcast_hops:9; | ||
298 | __u16 __unused_2:6, | ||
299 | mc_loop:1; | ||
300 | #else | ||
301 | __u16 mc_loop:1, | ||
302 | __unused_2:6; | ||
303 | __s16 mcast_hops:9; | ||
304 | #endif | ||
279 | int mcast_oif; | 305 | int mcast_oif; |
280 | 306 | ||
281 | /* pktoption flags */ | 307 | /* pktoption flags */ |
@@ -298,11 +324,14 @@ struct ipv6_pinfo { | |||
298 | } rxopt; | 324 | } rxopt; |
299 | 325 | ||
300 | /* sockopt flags */ | 326 | /* sockopt flags */ |
301 | __u8 mc_loop:1, | 327 | __u8 recverr:1, |
302 | recverr:1, | ||
303 | sndflow:1, | 328 | sndflow:1, |
304 | pmtudisc:2, | 329 | pmtudisc:2, |
305 | ipv6only:1; | 330 | ipv6only:1, |
331 | srcprefs:3; /* 001: prefer temporary address | ||
332 | * 010: prefer public address | ||
333 | * 100: prefer care-of address | ||
334 | */ | ||
306 | __u8 tclass; | 335 | __u8 tclass; |
307 | 336 | ||
308 | __u32 dst_cookie; | 337 | __u32 dst_cookie; |
@@ -315,9 +344,8 @@ struct ipv6_pinfo { | |||
315 | struct sk_buff *pktoptions; | 344 | struct sk_buff *pktoptions; |
316 | struct { | 345 | struct { |
317 | struct ipv6_txoptions *opt; | 346 | struct ipv6_txoptions *opt; |
318 | struct rt6_info *rt; | 347 | u8 hop_limit; |
319 | int hop_limit; | 348 | u8 tclass; |
320 | int tclass; | ||
321 | } cork; | 349 | } cork; |
322 | }; | 350 | }; |
323 | 351 | ||
@@ -458,7 +486,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) | |||
458 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | 486 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ |
459 | 487 | ||
460 | #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ | 488 | #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ |
461 | (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ | 489 | (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ |
462 | ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \ | 490 | ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \ |
463 | ((__sk)->sk_family == AF_INET6) && \ | 491 | ((__sk)->sk_family == AF_INET6) && \ |
464 | ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ | 492 | ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ |
@@ -466,7 +494,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) | |||
466 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | 494 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) |
467 | 495 | ||
468 | #define INET6_TW_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif) \ | 496 | #define INET6_TW_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif) \ |
469 | (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ | 497 | (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ |
470 | (*((__portpair *)&(inet_twsk(__sk)->tw_dport)) == (__ports)) && \ | 498 | (*((__portpair *)&(inet_twsk(__sk)->tw_dport)) == (__ports)) && \ |
471 | ((__sk)->sk_family == PF_INET6) && \ | 499 | ((__sk)->sk_family == PF_INET6) && \ |
472 | (ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))) && \ | 500 | (ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))) && \ |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 176e5e790a44..1883a85625dd 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -228,21 +228,11 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask) | |||
228 | 228 | ||
229 | #endif /* CONFIG_GENERIC_PENDING_IRQ */ | 229 | #endif /* CONFIG_GENERIC_PENDING_IRQ */ |
230 | 230 | ||
231 | extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); | ||
232 | extern int irq_can_set_affinity(unsigned int irq); | ||
233 | |||
234 | #else /* CONFIG_SMP */ | 231 | #else /* CONFIG_SMP */ |
235 | 232 | ||
236 | #define move_native_irq(x) | 233 | #define move_native_irq(x) |
237 | #define move_masked_irq(x) | 234 | #define move_masked_irq(x) |
238 | 235 | ||
239 | static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | ||
240 | { | ||
241 | return -EINVAL; | ||
242 | } | ||
243 | |||
244 | static inline int irq_can_set_affinity(unsigned int irq) { return 0; } | ||
245 | |||
246 | #endif /* CONFIG_SMP */ | 236 | #endif /* CONFIG_SMP */ |
247 | 237 | ||
248 | #ifdef CONFIG_IRQBALANCE | 238 | #ifdef CONFIG_IRQBALANCE |
diff --git a/include/linux/isdn.h b/include/linux/isdn.h index 9cb2855bb170..44cd663c53b6 100644 --- a/include/linux/isdn.h +++ b/include/linux/isdn.h | |||
@@ -16,14 +16,8 @@ | |||
16 | 16 | ||
17 | #include <linux/ioctl.h> | 17 | #include <linux/ioctl.h> |
18 | 18 | ||
19 | #ifdef CONFIG_COBALT_MICRO_SERVER | ||
20 | /* Save memory */ | ||
21 | #define ISDN_MAX_DRIVERS 2 | ||
22 | #define ISDN_MAX_CHANNELS 8 | ||
23 | #else | ||
24 | #define ISDN_MAX_DRIVERS 32 | 19 | #define ISDN_MAX_DRIVERS 32 |
25 | #define ISDN_MAX_CHANNELS 64 | 20 | #define ISDN_MAX_CHANNELS 64 |
26 | #endif | ||
27 | 21 | ||
28 | /* New ioctl-codes */ | 22 | /* New ioctl-codes */ |
29 | #define IIOCNETAIF _IO('I',1) | 23 | #define IIOCNETAIF _IO('I',1) |
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h new file mode 100644 index 000000000000..9757b1a6d9dc --- /dev/null +++ b/include/linux/kgdb.h | |||
@@ -0,0 +1,281 @@ | |||
1 | /* | ||
2 | * This provides the callbacks and functions that KGDB needs to share between | ||
3 | * the core, I/O and arch-specific portions. | ||
4 | * | ||
5 | * Author: Amit Kale <amitkale@linsyssoft.com> and | ||
6 | * Tom Rini <trini@kernel.crashing.org> | ||
7 | * | ||
8 | * 2001-2004 (c) Amit S. Kale and 2003-2005 (c) MontaVista Software, Inc. | ||
9 | * This file is licensed under the terms of the GNU General Public License | ||
10 | * version 2. This program is licensed "as is" without any warranty of any | ||
11 | * kind, whether express or implied. | ||
12 | */ | ||
13 | #ifndef _KGDB_H_ | ||
14 | #define _KGDB_H_ | ||
15 | |||
16 | #include <linux/serial_8250.h> | ||
17 | #include <linux/linkage.h> | ||
18 | #include <linux/init.h> | ||
19 | |||
20 | #include <asm/atomic.h> | ||
21 | #include <asm/kgdb.h> | ||
22 | |||
23 | struct pt_regs; | ||
24 | |||
25 | /** | ||
26 | * kgdb_skipexception - (optional) exit kgdb_handle_exception early | ||
27 | * @exception: Exception vector number | ||
28 | * @regs: Current &struct pt_regs. | ||
29 | * | ||
30 | * On some architectures it is required to skip a breakpoint | ||
31 | * exception when it occurs after a breakpoint has been removed. | ||
32 | * This can be implemented in the architecture specific portion of | ||
33 | * for kgdb. | ||
34 | */ | ||
35 | extern int kgdb_skipexception(int exception, struct pt_regs *regs); | ||
36 | |||
37 | /** | ||
38 | * kgdb_post_primary_code - (optional) Save error vector/code numbers. | ||
39 | * @regs: Original pt_regs. | ||
40 | * @e_vector: Original error vector. | ||
41 | * @err_code: Original error code. | ||
42 | * | ||
43 | * This is usually needed on architectures which support SMP and | ||
44 | * KGDB. This function is called after all the secondary cpus have | ||
45 | * been put to a know spin state and the primary CPU has control over | ||
46 | * KGDB. | ||
47 | */ | ||
48 | extern void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, | ||
49 | int err_code); | ||
50 | |||
51 | /** | ||
52 | * kgdb_disable_hw_debug - (optional) Disable hardware debugging hook | ||
53 | * @regs: Current &struct pt_regs. | ||
54 | * | ||
55 | * This function will be called if the particular architecture must | ||
56 | * disable hardware debugging while it is processing gdb packets or | ||
57 | * handling exception. | ||
58 | */ | ||
59 | extern void kgdb_disable_hw_debug(struct pt_regs *regs); | ||
60 | |||
61 | struct tasklet_struct; | ||
62 | struct task_struct; | ||
63 | struct uart_port; | ||
64 | |||
65 | /** | ||
66 | * kgdb_breakpoint - compiled in breakpoint | ||
67 | * | ||
68 | * This will be impelmented a static inline per architecture. This | ||
69 | * function is called by the kgdb core to execute an architecture | ||
70 | * specific trap to cause kgdb to enter the exception processing. | ||
71 | * | ||
72 | */ | ||
73 | void kgdb_breakpoint(void); | ||
74 | |||
75 | extern int kgdb_connected; | ||
76 | |||
77 | extern atomic_t kgdb_setting_breakpoint; | ||
78 | extern atomic_t kgdb_cpu_doing_single_step; | ||
79 | |||
80 | extern struct task_struct *kgdb_usethread; | ||
81 | extern struct task_struct *kgdb_contthread; | ||
82 | |||
83 | enum kgdb_bptype { | ||
84 | BP_BREAKPOINT = 0, | ||
85 | BP_HARDWARE_BREAKPOINT, | ||
86 | BP_WRITE_WATCHPOINT, | ||
87 | BP_READ_WATCHPOINT, | ||
88 | BP_ACCESS_WATCHPOINT | ||
89 | }; | ||
90 | |||
91 | enum kgdb_bpstate { | ||
92 | BP_UNDEFINED = 0, | ||
93 | BP_REMOVED, | ||
94 | BP_SET, | ||
95 | BP_ACTIVE | ||
96 | }; | ||
97 | |||
98 | struct kgdb_bkpt { | ||
99 | unsigned long bpt_addr; | ||
100 | unsigned char saved_instr[BREAK_INSTR_SIZE]; | ||
101 | enum kgdb_bptype type; | ||
102 | enum kgdb_bpstate state; | ||
103 | }; | ||
104 | |||
105 | #ifndef KGDB_MAX_BREAKPOINTS | ||
106 | # define KGDB_MAX_BREAKPOINTS 1000 | ||
107 | #endif | ||
108 | |||
109 | #define KGDB_HW_BREAKPOINT 1 | ||
110 | |||
111 | /* | ||
112 | * Functions each KGDB-supporting architecture must provide: | ||
113 | */ | ||
114 | |||
115 | /** | ||
116 | * kgdb_arch_init - Perform any architecture specific initalization. | ||
117 | * | ||
118 | * This function will handle the initalization of any architecture | ||
119 | * specific callbacks. | ||
120 | */ | ||
121 | extern int kgdb_arch_init(void); | ||
122 | |||
123 | /** | ||
124 | * kgdb_arch_exit - Perform any architecture specific uninitalization. | ||
125 | * | ||
126 | * This function will handle the uninitalization of any architecture | ||
127 | * specific callbacks, for dynamic registration and unregistration. | ||
128 | */ | ||
129 | extern void kgdb_arch_exit(void); | ||
130 | |||
131 | /** | ||
132 | * pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs | ||
133 | * @gdb_regs: A pointer to hold the registers in the order GDB wants. | ||
134 | * @regs: The &struct pt_regs of the current process. | ||
135 | * | ||
136 | * Convert the pt_regs in @regs into the format for registers that | ||
137 | * GDB expects, stored in @gdb_regs. | ||
138 | */ | ||
139 | extern void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs); | ||
140 | |||
141 | /** | ||
142 | * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs | ||
143 | * @gdb_regs: A pointer to hold the registers in the order GDB wants. | ||
144 | * @p: The &struct task_struct of the desired process. | ||
145 | * | ||
146 | * Convert the register values of the sleeping process in @p to | ||
147 | * the format that GDB expects. | ||
148 | * This function is called when kgdb does not have access to the | ||
149 | * &struct pt_regs and therefore it should fill the gdb registers | ||
150 | * @gdb_regs with what has been saved in &struct thread_struct | ||
151 | * thread field during switch_to. | ||
152 | */ | ||
153 | extern void | ||
154 | sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p); | ||
155 | |||
156 | /** | ||
157 | * gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs. | ||
158 | * @gdb_regs: A pointer to hold the registers we've received from GDB. | ||
159 | * @regs: A pointer to a &struct pt_regs to hold these values in. | ||
160 | * | ||
161 | * Convert the GDB regs in @gdb_regs into the pt_regs, and store them | ||
162 | * in @regs. | ||
163 | */ | ||
164 | extern void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs); | ||
165 | |||
166 | /** | ||
167 | * kgdb_arch_handle_exception - Handle architecture specific GDB packets. | ||
168 | * @vector: The error vector of the exception that happened. | ||
169 | * @signo: The signal number of the exception that happened. | ||
170 | * @err_code: The error code of the exception that happened. | ||
171 | * @remcom_in_buffer: The buffer of the packet we have read. | ||
172 | * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. | ||
173 | * @regs: The &struct pt_regs of the current process. | ||
174 | * | ||
175 | * This function MUST handle the 'c' and 's' command packets, | ||
176 | * as well packets to set / remove a hardware breakpoint, if used. | ||
177 | * If there are additional packets which the hardware needs to handle, | ||
178 | * they are handled here. The code should return -1 if it wants to | ||
179 | * process more packets, and a %0 or %1 if it wants to exit from the | ||
180 | * kgdb callback. | ||
181 | */ | ||
182 | extern int | ||
183 | kgdb_arch_handle_exception(int vector, int signo, int err_code, | ||
184 | char *remcom_in_buffer, | ||
185 | char *remcom_out_buffer, | ||
186 | struct pt_regs *regs); | ||
187 | |||
188 | /** | ||
189 | * kgdb_roundup_cpus - Get other CPUs into a holding pattern | ||
190 | * @flags: Current IRQ state | ||
191 | * | ||
192 | * On SMP systems, we need to get the attention of the other CPUs | ||
193 | * and get them be in a known state. This should do what is needed | ||
194 | * to get the other CPUs to call kgdb_wait(). Note that on some arches, | ||
195 | * the NMI approach is not used for rounding up all the CPUs. For example, | ||
196 | * in case of MIPS, smp_call_function() is used to roundup CPUs. In | ||
197 | * this case, we have to make sure that interrupts are enabled before | ||
198 | * calling smp_call_function(). The argument to this function is | ||
199 | * the flags that will be used when restoring the interrupts. There is | ||
200 | * local_irq_save() call before kgdb_roundup_cpus(). | ||
201 | * | ||
202 | * On non-SMP systems, this is not called. | ||
203 | */ | ||
204 | extern void kgdb_roundup_cpus(unsigned long flags); | ||
205 | |||
206 | /* Optional functions. */ | ||
207 | extern int kgdb_validate_break_address(unsigned long addr); | ||
208 | extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr); | ||
209 | extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle); | ||
210 | |||
211 | /** | ||
212 | * struct kgdb_arch - Describe architecture specific values. | ||
213 | * @gdb_bpt_instr: The instruction to trigger a breakpoint. | ||
214 | * @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT. | ||
215 | * @set_breakpoint: Allow an architecture to specify how to set a software | ||
216 | * breakpoint. | ||
217 | * @remove_breakpoint: Allow an architecture to specify how to remove a | ||
218 | * software breakpoint. | ||
219 | * @set_hw_breakpoint: Allow an architecture to specify how to set a hardware | ||
220 | * breakpoint. | ||
221 | * @remove_hw_breakpoint: Allow an architecture to specify how to remove a | ||
222 | * hardware breakpoint. | ||
223 | * @remove_all_hw_break: Allow an architecture to specify how to remove all | ||
224 | * hardware breakpoints. | ||
225 | * @correct_hw_break: Allow an architecture to specify how to correct the | ||
226 | * hardware debug registers. | ||
227 | */ | ||
228 | struct kgdb_arch { | ||
229 | unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE]; | ||
230 | unsigned long flags; | ||
231 | |||
232 | int (*set_breakpoint)(unsigned long, char *); | ||
233 | int (*remove_breakpoint)(unsigned long, char *); | ||
234 | int (*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); | ||
235 | int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype); | ||
236 | void (*remove_all_hw_break)(void); | ||
237 | void (*correct_hw_break)(void); | ||
238 | }; | ||
239 | |||
240 | /** | ||
241 | * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB. | ||
242 | * @name: Name of the I/O driver. | ||
243 | * @read_char: Pointer to a function that will return one char. | ||
244 | * @write_char: Pointer to a function that will write one char. | ||
245 | * @flush: Pointer to a function that will flush any pending writes. | ||
246 | * @init: Pointer to a function that will initialize the device. | ||
247 | * @pre_exception: Pointer to a function that will do any prep work for | ||
248 | * the I/O driver. | ||
249 | * @post_exception: Pointer to a function that will do any cleanup work | ||
250 | * for the I/O driver. | ||
251 | */ | ||
252 | struct kgdb_io { | ||
253 | const char *name; | ||
254 | int (*read_char) (void); | ||
255 | void (*write_char) (u8); | ||
256 | void (*flush) (void); | ||
257 | int (*init) (void); | ||
258 | void (*pre_exception) (void); | ||
259 | void (*post_exception) (void); | ||
260 | }; | ||
261 | |||
262 | extern struct kgdb_arch arch_kgdb_ops; | ||
263 | |||
264 | extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); | ||
265 | extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); | ||
266 | |||
267 | extern int kgdb_hex2long(char **ptr, long *long_val); | ||
268 | extern int kgdb_mem2hex(char *mem, char *buf, int count); | ||
269 | extern int kgdb_hex2mem(char *buf, char *mem, int count); | ||
270 | |||
271 | extern int kgdb_isremovedbreak(unsigned long addr); | ||
272 | |||
273 | extern int | ||
274 | kgdb_handle_exception(int ex_vector, int signo, int err_code, | ||
275 | struct pt_regs *regs); | ||
276 | extern int kgdb_nmicallback(int cpu, void *regs); | ||
277 | |||
278 | extern int kgdb_single_step; | ||
279 | extern atomic_t kgdb_active; | ||
280 | |||
281 | #endif /* _KGDB_H_ */ | ||
diff --git a/include/linux/libata.h b/include/linux/libata.h index 37ee881c42ac..165734a2dd47 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -122,6 +122,8 @@ enum { | |||
122 | 122 | ||
123 | ATAPI_MAX_DRAIN = 16 << 10, | 123 | ATAPI_MAX_DRAIN = 16 << 10, |
124 | 124 | ||
125 | ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, | ||
126 | |||
125 | ATA_SHT_EMULATED = 1, | 127 | ATA_SHT_EMULATED = 1, |
126 | ATA_SHT_CMD_PER_LUN = 1, | 128 | ATA_SHT_CMD_PER_LUN = 1, |
127 | ATA_SHT_THIS_ID = -1, | 129 | ATA_SHT_THIS_ID = -1, |
@@ -163,9 +165,6 @@ enum { | |||
163 | ATA_DEV_NONE = 9, /* no device */ | 165 | ATA_DEV_NONE = 9, /* no device */ |
164 | 166 | ||
165 | /* struct ata_link flags */ | 167 | /* struct ata_link flags */ |
166 | ATA_LFLAG_HRST_TO_RESUME = (1 << 0), /* hardreset to resume link */ | ||
167 | ATA_LFLAG_SKIP_D2H_BSY = (1 << 1), /* can't wait for the first D2H | ||
168 | * Register FIS clearing BSY */ | ||
169 | ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ | 168 | ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ |
170 | ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ | 169 | ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ |
171 | ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ | 170 | ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ |
@@ -225,6 +224,7 @@ enum { | |||
225 | ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ | 224 | ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ |
226 | ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ | 225 | ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ |
227 | ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ | 226 | ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ |
227 | ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */ | ||
228 | 228 | ||
229 | ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ | 229 | ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ |
230 | ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ | 230 | ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ |
@@ -249,6 +249,25 @@ enum { | |||
249 | */ | 249 | */ |
250 | ATA_TMOUT_FF_WAIT = 4 * HZ / 5, | 250 | ATA_TMOUT_FF_WAIT = 4 * HZ / 5, |
251 | 251 | ||
252 | /* Spec mandates to wait for ">= 2ms" before checking status | ||
253 | * after reset. We wait 150ms, because that was the magic | ||
254 | * delay used for ATAPI devices in Hale Landis's ATADRVR, for | ||
255 | * the period of time between when the ATA command register is | ||
256 | * written, and then status is checked. Because waiting for | ||
257 | * "a while" before checking status is fine, post SRST, we | ||
258 | * perform this magic delay here as well. | ||
259 | * | ||
260 | * Old drivers/ide uses the 2mS rule and then waits for ready. | ||
261 | */ | ||
262 | ATA_WAIT_AFTER_RESET_MSECS = 150, | ||
263 | |||
264 | /* If PMP is supported, we have to do follow-up SRST. As some | ||
265 | * PMPs don't send D2H Reg FIS after hardreset, LLDs are | ||
266 | * advised to wait only for the following duration before | ||
267 | * doing SRST. | ||
268 | */ | ||
269 | ATA_TMOUT_PMP_SRST_WAIT = 1 * HZ, | ||
270 | |||
252 | /* ATA bus states */ | 271 | /* ATA bus states */ |
253 | BUS_UNKNOWN = 0, | 272 | BUS_UNKNOWN = 0, |
254 | BUS_DMA = 1, | 273 | BUS_DMA = 1, |
@@ -292,17 +311,16 @@ enum { | |||
292 | 311 | ||
293 | /* reset / recovery action types */ | 312 | /* reset / recovery action types */ |
294 | ATA_EH_REVALIDATE = (1 << 0), | 313 | ATA_EH_REVALIDATE = (1 << 0), |
295 | ATA_EH_SOFTRESET = (1 << 1), | 314 | ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */ |
296 | ATA_EH_HARDRESET = (1 << 2), | 315 | ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ |
316 | ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, | ||
297 | ATA_EH_ENABLE_LINK = (1 << 3), | 317 | ATA_EH_ENABLE_LINK = (1 << 3), |
298 | ATA_EH_LPM = (1 << 4), /* link power management action */ | 318 | ATA_EH_LPM = (1 << 4), /* link power management action */ |
299 | 319 | ||
300 | ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, | ||
301 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, | 320 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, |
302 | 321 | ||
303 | /* ata_eh_info->flags */ | 322 | /* ata_eh_info->flags */ |
304 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ | 323 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ |
305 | ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ | ||
306 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ | 324 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ |
307 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ | 325 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ |
308 | 326 | ||
@@ -313,7 +331,6 @@ enum { | |||
313 | ATA_EHI_POST_SETMODE = (1 << 20), /* revaildating after setmode */ | 331 | ATA_EHI_POST_SETMODE = (1 << 20), /* revaildating after setmode */ |
314 | 332 | ||
315 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, | 333 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, |
316 | ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK, | ||
317 | 334 | ||
318 | /* max tries if error condition is still set after ->error_handler */ | 335 | /* max tries if error condition is still set after ->error_handler */ |
319 | ATA_EH_MAX_TRIES = 5, | 336 | ATA_EH_MAX_TRIES = 5, |
@@ -352,6 +369,22 @@ enum { | |||
352 | ATAPI_READ_CD = 2, /* READ CD [MSF] */ | 369 | ATAPI_READ_CD = 2, /* READ CD [MSF] */ |
353 | ATAPI_PASS_THRU = 3, /* SAT pass-thru */ | 370 | ATAPI_PASS_THRU = 3, /* SAT pass-thru */ |
354 | ATAPI_MISC = 4, /* the rest */ | 371 | ATAPI_MISC = 4, /* the rest */ |
372 | |||
373 | /* Timing constants */ | ||
374 | ATA_TIMING_SETUP = (1 << 0), | ||
375 | ATA_TIMING_ACT8B = (1 << 1), | ||
376 | ATA_TIMING_REC8B = (1 << 2), | ||
377 | ATA_TIMING_CYC8B = (1 << 3), | ||
378 | ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | | ||
379 | ATA_TIMING_CYC8B, | ||
380 | ATA_TIMING_ACTIVE = (1 << 4), | ||
381 | ATA_TIMING_RECOVER = (1 << 5), | ||
382 | ATA_TIMING_CYCLE = (1 << 6), | ||
383 | ATA_TIMING_UDMA = (1 << 7), | ||
384 | ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | | ||
385 | ATA_TIMING_REC8B | ATA_TIMING_CYC8B | | ||
386 | ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | | ||
387 | ATA_TIMING_CYCLE | ATA_TIMING_UDMA, | ||
355 | }; | 388 | }; |
356 | 389 | ||
357 | enum ata_xfer_mask { | 390 | enum ata_xfer_mask { |
@@ -412,6 +445,7 @@ enum link_pm { | |||
412 | }; | 445 | }; |
413 | extern struct class_device_attribute class_device_attr_link_power_management_policy; | 446 | extern struct class_device_attribute class_device_attr_link_power_management_policy; |
414 | 447 | ||
448 | #ifdef CONFIG_ATA_SFF | ||
415 | struct ata_ioports { | 449 | struct ata_ioports { |
416 | void __iomem *cmd_addr; | 450 | void __iomem *cmd_addr; |
417 | void __iomem *data_addr; | 451 | void __iomem *data_addr; |
@@ -429,6 +463,7 @@ struct ata_ioports { | |||
429 | void __iomem *bmdma_addr; | 463 | void __iomem *bmdma_addr; |
430 | void __iomem *scr_addr; | 464 | void __iomem *scr_addr; |
431 | }; | 465 | }; |
466 | #endif /* CONFIG_ATA_SFF */ | ||
432 | 467 | ||
433 | struct ata_host { | 468 | struct ata_host { |
434 | spinlock_t lock; | 469 | spinlock_t lock; |
@@ -436,7 +471,7 @@ struct ata_host { | |||
436 | void __iomem * const *iomap; | 471 | void __iomem * const *iomap; |
437 | unsigned int n_ports; | 472 | unsigned int n_ports; |
438 | void *private_data; | 473 | void *private_data; |
439 | const struct ata_port_operations *ops; | 474 | struct ata_port_operations *ops; |
440 | unsigned long flags; | 475 | unsigned long flags; |
441 | #ifdef CONFIG_ATA_ACPI | 476 | #ifdef CONFIG_ATA_ACPI |
442 | acpi_handle acpi_handle; | 477 | acpi_handle acpi_handle; |
@@ -605,7 +640,7 @@ struct ata_link { | |||
605 | 640 | ||
606 | struct ata_port { | 641 | struct ata_port { |
607 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ | 642 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ |
608 | const struct ata_port_operations *ops; | 643 | struct ata_port_operations *ops; |
609 | spinlock_t *lock; | 644 | spinlock_t *lock; |
610 | unsigned long flags; /* ATA_FLAG_xxx */ | 645 | unsigned long flags; /* ATA_FLAG_xxx */ |
611 | unsigned int pflags; /* ATA_PFLAG_xxx */ | 646 | unsigned int pflags; /* ATA_PFLAG_xxx */ |
@@ -615,7 +650,9 @@ struct ata_port { | |||
615 | struct ata_prd *prd; /* our SG list */ | 650 | struct ata_prd *prd; /* our SG list */ |
616 | dma_addr_t prd_dma; /* and its DMA mapping */ | 651 | dma_addr_t prd_dma; /* and its DMA mapping */ |
617 | 652 | ||
653 | #ifdef CONFIG_ATA_SFF | ||
618 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ | 654 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ |
655 | #endif /* CONFIG_ATA_SFF */ | ||
619 | 656 | ||
620 | u8 ctl; /* cache of ATA control register */ | 657 | u8 ctl; /* cache of ATA control register */ |
621 | u8 last_ctl; /* Cache last written value */ | 658 | u8 last_ctl; /* Cache last written value */ |
@@ -667,81 +704,108 @@ struct ata_port { | |||
667 | u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ | 704 | u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ |
668 | }; | 705 | }; |
669 | 706 | ||
670 | struct ata_port_operations { | 707 | /* The following initializer overrides a method to NULL whether one of |
671 | void (*dev_config) (struct ata_device *); | 708 | * its parent has the method defined or not. This is equivalent to |
672 | 709 | * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant | |
673 | void (*set_piomode) (struct ata_port *, struct ata_device *); | 710 | * expression and thus can't be used as an initializer. |
674 | void (*set_dmamode) (struct ata_port *, struct ata_device *); | 711 | */ |
675 | unsigned long (*mode_filter) (struct ata_device *, unsigned long); | 712 | #define ATA_OP_NULL (void *)(unsigned long)(-ENOENT) |
676 | |||
677 | void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf); | ||
678 | void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); | ||
679 | |||
680 | void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); | ||
681 | u8 (*check_status)(struct ata_port *ap); | ||
682 | u8 (*check_altstatus)(struct ata_port *ap); | ||
683 | void (*dev_select)(struct ata_port *ap, unsigned int device); | ||
684 | |||
685 | void (*phy_reset) (struct ata_port *ap); /* obsolete */ | ||
686 | int (*set_mode) (struct ata_link *link, struct ata_device **r_failed_dev); | ||
687 | |||
688 | int (*cable_detect) (struct ata_port *ap); | ||
689 | |||
690 | int (*check_atapi_dma) (struct ata_queued_cmd *qc); | ||
691 | |||
692 | void (*bmdma_setup) (struct ata_queued_cmd *qc); | ||
693 | void (*bmdma_start) (struct ata_queued_cmd *qc); | ||
694 | |||
695 | unsigned int (*data_xfer) (struct ata_device *dev, unsigned char *buf, | ||
696 | unsigned int buflen, int rw); | ||
697 | |||
698 | int (*qc_defer) (struct ata_queued_cmd *qc); | ||
699 | void (*qc_prep) (struct ata_queued_cmd *qc); | ||
700 | unsigned int (*qc_issue) (struct ata_queued_cmd *qc); | ||
701 | |||
702 | /* port multiplier */ | ||
703 | void (*pmp_attach) (struct ata_port *ap); | ||
704 | void (*pmp_detach) (struct ata_port *ap); | ||
705 | 713 | ||
706 | /* Error handlers. ->error_handler overrides ->eng_timeout and | 714 | struct ata_port_operations { |
707 | * indicates that new-style EH is in place. | 715 | /* |
716 | * Command execution | ||
708 | */ | 717 | */ |
709 | void (*eng_timeout) (struct ata_port *ap); /* obsolete */ | 718 | int (*qc_defer)(struct ata_queued_cmd *qc); |
710 | 719 | int (*check_atapi_dma)(struct ata_queued_cmd *qc); | |
711 | void (*freeze) (struct ata_port *ap); | 720 | void (*qc_prep)(struct ata_queued_cmd *qc); |
712 | void (*thaw) (struct ata_port *ap); | 721 | unsigned int (*qc_issue)(struct ata_queued_cmd *qc); |
713 | void (*error_handler) (struct ata_port *ap); | 722 | bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); |
714 | void (*post_internal_cmd) (struct ata_queued_cmd *qc); | 723 | |
715 | 724 | /* | |
716 | irq_handler_t irq_handler; | 725 | * Configuration and exception handling |
717 | void (*irq_clear) (struct ata_port *); | 726 | */ |
718 | u8 (*irq_on) (struct ata_port *); | 727 | int (*cable_detect)(struct ata_port *ap); |
719 | 728 | unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask); | |
720 | int (*scr_read) (struct ata_port *ap, unsigned int sc_reg, u32 *val); | 729 | void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); |
721 | int (*scr_write) (struct ata_port *ap, unsigned int sc_reg, u32 val); | 730 | void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); |
731 | int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); | ||
732 | |||
733 | void (*dev_config)(struct ata_device *dev); | ||
734 | |||
735 | void (*freeze)(struct ata_port *ap); | ||
736 | void (*thaw)(struct ata_port *ap); | ||
737 | ata_prereset_fn_t prereset; | ||
738 | ata_reset_fn_t softreset; | ||
739 | ata_reset_fn_t hardreset; | ||
740 | ata_postreset_fn_t postreset; | ||
741 | ata_prereset_fn_t pmp_prereset; | ||
742 | ata_reset_fn_t pmp_softreset; | ||
743 | ata_reset_fn_t pmp_hardreset; | ||
744 | ata_postreset_fn_t pmp_postreset; | ||
745 | void (*error_handler)(struct ata_port *ap); | ||
746 | void (*post_internal_cmd)(struct ata_queued_cmd *qc); | ||
747 | |||
748 | /* | ||
749 | * Optional features | ||
750 | */ | ||
751 | int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val); | ||
752 | int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val); | ||
753 | void (*pmp_attach)(struct ata_port *ap); | ||
754 | void (*pmp_detach)(struct ata_port *ap); | ||
755 | int (*enable_pm)(struct ata_port *ap, enum link_pm policy); | ||
756 | void (*disable_pm)(struct ata_port *ap); | ||
757 | |||
758 | /* | ||
759 | * Start, stop, suspend and resume | ||
760 | */ | ||
761 | int (*port_suspend)(struct ata_port *ap, pm_message_t mesg); | ||
762 | int (*port_resume)(struct ata_port *ap); | ||
763 | int (*port_start)(struct ata_port *ap); | ||
764 | void (*port_stop)(struct ata_port *ap); | ||
765 | void (*host_stop)(struct ata_host *host); | ||
766 | |||
767 | #ifdef CONFIG_ATA_SFF | ||
768 | /* | ||
769 | * SFF / taskfile oriented ops | ||
770 | */ | ||
771 | void (*sff_dev_select)(struct ata_port *ap, unsigned int device); | ||
772 | u8 (*sff_check_status)(struct ata_port *ap); | ||
773 | u8 (*sff_check_altstatus)(struct ata_port *ap); | ||
774 | void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf); | ||
775 | void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); | ||
776 | void (*sff_exec_command)(struct ata_port *ap, | ||
777 | const struct ata_taskfile *tf); | ||
778 | unsigned int (*sff_data_xfer)(struct ata_device *dev, | ||
779 | unsigned char *buf, unsigned int buflen, int rw); | ||
780 | u8 (*sff_irq_on)(struct ata_port *); | ||
781 | void (*sff_irq_clear)(struct ata_port *); | ||
722 | 782 | ||
723 | int (*port_suspend) (struct ata_port *ap, pm_message_t mesg); | 783 | void (*bmdma_setup)(struct ata_queued_cmd *qc); |
724 | int (*port_resume) (struct ata_port *ap); | 784 | void (*bmdma_start)(struct ata_queued_cmd *qc); |
725 | int (*enable_pm) (struct ata_port *ap, enum link_pm policy); | 785 | void (*bmdma_stop)(struct ata_queued_cmd *qc); |
726 | void (*disable_pm) (struct ata_port *ap); | 786 | u8 (*bmdma_status)(struct ata_port *ap); |
727 | int (*port_start) (struct ata_port *ap); | 787 | #endif /* CONFIG_ATA_SFF */ |
728 | void (*port_stop) (struct ata_port *ap); | ||
729 | 788 | ||
730 | void (*host_stop) (struct ata_host *host); | 789 | /* |
790 | * Obsolete | ||
791 | */ | ||
792 | void (*phy_reset)(struct ata_port *ap); | ||
793 | void (*eng_timeout)(struct ata_port *ap); | ||
731 | 794 | ||
732 | void (*bmdma_stop) (struct ata_queued_cmd *qc); | 795 | /* |
733 | u8 (*bmdma_status) (struct ata_port *ap); | 796 | * ->inherits must be the last field and all the preceding |
797 | * fields must be pointers. | ||
798 | */ | ||
799 | const struct ata_port_operations *inherits; | ||
734 | }; | 800 | }; |
735 | 801 | ||
736 | struct ata_port_info { | 802 | struct ata_port_info { |
737 | struct scsi_host_template *sht; | ||
738 | unsigned long flags; | 803 | unsigned long flags; |
739 | unsigned long link_flags; | 804 | unsigned long link_flags; |
740 | unsigned long pio_mask; | 805 | unsigned long pio_mask; |
741 | unsigned long mwdma_mask; | 806 | unsigned long mwdma_mask; |
742 | unsigned long udma_mask; | 807 | unsigned long udma_mask; |
743 | const struct ata_port_operations *port_ops; | 808 | struct ata_port_operations *port_ops; |
744 | irq_handler_t irq_handler; | ||
745 | void *private_data; | 809 | void *private_data; |
746 | }; | 810 | }; |
747 | 811 | ||
@@ -759,11 +823,14 @@ struct ata_timing { | |||
759 | 823 | ||
760 | #define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin) | 824 | #define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin) |
761 | 825 | ||
826 | /* | ||
827 | * Core layer - drivers/ata/libata-core.c | ||
828 | */ | ||
762 | extern const unsigned long sata_deb_timing_normal[]; | 829 | extern const unsigned long sata_deb_timing_normal[]; |
763 | extern const unsigned long sata_deb_timing_hotplug[]; | 830 | extern const unsigned long sata_deb_timing_hotplug[]; |
764 | extern const unsigned long sata_deb_timing_long[]; | 831 | extern const unsigned long sata_deb_timing_long[]; |
765 | 832 | ||
766 | extern const struct ata_port_operations ata_dummy_port_ops; | 833 | extern struct ata_port_operations ata_dummy_port_ops; |
767 | extern const struct ata_port_info ata_dummy_port_info; | 834 | extern const struct ata_port_info ata_dummy_port_info; |
768 | 835 | ||
769 | static inline const unsigned long * | 836 | static inline const unsigned long * |
@@ -782,22 +849,21 @@ static inline int ata_port_is_dummy(struct ata_port *ap) | |||
782 | 849 | ||
783 | extern void sata_print_link_status(struct ata_link *link); | 850 | extern void sata_print_link_status(struct ata_link *link); |
784 | extern void ata_port_probe(struct ata_port *); | 851 | extern void ata_port_probe(struct ata_port *); |
785 | extern void ata_bus_reset(struct ata_port *ap); | ||
786 | extern int sata_set_spd(struct ata_link *link); | 852 | extern int sata_set_spd(struct ata_link *link); |
853 | extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); | ||
854 | extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, | ||
855 | int (*check_ready)(struct ata_link *link)); | ||
787 | extern int sata_link_debounce(struct ata_link *link, | 856 | extern int sata_link_debounce(struct ata_link *link, |
788 | const unsigned long *params, unsigned long deadline); | 857 | const unsigned long *params, unsigned long deadline); |
789 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, | 858 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, |
790 | unsigned long deadline); | 859 | unsigned long deadline); |
791 | extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); | ||
792 | extern int ata_std_softreset(struct ata_link *link, unsigned int *classes, | ||
793 | unsigned long deadline); | ||
794 | extern int sata_link_hardreset(struct ata_link *link, | 860 | extern int sata_link_hardreset(struct ata_link *link, |
795 | const unsigned long *timing, unsigned long deadline); | 861 | const unsigned long *timing, unsigned long deadline, |
862 | bool *online, int (*check_ready)(struct ata_link *)); | ||
796 | extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, | 863 | extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, |
797 | unsigned long deadline); | 864 | unsigned long deadline); |
798 | extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); | 865 | extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); |
799 | extern void ata_port_disable(struct ata_port *); | 866 | extern void ata_port_disable(struct ata_port *); |
800 | extern void ata_std_ports(struct ata_ioports *ioaddr); | ||
801 | 867 | ||
802 | extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); | 868 | extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); |
803 | extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, | 869 | extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, |
@@ -810,7 +876,7 @@ extern int ata_host_activate(struct ata_host *host, int irq, | |||
810 | struct scsi_host_template *sht); | 876 | struct scsi_host_template *sht); |
811 | extern void ata_host_detach(struct ata_host *host); | 877 | extern void ata_host_detach(struct ata_host *host); |
812 | extern void ata_host_init(struct ata_host *, struct device *, | 878 | extern void ata_host_init(struct ata_host *, struct device *, |
813 | unsigned long, const struct ata_port_operations *); | 879 | unsigned long, struct ata_port_operations *); |
814 | extern int ata_scsi_detect(struct scsi_host_template *sht); | 880 | extern int ata_scsi_detect(struct scsi_host_template *sht); |
815 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); | 881 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
816 | extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); | 882 | extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); |
@@ -823,7 +889,6 @@ extern void ata_sas_port_stop(struct ata_port *ap); | |||
823 | extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); | 889 | extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); |
824 | extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), | 890 | extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), |
825 | struct ata_port *ap); | 891 | struct ata_port *ap); |
826 | extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); | ||
827 | extern int sata_scr_valid(struct ata_link *link); | 892 | extern int sata_scr_valid(struct ata_link *link); |
828 | extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); | 893 | extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); |
829 | extern int sata_scr_write(struct ata_link *link, int reg, u32 val); | 894 | extern int sata_scr_write(struct ata_link *link, int reg, u32 val); |
@@ -835,21 +900,9 @@ extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); | |||
835 | extern void ata_host_resume(struct ata_host *host); | 900 | extern void ata_host_resume(struct ata_host *host); |
836 | #endif | 901 | #endif |
837 | extern int ata_ratelimit(void); | 902 | extern int ata_ratelimit(void); |
838 | extern int ata_busy_sleep(struct ata_port *ap, | ||
839 | unsigned long timeout_pat, unsigned long timeout); | ||
840 | extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline); | ||
841 | extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline); | ||
842 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | 903 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, |
843 | unsigned long interval_msec, | 904 | unsigned long interval_msec, |
844 | unsigned long timeout_msec); | 905 | unsigned long timeout_msec); |
845 | extern unsigned int ata_dev_try_classify(struct ata_device *dev, int present, | ||
846 | u8 *r_err); | ||
847 | |||
848 | /* | ||
849 | * Default driver ops implementations | ||
850 | */ | ||
851 | extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); | ||
852 | extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | ||
853 | extern int atapi_cmd_type(u8 opcode); | 906 | extern int atapi_cmd_type(u8 opcode); |
854 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, | 907 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, |
855 | u8 pmp, int is_cmd, u8 *fis); | 908 | u8 pmp, int is_cmd, u8 *fis); |
@@ -864,23 +917,9 @@ extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); | |||
864 | extern int ata_xfer_mode2shift(unsigned long xfer_mode); | 917 | extern int ata_xfer_mode2shift(unsigned long xfer_mode); |
865 | extern const char *ata_mode_string(unsigned long xfer_mask); | 918 | extern const char *ata_mode_string(unsigned long xfer_mask); |
866 | extern unsigned long ata_id_xfermask(const u16 *id); | 919 | extern unsigned long ata_id_xfermask(const u16 *id); |
867 | extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device); | ||
868 | extern void ata_std_dev_select(struct ata_port *ap, unsigned int device); | ||
869 | extern u8 ata_check_status(struct ata_port *ap); | ||
870 | extern u8 ata_altstatus(struct ata_port *ap); | ||
871 | extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); | ||
872 | extern int ata_port_start(struct ata_port *ap); | 920 | extern int ata_port_start(struct ata_port *ap); |
873 | extern int ata_sff_port_start(struct ata_port *ap); | ||
874 | extern irqreturn_t ata_interrupt(int irq, void *dev_instance); | ||
875 | extern unsigned int ata_data_xfer(struct ata_device *dev, | ||
876 | unsigned char *buf, unsigned int buflen, int rw); | ||
877 | extern unsigned int ata_data_xfer_noirq(struct ata_device *dev, | ||
878 | unsigned char *buf, unsigned int buflen, int rw); | ||
879 | extern int ata_std_qc_defer(struct ata_queued_cmd *qc); | 921 | extern int ata_std_qc_defer(struct ata_queued_cmd *qc); |
880 | extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc); | ||
881 | extern void ata_qc_prep(struct ata_queued_cmd *qc); | ||
882 | extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); | 922 | extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); |
883 | extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc); | ||
884 | extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, | 923 | extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, |
885 | unsigned int n_elem); | 924 | unsigned int n_elem); |
886 | extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); | 925 | extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); |
@@ -889,24 +928,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s, | |||
889 | unsigned int ofs, unsigned int len); | 928 | unsigned int ofs, unsigned int len); |
890 | extern void ata_id_c_string(const u16 *id, unsigned char *s, | 929 | extern void ata_id_c_string(const u16 *id, unsigned char *s, |
891 | unsigned int ofs, unsigned int len); | 930 | unsigned int ofs, unsigned int len); |
892 | extern void ata_bmdma_setup(struct ata_queued_cmd *qc); | ||
893 | extern void ata_bmdma_start(struct ata_queued_cmd *qc); | ||
894 | extern void ata_bmdma_stop(struct ata_queued_cmd *qc); | ||
895 | extern u8 ata_bmdma_status(struct ata_port *ap); | ||
896 | extern void ata_bmdma_irq_clear(struct ata_port *ap); | ||
897 | extern void ata_bmdma_freeze(struct ata_port *ap); | ||
898 | extern void ata_bmdma_thaw(struct ata_port *ap); | ||
899 | extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | ||
900 | ata_reset_fn_t softreset, | ||
901 | ata_reset_fn_t hardreset, | ||
902 | ata_postreset_fn_t postreset); | ||
903 | extern void ata_bmdma_error_handler(struct ata_port *ap); | ||
904 | extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); | ||
905 | extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
906 | u8 status, int in_wq); | ||
907 | extern void ata_qc_complete(struct ata_queued_cmd *qc); | 931 | extern void ata_qc_complete(struct ata_queued_cmd *qc); |
908 | extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, | 932 | extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); |
909 | void (*finish_qc)(struct ata_queued_cmd *)); | ||
910 | extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, | 933 | extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, |
911 | void (*done)(struct scsi_cmnd *)); | 934 | void (*done)(struct scsi_cmnd *)); |
912 | extern int ata_std_bios_param(struct scsi_device *sdev, | 935 | extern int ata_std_bios_param(struct scsi_device *sdev, |
@@ -918,7 +941,6 @@ extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, | |||
918 | int queue_depth); | 941 | int queue_depth); |
919 | extern struct ata_device *ata_dev_pair(struct ata_device *adev); | 942 | extern struct ata_device *ata_dev_pair(struct ata_device *adev); |
920 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); | 943 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); |
921 | extern u8 ata_irq_on(struct ata_port *ap); | ||
922 | 944 | ||
923 | extern int ata_cable_40wire(struct ata_port *ap); | 945 | extern int ata_cable_40wire(struct ata_port *ap); |
924 | extern int ata_cable_80wire(struct ata_port *ap); | 946 | extern int ata_cable_80wire(struct ata_port *ap); |
@@ -926,10 +948,7 @@ extern int ata_cable_sata(struct ata_port *ap); | |||
926 | extern int ata_cable_ignore(struct ata_port *ap); | 948 | extern int ata_cable_ignore(struct ata_port *ap); |
927 | extern int ata_cable_unknown(struct ata_port *ap); | 949 | extern int ata_cable_unknown(struct ata_port *ap); |
928 | 950 | ||
929 | /* | 951 | /* Timing helpers */ |
930 | * Timing helpers | ||
931 | */ | ||
932 | |||
933 | extern unsigned int ata_pio_need_iordy(const struct ata_device *); | 952 | extern unsigned int ata_pio_need_iordy(const struct ata_device *); |
934 | extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); | 953 | extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); |
935 | extern int ata_timing_compute(struct ata_device *, unsigned short, | 954 | extern int ata_timing_compute(struct ata_device *, unsigned short, |
@@ -939,24 +958,31 @@ extern void ata_timing_merge(const struct ata_timing *, | |||
939 | unsigned int); | 958 | unsigned int); |
940 | extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); | 959 | extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); |
941 | 960 | ||
942 | enum { | 961 | /* PCI */ |
943 | ATA_TIMING_SETUP = (1 << 0), | 962 | #ifdef CONFIG_PCI |
944 | ATA_TIMING_ACT8B = (1 << 1), | 963 | struct pci_dev; |
945 | ATA_TIMING_REC8B = (1 << 2), | 964 | |
946 | ATA_TIMING_CYC8B = (1 << 3), | 965 | struct pci_bits { |
947 | ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | | 966 | unsigned int reg; /* PCI config register to read */ |
948 | ATA_TIMING_CYC8B, | 967 | unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ |
949 | ATA_TIMING_ACTIVE = (1 << 4), | 968 | unsigned long mask; |
950 | ATA_TIMING_RECOVER = (1 << 5), | 969 | unsigned long val; |
951 | ATA_TIMING_CYCLE = (1 << 6), | ||
952 | ATA_TIMING_UDMA = (1 << 7), | ||
953 | ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | | ||
954 | ATA_TIMING_REC8B | ATA_TIMING_CYC8B | | ||
955 | ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | | ||
956 | ATA_TIMING_CYCLE | ATA_TIMING_UDMA, | ||
957 | }; | 970 | }; |
958 | 971 | ||
959 | /* libata-acpi.c */ | 972 | extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); |
973 | extern void ata_pci_remove_one(struct pci_dev *pdev); | ||
974 | |||
975 | #ifdef CONFIG_PM | ||
976 | extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
977 | extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); | ||
978 | extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
979 | extern int ata_pci_device_resume(struct pci_dev *pdev); | ||
980 | #endif /* CONFIG_PM */ | ||
981 | #endif /* CONFIG_PCI */ | ||
982 | |||
983 | /* | ||
984 | * ACPI - drivers/ata/libata-acpi.c | ||
985 | */ | ||
960 | #ifdef CONFIG_ATA_ACPI | 986 | #ifdef CONFIG_ATA_ACPI |
961 | static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) | 987 | static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) |
962 | { | 988 | { |
@@ -1000,56 +1026,8 @@ static inline int ata_acpi_cbl_80wire(struct ata_port *ap, | |||
1000 | } | 1026 | } |
1001 | #endif | 1027 | #endif |
1002 | 1028 | ||
1003 | #ifdef CONFIG_PCI | ||
1004 | struct pci_dev; | ||
1005 | |||
1006 | extern int ata_pci_init_one(struct pci_dev *pdev, | ||
1007 | const struct ata_port_info * const * ppi); | ||
1008 | extern void ata_pci_remove_one(struct pci_dev *pdev); | ||
1009 | #ifdef CONFIG_PM | ||
1010 | extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
1011 | extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); | ||
1012 | extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | ||
1013 | extern int ata_pci_device_resume(struct pci_dev *pdev); | ||
1014 | #endif | ||
1015 | extern int ata_pci_clear_simplex(struct pci_dev *pdev); | ||
1016 | |||
1017 | struct pci_bits { | ||
1018 | unsigned int reg; /* PCI config register to read */ | ||
1019 | unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ | ||
1020 | unsigned long mask; | ||
1021 | unsigned long val; | ||
1022 | }; | ||
1023 | |||
1024 | extern int ata_pci_init_sff_host(struct ata_host *host); | ||
1025 | extern int ata_pci_init_bmdma(struct ata_host *host); | ||
1026 | extern int ata_pci_prepare_sff_host(struct pci_dev *pdev, | ||
1027 | const struct ata_port_info * const * ppi, | ||
1028 | struct ata_host **r_host); | ||
1029 | extern int ata_pci_activate_sff_host(struct ata_host *host, | ||
1030 | irq_handler_t irq_handler, | ||
1031 | struct scsi_host_template *sht); | ||
1032 | extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); | ||
1033 | extern unsigned long ata_pci_default_filter(struct ata_device *dev, | ||
1034 | unsigned long xfer_mask); | ||
1035 | #endif /* CONFIG_PCI */ | ||
1036 | |||
1037 | /* | ||
1038 | * PMP | ||
1039 | */ | ||
1040 | extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); | ||
1041 | extern int sata_pmp_std_prereset(struct ata_link *link, unsigned long deadline); | ||
1042 | extern int sata_pmp_std_hardreset(struct ata_link *link, unsigned int *class, | ||
1043 | unsigned long deadline); | ||
1044 | extern void sata_pmp_std_postreset(struct ata_link *link, unsigned int *class); | ||
1045 | extern void sata_pmp_do_eh(struct ata_port *ap, | ||
1046 | ata_prereset_fn_t prereset, ata_reset_fn_t softreset, | ||
1047 | ata_reset_fn_t hardreset, ata_postreset_fn_t postreset, | ||
1048 | ata_prereset_fn_t pmp_prereset, ata_reset_fn_t pmp_softreset, | ||
1049 | ata_reset_fn_t pmp_hardreset, ata_postreset_fn_t pmp_postreset); | ||
1050 | |||
1051 | /* | 1029 | /* |
1052 | * EH | 1030 | * EH - drivers/ata/libata-eh.c |
1053 | */ | 1031 | */ |
1054 | extern void ata_port_schedule_eh(struct ata_port *ap); | 1032 | extern void ata_port_schedule_eh(struct ata_port *ap); |
1055 | extern int ata_link_abort(struct ata_link *link); | 1033 | extern int ata_link_abort(struct ata_link *link); |
@@ -1066,6 +1044,92 @@ extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); | |||
1066 | extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | 1044 | extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, |
1067 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, | 1045 | ata_reset_fn_t softreset, ata_reset_fn_t hardreset, |
1068 | ata_postreset_fn_t postreset); | 1046 | ata_postreset_fn_t postreset); |
1047 | extern void ata_std_error_handler(struct ata_port *ap); | ||
1048 | |||
1049 | /* | ||
1050 | * Base operations to inherit from and initializers for sht | ||
1051 | * | ||
1052 | * Operations | ||
1053 | * | ||
1054 | * base : Common to all libata drivers. | ||
1055 | * sata : SATA controllers w/ native interface. | ||
1056 | * pmp : SATA controllers w/ PMP support. | ||
1057 | * sff : SFF ATA controllers w/o BMDMA support. | ||
1058 | * bmdma : SFF ATA controllers w/ BMDMA support. | ||
1059 | * | ||
1060 | * sht initializers | ||
1061 | * | ||
1062 | * BASE : Common to all libata drivers. The user must set | ||
1063 | * sg_tablesize and dma_boundary. | ||
1064 | * PIO : SFF ATA controllers w/ only PIO support. | ||
1065 | * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and | ||
1066 | * dma_boundary are set to BMDMA limits. | ||
1067 | * NCQ : SATA controllers supporting NCQ. The user must set | ||
1068 | * sg_tablesize, dma_boundary and can_queue. | ||
1069 | */ | ||
1070 | extern const struct ata_port_operations ata_base_port_ops; | ||
1071 | extern const struct ata_port_operations sata_port_ops; | ||
1072 | |||
1073 | #define ATA_BASE_SHT(drv_name) \ | ||
1074 | .module = THIS_MODULE, \ | ||
1075 | .name = drv_name, \ | ||
1076 | .ioctl = ata_scsi_ioctl, \ | ||
1077 | .queuecommand = ata_scsi_queuecmd, \ | ||
1078 | .can_queue = ATA_DEF_QUEUE, \ | ||
1079 | .this_id = ATA_SHT_THIS_ID, \ | ||
1080 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \ | ||
1081 | .emulated = ATA_SHT_EMULATED, \ | ||
1082 | .use_clustering = ATA_SHT_USE_CLUSTERING, \ | ||
1083 | .proc_name = drv_name, \ | ||
1084 | .slave_configure = ata_scsi_slave_config, \ | ||
1085 | .slave_destroy = ata_scsi_slave_destroy, \ | ||
1086 | .bios_param = ata_std_bios_param | ||
1087 | |||
1088 | #define ATA_NCQ_SHT(drv_name) \ | ||
1089 | ATA_BASE_SHT(drv_name), \ | ||
1090 | .change_queue_depth = ata_scsi_change_queue_depth | ||
1091 | |||
1092 | /* | ||
1093 | * PMP helpers | ||
1094 | */ | ||
1095 | #ifdef CONFIG_SATA_PMP | ||
1096 | static inline bool sata_pmp_supported(struct ata_port *ap) | ||
1097 | { | ||
1098 | return ap->flags & ATA_FLAG_PMP; | ||
1099 | } | ||
1100 | |||
1101 | static inline bool sata_pmp_attached(struct ata_port *ap) | ||
1102 | { | ||
1103 | return ap->nr_pmp_links != 0; | ||
1104 | } | ||
1105 | |||
1106 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1107 | { | ||
1108 | return link == &link->ap->link; | ||
1109 | } | ||
1110 | #else /* CONFIG_SATA_PMP */ | ||
1111 | static inline bool sata_pmp_supported(struct ata_port *ap) | ||
1112 | { | ||
1113 | return false; | ||
1114 | } | ||
1115 | |||
1116 | static inline bool sata_pmp_attached(struct ata_port *ap) | ||
1117 | { | ||
1118 | return false; | ||
1119 | } | ||
1120 | |||
1121 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1122 | { | ||
1123 | return 1; | ||
1124 | } | ||
1125 | #endif /* CONFIG_SATA_PMP */ | ||
1126 | |||
1127 | static inline int sata_srst_pmp(struct ata_link *link) | ||
1128 | { | ||
1129 | if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) | ||
1130 | return SATA_PMP_CTRL_PORT; | ||
1131 | return link->pmp; | ||
1132 | } | ||
1069 | 1133 | ||
1070 | /* | 1134 | /* |
1071 | * printk helpers | 1135 | * printk helpers |
@@ -1074,7 +1138,7 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
1074 | printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) | 1138 | printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) |
1075 | 1139 | ||
1076 | #define ata_link_printk(link, lv, fmt, args...) do { \ | 1140 | #define ata_link_printk(link, lv, fmt, args...) do { \ |
1077 | if ((link)->ap->nr_pmp_links) \ | 1141 | if (sata_pmp_attached((link)->ap)) \ |
1078 | printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ | 1142 | printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ |
1079 | (link)->pmp , ##args); \ | 1143 | (link)->pmp , ##args); \ |
1080 | else \ | 1144 | else \ |
@@ -1094,18 +1158,11 @@ extern void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) | |||
1094 | __attribute__ ((format (printf, 2, 3))); | 1158 | __attribute__ ((format (printf, 2, 3))); |
1095 | extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); | 1159 | extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); |
1096 | 1160 | ||
1097 | static inline void ata_ehi_schedule_probe(struct ata_eh_info *ehi) | ||
1098 | { | ||
1099 | ehi->flags |= ATA_EHI_RESUME_LINK; | ||
1100 | ehi->action |= ATA_EH_SOFTRESET; | ||
1101 | ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; | ||
1102 | } | ||
1103 | |||
1104 | static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) | 1161 | static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) |
1105 | { | 1162 | { |
1106 | ata_ehi_schedule_probe(ehi); | 1163 | ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; |
1107 | ehi->flags |= ATA_EHI_HOTPLUGGED; | 1164 | ehi->flags |= ATA_EHI_HOTPLUGGED; |
1108 | ehi->action |= ATA_EH_ENABLE_LINK; | 1165 | ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK; |
1109 | ehi->err_mask |= AC_ERR_ATA_BUS; | 1166 | ehi->err_mask |= AC_ERR_ATA_BUS; |
1110 | } | 1167 | } |
1111 | 1168 | ||
@@ -1126,7 +1183,7 @@ static inline unsigned int ata_tag_valid(unsigned int tag) | |||
1126 | 1183 | ||
1127 | static inline unsigned int ata_tag_internal(unsigned int tag) | 1184 | static inline unsigned int ata_tag_internal(unsigned int tag) |
1128 | { | 1185 | { |
1129 | return tag == ATA_MAX_QUEUE - 1; | 1186 | return tag == ATA_TAG_INTERNAL; |
1130 | } | 1187 | } |
1131 | 1188 | ||
1132 | /* | 1189 | /* |
@@ -1167,11 +1224,6 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev) | |||
1167 | /* | 1224 | /* |
1168 | * link helpers | 1225 | * link helpers |
1169 | */ | 1226 | */ |
1170 | static inline int ata_is_host_link(const struct ata_link *link) | ||
1171 | { | ||
1172 | return link == &link->ap->link; | ||
1173 | } | ||
1174 | |||
1175 | static inline int ata_link_max_devices(const struct ata_link *link) | 1227 | static inline int ata_link_max_devices(const struct ata_link *link) |
1176 | { | 1228 | { |
1177 | if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) | 1229 | if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) |
@@ -1186,7 +1238,7 @@ static inline int ata_link_active(struct ata_link *link) | |||
1186 | 1238 | ||
1187 | static inline struct ata_link *ata_port_first_link(struct ata_port *ap) | 1239 | static inline struct ata_link *ata_port_first_link(struct ata_port *ap) |
1188 | { | 1240 | { |
1189 | if (ap->nr_pmp_links) | 1241 | if (sata_pmp_attached(ap)) |
1190 | return ap->pmp_link; | 1242 | return ap->pmp_link; |
1191 | return &ap->link; | 1243 | return &ap->link; |
1192 | } | 1244 | } |
@@ -1195,8 +1247,8 @@ static inline struct ata_link *ata_port_next_link(struct ata_link *link) | |||
1195 | { | 1247 | { |
1196 | struct ata_port *ap = link->ap; | 1248 | struct ata_port *ap = link->ap; |
1197 | 1249 | ||
1198 | if (link == &ap->link) { | 1250 | if (ata_is_host_link(link)) { |
1199 | if (!ap->nr_pmp_links) | 1251 | if (!sata_pmp_attached(ap)) |
1200 | return NULL; | 1252 | return NULL; |
1201 | return ap->pmp_link; | 1253 | return ap->pmp_link; |
1202 | } | 1254 | } |
@@ -1222,11 +1274,6 @@ static inline struct ata_link *ata_port_next_link(struct ata_link *link) | |||
1222 | for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ | 1274 | for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ |
1223 | (dev) >= (link)->device || ((dev) = NULL); (dev)--) | 1275 | (dev) >= (link)->device || ((dev) = NULL); (dev)--) |
1224 | 1276 | ||
1225 | static inline u8 ata_chk_status(struct ata_port *ap) | ||
1226 | { | ||
1227 | return ap->ops->check_status(ap); | ||
1228 | } | ||
1229 | |||
1230 | /** | 1277 | /** |
1231 | * ata_ncq_enabled - Test whether NCQ is enabled | 1278 | * ata_ncq_enabled - Test whether NCQ is enabled |
1232 | * @dev: ATA device to test for | 1279 | * @dev: ATA device to test for |
@@ -1243,74 +1290,6 @@ static inline int ata_ncq_enabled(struct ata_device *dev) | |||
1243 | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; | 1290 | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; |
1244 | } | 1291 | } |
1245 | 1292 | ||
1246 | /** | ||
1247 | * ata_pause - Flush writes and pause 400 nanoseconds. | ||
1248 | * @ap: Port to wait for. | ||
1249 | * | ||
1250 | * LOCKING: | ||
1251 | * Inherited from caller. | ||
1252 | */ | ||
1253 | |||
1254 | static inline void ata_pause(struct ata_port *ap) | ||
1255 | { | ||
1256 | ata_altstatus(ap); | ||
1257 | ndelay(400); | ||
1258 | } | ||
1259 | |||
1260 | |||
1261 | /** | ||
1262 | * ata_busy_wait - Wait for a port status register | ||
1263 | * @ap: Port to wait for. | ||
1264 | * @bits: bits that must be clear | ||
1265 | * @max: number of 10uS waits to perform | ||
1266 | * | ||
1267 | * Waits up to max*10 microseconds for the selected bits in the port's | ||
1268 | * status register to be cleared. | ||
1269 | * Returns final value of status register. | ||
1270 | * | ||
1271 | * LOCKING: | ||
1272 | * Inherited from caller. | ||
1273 | */ | ||
1274 | |||
1275 | static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits, | ||
1276 | unsigned int max) | ||
1277 | { | ||
1278 | u8 status; | ||
1279 | |||
1280 | do { | ||
1281 | udelay(10); | ||
1282 | status = ata_chk_status(ap); | ||
1283 | max--; | ||
1284 | } while (status != 0xff && (status & bits) && (max > 0)); | ||
1285 | |||
1286 | return status; | ||
1287 | } | ||
1288 | |||
1289 | |||
1290 | /** | ||
1291 | * ata_wait_idle - Wait for a port to be idle. | ||
1292 | * @ap: Port to wait for. | ||
1293 | * | ||
1294 | * Waits up to 10ms for port's BUSY and DRQ signals to clear. | ||
1295 | * Returns final value of status register. | ||
1296 | * | ||
1297 | * LOCKING: | ||
1298 | * Inherited from caller. | ||
1299 | */ | ||
1300 | |||
1301 | static inline u8 ata_wait_idle(struct ata_port *ap) | ||
1302 | { | ||
1303 | u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); | ||
1304 | |||
1305 | #ifdef ATA_DEBUG | ||
1306 | if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) | ||
1307 | ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", | ||
1308 | status); | ||
1309 | #endif | ||
1310 | |||
1311 | return status; | ||
1312 | } | ||
1313 | |||
1314 | static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) | 1293 | static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) |
1315 | { | 1294 | { |
1316 | qc->tf.ctl |= ATA_NIEN; | 1295 | qc->tf.ctl |= ATA_NIEN; |
@@ -1403,4 +1382,171 @@ static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) | |||
1403 | return *(struct ata_port **)&host->hostdata[0]; | 1382 | return *(struct ata_port **)&host->hostdata[0]; |
1404 | } | 1383 | } |
1405 | 1384 | ||
1385 | |||
1386 | /************************************************************************** | ||
1387 | * PMP - drivers/ata/libata-pmp.c | ||
1388 | */ | ||
1389 | #ifdef CONFIG_SATA_PMP | ||
1390 | |||
1391 | extern const struct ata_port_operations sata_pmp_port_ops; | ||
1392 | |||
1393 | extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); | ||
1394 | extern void sata_pmp_error_handler(struct ata_port *ap); | ||
1395 | |||
1396 | #else /* CONFIG_SATA_PMP */ | ||
1397 | |||
1398 | #define sata_pmp_port_ops sata_port_ops | ||
1399 | #define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer | ||
1400 | #define sata_pmp_error_handler ata_std_error_handler | ||
1401 | |||
1402 | #endif /* CONFIG_SATA_PMP */ | ||
1403 | |||
1404 | |||
1405 | /************************************************************************** | ||
1406 | * SFF - drivers/ata/libata-sff.c | ||
1407 | */ | ||
1408 | #ifdef CONFIG_ATA_SFF | ||
1409 | |||
1410 | extern const struct ata_port_operations ata_sff_port_ops; | ||
1411 | extern const struct ata_port_operations ata_bmdma_port_ops; | ||
1412 | |||
1413 | /* PIO only, sg_tablesize and dma_boundary limits can be removed */ | ||
1414 | #define ATA_PIO_SHT(drv_name) \ | ||
1415 | ATA_BASE_SHT(drv_name), \ | ||
1416 | .sg_tablesize = LIBATA_MAX_PRD, \ | ||
1417 | .dma_boundary = ATA_DMA_BOUNDARY | ||
1418 | |||
1419 | #define ATA_BMDMA_SHT(drv_name) \ | ||
1420 | ATA_BASE_SHT(drv_name), \ | ||
1421 | .sg_tablesize = LIBATA_MAX_PRD, \ | ||
1422 | .dma_boundary = ATA_DMA_BOUNDARY | ||
1423 | |||
1424 | extern void ata_sff_qc_prep(struct ata_queued_cmd *qc); | ||
1425 | extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc); | ||
1426 | extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); | ||
1427 | extern u8 ata_sff_check_status(struct ata_port *ap); | ||
1428 | extern u8 ata_sff_altstatus(struct ata_port *ap); | ||
1429 | extern int ata_sff_busy_sleep(struct ata_port *ap, | ||
1430 | unsigned long timeout_pat, unsigned long timeout); | ||
1431 | extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline); | ||
1432 | extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); | ||
1433 | extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | ||
1434 | extern void ata_sff_exec_command(struct ata_port *ap, | ||
1435 | const struct ata_taskfile *tf); | ||
1436 | extern unsigned int ata_sff_data_xfer(struct ata_device *dev, | ||
1437 | unsigned char *buf, unsigned int buflen, int rw); | ||
1438 | extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, | ||
1439 | unsigned char *buf, unsigned int buflen, int rw); | ||
1440 | extern u8 ata_sff_irq_on(struct ata_port *ap); | ||
1441 | extern void ata_sff_irq_clear(struct ata_port *ap); | ||
1442 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | ||
1443 | u8 status, int in_wq); | ||
1444 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); | ||
1445 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); | ||
1446 | extern unsigned int ata_sff_host_intr(struct ata_port *ap, | ||
1447 | struct ata_queued_cmd *qc); | ||
1448 | extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); | ||
1449 | extern void ata_sff_freeze(struct ata_port *ap); | ||
1450 | extern void ata_sff_thaw(struct ata_port *ap); | ||
1451 | extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline); | ||
1452 | extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, | ||
1453 | u8 *r_err); | ||
1454 | extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, | ||
1455 | unsigned long deadline); | ||
1456 | extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes, | ||
1457 | unsigned long deadline); | ||
1458 | extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class, | ||
1459 | unsigned long deadline); | ||
1460 | extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes); | ||
1461 | extern void ata_sff_error_handler(struct ata_port *ap); | ||
1462 | extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc); | ||
1463 | extern int ata_sff_port_start(struct ata_port *ap); | ||
1464 | extern void ata_sff_std_ports(struct ata_ioports *ioaddr); | ||
1465 | extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev, | ||
1466 | unsigned long xfer_mask); | ||
1467 | extern void ata_bmdma_setup(struct ata_queued_cmd *qc); | ||
1468 | extern void ata_bmdma_start(struct ata_queued_cmd *qc); | ||
1469 | extern void ata_bmdma_stop(struct ata_queued_cmd *qc); | ||
1470 | extern u8 ata_bmdma_status(struct ata_port *ap); | ||
1471 | extern void ata_bus_reset(struct ata_port *ap); | ||
1472 | |||
1473 | #ifdef CONFIG_PCI | ||
1474 | extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev); | ||
1475 | extern int ata_pci_bmdma_init(struct ata_host *host); | ||
1476 | extern int ata_pci_sff_init_host(struct ata_host *host); | ||
1477 | extern int ata_pci_sff_prepare_host(struct pci_dev *pdev, | ||
1478 | const struct ata_port_info * const * ppi, | ||
1479 | struct ata_host **r_host); | ||
1480 | extern int ata_pci_sff_activate_host(struct ata_host *host, | ||
1481 | irq_handler_t irq_handler, | ||
1482 | struct scsi_host_template *sht); | ||
1483 | extern int ata_pci_sff_init_one(struct pci_dev *pdev, | ||
1484 | const struct ata_port_info * const * ppi, | ||
1485 | struct scsi_host_template *sht, void *host_priv); | ||
1486 | #endif /* CONFIG_PCI */ | ||
1487 | |||
1488 | /** | ||
1489 | * ata_sff_pause - Flush writes and pause 400 nanoseconds. | ||
1490 | * @ap: Port to wait for. | ||
1491 | * | ||
1492 | * LOCKING: | ||
1493 | * Inherited from caller. | ||
1494 | */ | ||
1495 | static inline void ata_sff_pause(struct ata_port *ap) | ||
1496 | { | ||
1497 | ata_sff_altstatus(ap); | ||
1498 | ndelay(400); | ||
1499 | } | ||
1500 | |||
1501 | /** | ||
1502 | * ata_sff_busy_wait - Wait for a port status register | ||
1503 | * @ap: Port to wait for. | ||
1504 | * @bits: bits that must be clear | ||
1505 | * @max: number of 10uS waits to perform | ||
1506 | * | ||
1507 | * Waits up to max*10 microseconds for the selected bits in the port's | ||
1508 | * status register to be cleared. | ||
1509 | * Returns final value of status register. | ||
1510 | * | ||
1511 | * LOCKING: | ||
1512 | * Inherited from caller. | ||
1513 | */ | ||
1514 | static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits, | ||
1515 | unsigned int max) | ||
1516 | { | ||
1517 | u8 status; | ||
1518 | |||
1519 | do { | ||
1520 | udelay(10); | ||
1521 | status = ap->ops->sff_check_status(ap); | ||
1522 | max--; | ||
1523 | } while (status != 0xff && (status & bits) && (max > 0)); | ||
1524 | |||
1525 | return status; | ||
1526 | } | ||
1527 | |||
1528 | /** | ||
1529 | * ata_wait_idle - Wait for a port to be idle. | ||
1530 | * @ap: Port to wait for. | ||
1531 | * | ||
1532 | * Waits up to 10ms for port's BUSY and DRQ signals to clear. | ||
1533 | * Returns final value of status register. | ||
1534 | * | ||
1535 | * LOCKING: | ||
1536 | * Inherited from caller. | ||
1537 | */ | ||
1538 | static inline u8 ata_wait_idle(struct ata_port *ap) | ||
1539 | { | ||
1540 | u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); | ||
1541 | |||
1542 | #ifdef ATA_DEBUG | ||
1543 | if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) | ||
1544 | ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n", | ||
1545 | status); | ||
1546 | #endif | ||
1547 | |||
1548 | return status; | ||
1549 | } | ||
1550 | #endif /* CONFIG_ATA_SFF */ | ||
1551 | |||
1406 | #endif /* __LINUX_LIBATA_H__ */ | 1552 | #endif /* __LINUX_LIBATA_H__ */ |
diff --git a/include/linux/lm_interface.h b/include/linux/lm_interface.h index 1418fdc9ac02..f274997bc283 100644 --- a/include/linux/lm_interface.h +++ b/include/linux/lm_interface.h | |||
@@ -21,9 +21,15 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); | |||
21 | * modify the filesystem. The lock module shouldn't assign a journal to the FS | 21 | * modify the filesystem. The lock module shouldn't assign a journal to the FS |
22 | * mount. It shouldn't send recovery callbacks to the FS mount. If the node | 22 | * mount. It shouldn't send recovery callbacks to the FS mount. If the node |
23 | * dies or withdraws, all locks can be wiped immediately. | 23 | * dies or withdraws, all locks can be wiped immediately. |
24 | * | ||
25 | * LM_MFLAG_CONV_NODROP | ||
26 | * Do not allow the dlm to internally resolve conversion deadlocks by demoting | ||
27 | * the lock to unlocked and then reacquiring it in the requested mode. Instead, | ||
28 | * it should cancel the request and return LM_OUT_CONV_DEADLK. | ||
24 | */ | 29 | */ |
25 | 30 | ||
26 | #define LM_MFLAG_SPECTATOR 0x00000001 | 31 | #define LM_MFLAG_SPECTATOR 0x00000001 |
32 | #define LM_MFLAG_CONV_NODROP 0x00000002 | ||
27 | 33 | ||
28 | /* | 34 | /* |
29 | * lm_lockstruct flags | 35 | * lm_lockstruct flags |
@@ -110,6 +116,9 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); | |||
110 | * | 116 | * |
111 | * LM_OUT_ASYNC | 117 | * LM_OUT_ASYNC |
112 | * The result of the request will be returned in an LM_CB_ASYNC callback. | 118 | * The result of the request will be returned in an LM_CB_ASYNC callback. |
119 | * | ||
120 | * LM_OUT_CONV_DEADLK | ||
121 | * The lock request was canceled do to a conversion deadlock. | ||
113 | */ | 122 | */ |
114 | 123 | ||
115 | #define LM_OUT_ST_MASK 0x00000003 | 124 | #define LM_OUT_ST_MASK 0x00000003 |
@@ -117,6 +126,7 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); | |||
117 | #define LM_OUT_CANCELED 0x00000008 | 126 | #define LM_OUT_CANCELED 0x00000008 |
118 | #define LM_OUT_ASYNC 0x00000080 | 127 | #define LM_OUT_ASYNC 0x00000080 |
119 | #define LM_OUT_ERROR 0x00000100 | 128 | #define LM_OUT_ERROR 0x00000100 |
129 | #define LM_OUT_CONV_DEADLK 0x00000200 | ||
120 | 130 | ||
121 | /* | 131 | /* |
122 | * lm_callback_t types | 132 | * lm_callback_t types |
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h index 99e044b4efc6..a09b84e4fdb4 100644 --- a/include/linux/mbcache.h +++ b/include/linux/mbcache.h | |||
@@ -34,7 +34,7 @@ void mb_cache_destroy(struct mb_cache *); | |||
34 | 34 | ||
35 | /* Functions on cache entries */ | 35 | /* Functions on cache entries */ |
36 | 36 | ||
37 | struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *); | 37 | struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t); |
38 | int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *, | 38 | int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *, |
39 | sector_t, unsigned int[]); | 39 | sector_t, unsigned int[]); |
40 | void mb_cache_entry_release(struct mb_cache_entry *); | 40 | void mb_cache_entry_release(struct mb_cache_entry *); |
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 7d1eaa97de13..77323a72dd3c 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
@@ -81,7 +81,7 @@ enum { | |||
81 | MLX4_CMD_SW2HW_CQ = 0x16, | 81 | MLX4_CMD_SW2HW_CQ = 0x16, |
82 | MLX4_CMD_HW2SW_CQ = 0x17, | 82 | MLX4_CMD_HW2SW_CQ = 0x17, |
83 | MLX4_CMD_QUERY_CQ = 0x18, | 83 | MLX4_CMD_QUERY_CQ = 0x18, |
84 | MLX4_CMD_RESIZE_CQ = 0x2c, | 84 | MLX4_CMD_MODIFY_CQ = 0x2c, |
85 | 85 | ||
86 | /* SRQ commands */ | 86 | /* SRQ commands */ |
87 | MLX4_CMD_SW2HW_SRQ = 0x35, | 87 | MLX4_CMD_SW2HW_SRQ = 0x35, |
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 0181e0a57cbf..071cf96cf01f 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h | |||
@@ -45,11 +45,11 @@ struct mlx4_cqe { | |||
45 | u8 sl; | 45 | u8 sl; |
46 | u8 reserved1; | 46 | u8 reserved1; |
47 | __be16 rlid; | 47 | __be16 rlid; |
48 | u32 reserved2; | 48 | __be32 ipoib_status; |
49 | __be32 byte_cnt; | 49 | __be32 byte_cnt; |
50 | __be16 wqe_index; | 50 | __be16 wqe_index; |
51 | __be16 checksum; | 51 | __be16 checksum; |
52 | u8 reserved3[3]; | 52 | u8 reserved2[3]; |
53 | u8 owner_sr_opcode; | 53 | u8 owner_sr_opcode; |
54 | }; | 54 | }; |
55 | 55 | ||
@@ -85,6 +85,16 @@ enum { | |||
85 | MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, | 85 | MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | enum { | ||
89 | MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22, | ||
90 | MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23, | ||
91 | MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24, | ||
92 | MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25, | ||
93 | MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26, | ||
94 | MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27, | ||
95 | MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28, | ||
96 | }; | ||
97 | |||
88 | static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, | 98 | static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, |
89 | void __iomem *uar_page, | 99 | void __iomem *uar_page, |
90 | spinlock_t *doorbell_lock) | 100 | spinlock_t *doorbell_lock) |
@@ -120,4 +130,9 @@ enum { | |||
120 | MLX4_CQ_DB_REQ_NOT = 2 << 24 | 130 | MLX4_CQ_DB_REQ_NOT = 2 << 24 |
121 | }; | 131 | }; |
122 | 132 | ||
133 | int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, | ||
134 | u16 count, u16 period); | ||
135 | int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, | ||
136 | int entries, struct mlx4_mtt *mtt); | ||
137 | |||
123 | #endif /* MLX4_CQ_H */ | 138 | #endif /* MLX4_CQ_H */ |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6cdf813cd478..ff7df1a2222f 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -186,6 +186,7 @@ struct mlx4_caps { | |||
186 | u32 flags; | 186 | u32 flags; |
187 | u16 stat_rate_support; | 187 | u16 stat_rate_support; |
188 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; | 188 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; |
189 | int max_gso_sz; | ||
189 | }; | 190 | }; |
190 | 191 | ||
191 | struct mlx4_buf_list { | 192 | struct mlx4_buf_list { |
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 1b835ca49df1..53c5fdb6eac4 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h | |||
@@ -48,8 +48,7 @@ struct mlx4_interface { | |||
48 | void * (*add) (struct mlx4_dev *dev); | 48 | void * (*add) (struct mlx4_dev *dev); |
49 | void (*remove)(struct mlx4_dev *dev, void *context); | 49 | void (*remove)(struct mlx4_dev *dev, void *context); |
50 | void (*event) (struct mlx4_dev *dev, void *context, | 50 | void (*event) (struct mlx4_dev *dev, void *context, |
51 | enum mlx4_dev_event event, int subtype, | 51 | enum mlx4_dev_event event, int port); |
52 | int port); | ||
53 | struct list_head list; | 52 | struct list_head list; |
54 | }; | 53 | }; |
55 | 54 | ||
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 09a2230923f2..a5e43febee4f 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -158,10 +158,12 @@ struct mlx4_qp_context { | |||
158 | #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) | 158 | #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232) |
159 | 159 | ||
160 | enum { | 160 | enum { |
161 | MLX4_WQE_CTRL_NEC = 1 << 29, | 161 | MLX4_WQE_CTRL_NEC = 1 << 29, |
162 | MLX4_WQE_CTRL_FENCE = 1 << 6, | 162 | MLX4_WQE_CTRL_FENCE = 1 << 6, |
163 | MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, | 163 | MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2, |
164 | MLX4_WQE_CTRL_SOLICITED = 1 << 1, | 164 | MLX4_WQE_CTRL_SOLICITED = 1 << 1, |
165 | MLX4_WQE_CTRL_IP_CSUM = 1 << 4, | ||
166 | MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, | ||
165 | }; | 167 | }; |
166 | 168 | ||
167 | struct mlx4_wqe_ctrl_seg { | 169 | struct mlx4_wqe_ctrl_seg { |
@@ -217,6 +219,11 @@ struct mlx4_wqe_datagram_seg { | |||
217 | __be32 reservd[2]; | 219 | __be32 reservd[2]; |
218 | }; | 220 | }; |
219 | 221 | ||
222 | struct mlx4_lso_seg { | ||
223 | __be32 mss_hdr_size; | ||
224 | __be32 header[0]; | ||
225 | }; | ||
226 | |||
220 | struct mlx4_wqe_bind_seg { | 227 | struct mlx4_wqe_bind_seg { |
221 | __be32 flags1; | 228 | __be32 flags1; |
222 | __be32 flags2; | 229 | __be32 flags2; |
diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 35a8277ec1bd..de4decfa1bfc 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h | |||
@@ -2,7 +2,11 @@ | |||
2 | #define __LINUX_MROUTE_H | 2 | #define __LINUX_MROUTE_H |
3 | 3 | ||
4 | #include <linux/sockios.h> | 4 | #include <linux/sockios.h> |
5 | #include <linux/types.h> | ||
6 | #ifdef __KERNEL__ | ||
5 | #include <linux/in.h> | 7 | #include <linux/in.h> |
8 | #endif | ||
9 | #include <linux/pim.h> | ||
6 | 10 | ||
7 | /* | 11 | /* |
8 | * Based on the MROUTING 3.5 defines primarily to keep | 12 | * Based on the MROUTING 3.5 defines primarily to keep |
@@ -210,27 +214,6 @@ struct mfc_cache | |||
210 | #define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */ | 214 | #define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */ |
211 | 215 | ||
212 | #ifdef __KERNEL__ | 216 | #ifdef __KERNEL__ |
213 | |||
214 | #define PIM_V1_VERSION __constant_htonl(0x10000000) | ||
215 | #define PIM_V1_REGISTER 1 | ||
216 | |||
217 | #define PIM_VERSION 2 | ||
218 | #define PIM_REGISTER 1 | ||
219 | |||
220 | #define PIM_NULL_REGISTER __constant_htonl(0x40000000) | ||
221 | |||
222 | /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ | ||
223 | |||
224 | struct pimreghdr | ||
225 | { | ||
226 | __u8 type; | ||
227 | __u8 reserved; | ||
228 | __be16 csum; | ||
229 | __be32 flags; | ||
230 | }; | ||
231 | |||
232 | extern int pim_rcv_v1(struct sk_buff *); | ||
233 | |||
234 | struct rtmsg; | 217 | struct rtmsg; |
235 | extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); | 218 | extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); |
236 | #endif | 219 | #endif |
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h new file mode 100644 index 000000000000..e7989593142b --- /dev/null +++ b/include/linux/mroute6.h | |||
@@ -0,0 +1,228 @@ | |||
1 | #ifndef __LINUX_MROUTE6_H | ||
2 | #define __LINUX_MROUTE6_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/sockios.h> | ||
6 | |||
7 | /* | ||
8 | * Based on the MROUTING 3.5 defines primarily to keep | ||
9 | * source compatibility with BSD. | ||
10 | * | ||
11 | * See the pim6sd code for the original history. | ||
12 | * | ||
13 | * Protocol Independent Multicast (PIM) data structures included | ||
14 | * Carlos Picoto (cap@di.fc.ul.pt) | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #define MRT6_BASE 200 | ||
19 | #define MRT6_INIT (MRT6_BASE) /* Activate the kernel mroute code */ | ||
20 | #define MRT6_DONE (MRT6_BASE+1) /* Shutdown the kernel mroute */ | ||
21 | #define MRT6_ADD_MIF (MRT6_BASE+2) /* Add a virtual interface */ | ||
22 | #define MRT6_DEL_MIF (MRT6_BASE+3) /* Delete a virtual interface */ | ||
23 | #define MRT6_ADD_MFC (MRT6_BASE+4) /* Add a multicast forwarding entry */ | ||
24 | #define MRT6_DEL_MFC (MRT6_BASE+5) /* Delete a multicast forwarding entry */ | ||
25 | #define MRT6_VERSION (MRT6_BASE+6) /* Get the kernel multicast version */ | ||
26 | #define MRT6_ASSERT (MRT6_BASE+7) /* Activate PIM assert mode */ | ||
27 | #define MRT6_PIM (MRT6_BASE+8) /* enable PIM code */ | ||
28 | |||
29 | #define SIOCGETMIFCNT_IN6 SIOCPROTOPRIVATE /* IP protocol privates */ | ||
30 | #define SIOCGETSGCNT_IN6 (SIOCPROTOPRIVATE+1) | ||
31 | #define SIOCGETRPF (SIOCPROTOPRIVATE+2) | ||
32 | |||
33 | #define MAXMIFS 32 | ||
34 | typedef unsigned long mifbitmap_t; /* User mode code depends on this lot */ | ||
35 | typedef unsigned short mifi_t; | ||
36 | #define ALL_MIFS ((mifi_t)(-1)) | ||
37 | |||
38 | #ifndef IF_SETSIZE | ||
39 | #define IF_SETSIZE 256 | ||
40 | #endif | ||
41 | |||
42 | typedef __u32 if_mask; | ||
43 | #define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */ | ||
44 | |||
45 | #if !defined(__KERNEL__) && !defined(DIV_ROUND_UP) | ||
46 | #define DIV_ROUND_UP(x,y) (((x) + ((y) - 1)) / (y)) | ||
47 | #endif | ||
48 | |||
49 | typedef struct if_set { | ||
50 | if_mask ifs_bits[DIV_ROUND_UP(IF_SETSIZE, NIFBITS)]; | ||
51 | } if_set; | ||
52 | |||
53 | #define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS))) | ||
54 | #define IF_CLR(n, p) ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS))) | ||
55 | #define IF_ISSET(n, p) ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS))) | ||
56 | #define IF_COPY(f, t) bcopy(f, t, sizeof(*(f))) | ||
57 | #define IF_ZERO(p) bzero(p, sizeof(*(p))) | ||
58 | |||
59 | /* | ||
60 | * Passed by mrouted for an MRT_ADD_MIF - again we use the | ||
61 | * mrouted 3.6 structures for compatibility | ||
62 | */ | ||
63 | |||
64 | struct mif6ctl { | ||
65 | mifi_t mif6c_mifi; /* Index of MIF */ | ||
66 | unsigned char mif6c_flags; /* MIFF_ flags */ | ||
67 | unsigned char vifc_threshold; /* ttl limit */ | ||
68 | u_short mif6c_pifi; /* the index of the physical IF */ | ||
69 | unsigned int vifc_rate_limit; /* Rate limiter values (NI) */ | ||
70 | }; | ||
71 | |||
72 | #define MIFF_REGISTER 0x1 /* register vif */ | ||
73 | |||
74 | /* | ||
75 | * Cache manipulation structures for mrouted and PIMd | ||
76 | */ | ||
77 | |||
78 | struct mf6cctl | ||
79 | { | ||
80 | struct sockaddr_in6 mf6cc_origin; /* Origin of mcast */ | ||
81 | struct sockaddr_in6 mf6cc_mcastgrp; /* Group in question */ | ||
82 | mifi_t mf6cc_parent; /* Where it arrived */ | ||
83 | struct if_set mf6cc_ifset; /* Where it is going */ | ||
84 | }; | ||
85 | |||
86 | /* | ||
87 | * Group count retrieval for pim6sd | ||
88 | */ | ||
89 | |||
90 | struct sioc_sg_req6 | ||
91 | { | ||
92 | struct sockaddr_in6 src; | ||
93 | struct sockaddr_in6 grp; | ||
94 | unsigned long pktcnt; | ||
95 | unsigned long bytecnt; | ||
96 | unsigned long wrong_if; | ||
97 | }; | ||
98 | |||
99 | /* | ||
100 | * To get vif packet counts | ||
101 | */ | ||
102 | |||
103 | struct sioc_mif_req6 | ||
104 | { | ||
105 | mifi_t mifi; /* Which iface */ | ||
106 | unsigned long icount; /* In packets */ | ||
107 | unsigned long ocount; /* Out packets */ | ||
108 | unsigned long ibytes; /* In bytes */ | ||
109 | unsigned long obytes; /* Out bytes */ | ||
110 | }; | ||
111 | |||
112 | /* | ||
113 | * That's all usermode folks | ||
114 | */ | ||
115 | |||
116 | #ifdef __KERNEL__ | ||
117 | |||
118 | #include <linux/skbuff.h> /* for struct sk_buff_head */ | ||
119 | |||
120 | #ifdef CONFIG_IPV6_MROUTE | ||
121 | static inline int ip6_mroute_opt(int opt) | ||
122 | { | ||
123 | return (opt >= MRT6_BASE) && (opt <= MRT6_BASE + 10); | ||
124 | } | ||
125 | #else | ||
126 | static inline int ip6_mroute_opt(int opt) | ||
127 | { | ||
128 | return 0; | ||
129 | } | ||
130 | #endif | ||
131 | |||
132 | struct sock; | ||
133 | |||
134 | extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int); | ||
135 | extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); | ||
136 | extern int ip6_mr_input(struct sk_buff *skb); | ||
137 | extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); | ||
138 | extern void ip6_mr_init(void); | ||
139 | |||
140 | struct mif_device | ||
141 | { | ||
142 | struct net_device *dev; /* Device we are using */ | ||
143 | unsigned long bytes_in,bytes_out; | ||
144 | unsigned long pkt_in,pkt_out; /* Statistics */ | ||
145 | unsigned long rate_limit; /* Traffic shaping (NI) */ | ||
146 | unsigned char threshold; /* TTL threshold */ | ||
147 | unsigned short flags; /* Control flags */ | ||
148 | int link; /* Physical interface index */ | ||
149 | }; | ||
150 | |||
151 | #define VIFF_STATIC 0x8000 | ||
152 | |||
153 | struct mfc6_cache | ||
154 | { | ||
155 | struct mfc6_cache *next; /* Next entry on cache line */ | ||
156 | struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */ | ||
157 | struct in6_addr mf6c_origin; /* Source of packet */ | ||
158 | mifi_t mf6c_parent; /* Source interface */ | ||
159 | int mfc_flags; /* Flags on line */ | ||
160 | |||
161 | union { | ||
162 | struct { | ||
163 | unsigned long expires; | ||
164 | struct sk_buff_head unresolved; /* Unresolved buffers */ | ||
165 | } unres; | ||
166 | struct { | ||
167 | unsigned long last_assert; | ||
168 | int minvif; | ||
169 | int maxvif; | ||
170 | unsigned long bytes; | ||
171 | unsigned long pkt; | ||
172 | unsigned long wrong_if; | ||
173 | unsigned char ttls[MAXMIFS]; /* TTL thresholds */ | ||
174 | } res; | ||
175 | } mfc_un; | ||
176 | }; | ||
177 | |||
178 | #define MFC_STATIC 1 | ||
179 | #define MFC_NOTIFY 2 | ||
180 | |||
181 | #define MFC6_LINES 64 | ||
182 | |||
183 | #define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \ | ||
184 | (__force u32)(a)->s6_addr32[1] ^ \ | ||
185 | (__force u32)(a)->s6_addr32[2] ^ \ | ||
186 | (__force u32)(a)->s6_addr32[3] ^ \ | ||
187 | (__force u32)(g)->s6_addr32[0] ^ \ | ||
188 | (__force u32)(g)->s6_addr32[1] ^ \ | ||
189 | (__force u32)(g)->s6_addr32[2] ^ \ | ||
190 | (__force u32)(g)->s6_addr32[3]) % MFC6_LINES) | ||
191 | |||
192 | #define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */ | ||
193 | |||
194 | #endif | ||
195 | |||
196 | #ifdef __KERNEL__ | ||
197 | struct rtmsg; | ||
198 | extern int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); | ||
199 | |||
200 | #ifdef CONFIG_IPV6_MROUTE | ||
201 | extern struct sock *mroute6_socket; | ||
202 | extern int ip6mr_sk_done(struct sock *sk); | ||
203 | #else | ||
204 | #define mroute6_socket NULL | ||
205 | static inline int ip6mr_sk_done(struct sock *sk) { return 0; } | ||
206 | #endif | ||
207 | #endif | ||
208 | |||
209 | /* | ||
210 | * Structure used to communicate from kernel to multicast router. | ||
211 | * We'll overlay the structure onto an MLD header (not an IPv6 heder like igmpmsg{} | ||
212 | * used for IPv4 implementation). This is because this structure will be passed via an | ||
213 | * IPv6 raw socket, on wich an application will only receiver the payload i.e the data after | ||
214 | * the IPv6 header and all the extension headers. (See section 3 of RFC 3542) | ||
215 | */ | ||
216 | |||
217 | struct mrt6msg { | ||
218 | #define MRT6MSG_NOCACHE 1 | ||
219 | #define MRT6MSG_WRONGMIF 2 | ||
220 | #define MRT6MSG_WHOLEPKT 3 /* used for use level encap */ | ||
221 | __u8 im6_mbz; /* must be zero */ | ||
222 | __u8 im6_msgtype; /* what type of message */ | ||
223 | __u16 im6_mif; /* mif rec'd on */ | ||
224 | __u32 im6_pad; /* padding for 64 bit arch */ | ||
225 | struct in6_addr im6_src, im6_dst; | ||
226 | }; | ||
227 | |||
228 | #endif | ||
diff --git a/include/linux/mtio.h b/include/linux/mtio.h index 6f8d2d45a8fb..ef01d6aa5934 100644 --- a/include/linux/mtio.h +++ b/include/linux/mtio.h | |||
@@ -192,6 +192,7 @@ struct mtpos { | |||
192 | #define MT_ST_SCSI2LOGICAL 0x800 | 192 | #define MT_ST_SCSI2LOGICAL 0x800 |
193 | #define MT_ST_SYSV 0x1000 | 193 | #define MT_ST_SYSV 0x1000 |
194 | #define MT_ST_NOWAIT 0x2000 | 194 | #define MT_ST_NOWAIT 0x2000 |
195 | #define MT_ST_SILI 0x4000 | ||
195 | 196 | ||
196 | /* The mode parameters to be controlled. Parameter chosen with bits 20-28 */ | 197 | /* The mode parameters to be controlled. Parameter chosen with bits 20-28 */ |
197 | #define MT_ST_CLEAR_DEFAULT 0xfffff | 198 | #define MT_ST_CLEAR_DEFAULT 0xfffff |
diff --git a/include/linux/net.h b/include/linux/net.h index c414d90e647b..71f7dd559285 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #define _LINUX_NET_H | 19 | #define _LINUX_NET_H |
20 | 20 | ||
21 | #include <linux/wait.h> | 21 | #include <linux/wait.h> |
22 | #include <linux/socket.h> | ||
22 | #include <asm/socket.h> | 23 | #include <asm/socket.h> |
23 | 24 | ||
24 | struct poll_table_struct; | 25 | struct poll_table_struct; |
@@ -26,7 +27,7 @@ struct pipe_inode_info; | |||
26 | struct inode; | 27 | struct inode; |
27 | struct net; | 28 | struct net; |
28 | 29 | ||
29 | #define NPROTO 34 /* should be enough for now.. */ | 30 | #define NPROTO AF_MAX |
30 | 31 | ||
31 | #define SYS_SOCKET 1 /* sys_socket(2) */ | 32 | #define SYS_SOCKET 1 /* sys_socket(2) */ |
32 | #define SYS_BIND 2 /* sys_bind(2) */ | 33 | #define SYS_BIND 2 /* sys_bind(2) */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ee81906b5164..7c1d4466583b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -710,8 +710,10 @@ struct net_device | |||
710 | void (*poll_controller)(struct net_device *dev); | 710 | void (*poll_controller)(struct net_device *dev); |
711 | #endif | 711 | #endif |
712 | 712 | ||
713 | #ifdef CONFIG_NET_NS | ||
713 | /* Network namespace this network device is inside */ | 714 | /* Network namespace this network device is inside */ |
714 | struct net *nd_net; | 715 | struct net *nd_net; |
716 | #endif | ||
715 | 717 | ||
716 | /* bridge stuff */ | 718 | /* bridge stuff */ |
717 | struct net_bridge_port *br_port; | 719 | struct net_bridge_port *br_port; |
@@ -726,6 +728,10 @@ struct net_device | |||
726 | /* rtnetlink link ops */ | 728 | /* rtnetlink link ops */ |
727 | const struct rtnl_link_ops *rtnl_link_ops; | 729 | const struct rtnl_link_ops *rtnl_link_ops; |
728 | 730 | ||
731 | /* for setting kernel sock attribute on TCP connection setup */ | ||
732 | #define GSO_MAX_SIZE 65536 | ||
733 | unsigned int gso_max_size; | ||
734 | |||
729 | /* The TX queue control structures */ | 735 | /* The TX queue control structures */ |
730 | unsigned int egress_subqueue_count; | 736 | unsigned int egress_subqueue_count; |
731 | struct net_device_subqueue egress_subqueue[1]; | 737 | struct net_device_subqueue egress_subqueue[1]; |
@@ -735,6 +741,28 @@ struct net_device | |||
735 | #define NETDEV_ALIGN 32 | 741 | #define NETDEV_ALIGN 32 |
736 | #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) | 742 | #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) |
737 | 743 | ||
744 | /* | ||
745 | * Net namespace inlines | ||
746 | */ | ||
747 | static inline | ||
748 | struct net *dev_net(const struct net_device *dev) | ||
749 | { | ||
750 | #ifdef CONFIG_NET_NS | ||
751 | return dev->nd_net; | ||
752 | #else | ||
753 | return &init_net; | ||
754 | #endif | ||
755 | } | ||
756 | |||
757 | static inline | ||
758 | void dev_net_set(struct net_device *dev, struct net *net) | ||
759 | { | ||
760 | #ifdef CONFIG_NET_NS | ||
761 | release_net(dev->nd_net); | ||
762 | dev->nd_net = hold_net(net); | ||
763 | #endif | ||
764 | } | ||
765 | |||
738 | /** | 766 | /** |
739 | * netdev_priv - access network device private data | 767 | * netdev_priv - access network device private data |
740 | * @dev: network device | 768 | * @dev: network device |
@@ -811,7 +839,7 @@ static inline struct net_device *next_net_device(struct net_device *dev) | |||
811 | struct list_head *lh; | 839 | struct list_head *lh; |
812 | struct net *net; | 840 | struct net *net; |
813 | 841 | ||
814 | net = dev->nd_net; | 842 | net = dev_net(dev); |
815 | lh = dev->dev_list.next; | 843 | lh = dev->dev_list.next; |
816 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | 844 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
817 | } | 845 | } |
@@ -1479,6 +1507,12 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) | |||
1479 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); | 1507 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); |
1480 | } | 1508 | } |
1481 | 1509 | ||
1510 | static inline void netif_set_gso_max_size(struct net_device *dev, | ||
1511 | unsigned int size) | ||
1512 | { | ||
1513 | dev->gso_max_size = size; | ||
1514 | } | ||
1515 | |||
1482 | /* On bonding slaves other than the currently active slave, suppress | 1516 | /* On bonding slaves other than the currently active slave, suppress |
1483 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and | 1517 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and |
1484 | * ARP on active-backup slaves with arp_validate enabled. | 1518 | * ARP on active-backup slaves with arp_validate enabled. |
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index f0680c2bee73..e4c66593b5c6 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
@@ -6,11 +6,13 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/skbuff.h> | 7 | #include <linux/skbuff.h> |
8 | #include <linux/net.h> | 8 | #include <linux/net.h> |
9 | #include <linux/netdevice.h> | ||
9 | #include <linux/if.h> | 10 | #include <linux/if.h> |
10 | #include <linux/in.h> | 11 | #include <linux/in.h> |
11 | #include <linux/in6.h> | 12 | #include <linux/in6.h> |
12 | #include <linux/wait.h> | 13 | #include <linux/wait.h> |
13 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | #include <net/net_namespace.h> | ||
14 | #endif | 16 | #endif |
15 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
16 | 18 | ||
@@ -61,13 +63,21 @@ union nf_inet_addr { | |||
61 | #ifdef __KERNEL__ | 63 | #ifdef __KERNEL__ |
62 | #ifdef CONFIG_NETFILTER | 64 | #ifdef CONFIG_NETFILTER |
63 | 65 | ||
66 | static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, | ||
67 | const union nf_inet_addr *a2) | ||
68 | { | ||
69 | return a1->all[0] == a2->all[0] && | ||
70 | a1->all[1] == a2->all[1] && | ||
71 | a1->all[2] == a2->all[2] && | ||
72 | a1->all[3] == a2->all[3]; | ||
73 | } | ||
74 | |||
64 | extern void netfilter_init(void); | 75 | extern void netfilter_init(void); |
65 | 76 | ||
66 | /* Largest hook number + 1 */ | 77 | /* Largest hook number + 1 */ |
67 | #define NF_MAX_HOOKS 8 | 78 | #define NF_MAX_HOOKS 8 |
68 | 79 | ||
69 | struct sk_buff; | 80 | struct sk_buff; |
70 | struct net_device; | ||
71 | 81 | ||
72 | typedef unsigned int nf_hookfn(unsigned int hooknum, | 82 | typedef unsigned int nf_hookfn(unsigned int hooknum, |
73 | struct sk_buff *skb, | 83 | struct sk_buff *skb, |
@@ -224,6 +234,11 @@ struct nf_afinfo { | |||
224 | unsigned short family; | 234 | unsigned short family; |
225 | __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, | 235 | __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, |
226 | unsigned int dataoff, u_int8_t protocol); | 236 | unsigned int dataoff, u_int8_t protocol); |
237 | __sum16 (*checksum_partial)(struct sk_buff *skb, | ||
238 | unsigned int hook, | ||
239 | unsigned int dataoff, | ||
240 | unsigned int len, | ||
241 | u_int8_t protocol); | ||
227 | int (*route)(struct dst_entry **dst, struct flowi *fl); | 242 | int (*route)(struct dst_entry **dst, struct flowi *fl); |
228 | void (*saveroute)(const struct sk_buff *skb, | 243 | void (*saveroute)(const struct sk_buff *skb, |
229 | struct nf_queue_entry *entry); | 244 | struct nf_queue_entry *entry); |
@@ -253,6 +268,23 @@ nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, | |||
253 | return csum; | 268 | return csum; |
254 | } | 269 | } |
255 | 270 | ||
271 | static inline __sum16 | ||
272 | nf_checksum_partial(struct sk_buff *skb, unsigned int hook, | ||
273 | unsigned int dataoff, unsigned int len, | ||
274 | u_int8_t protocol, unsigned short family) | ||
275 | { | ||
276 | const struct nf_afinfo *afinfo; | ||
277 | __sum16 csum = 0; | ||
278 | |||
279 | rcu_read_lock(); | ||
280 | afinfo = nf_get_afinfo(family); | ||
281 | if (afinfo) | ||
282 | csum = afinfo->checksum_partial(skb, hook, dataoff, len, | ||
283 | protocol); | ||
284 | rcu_read_unlock(); | ||
285 | return csum; | ||
286 | } | ||
287 | |||
256 | extern int nf_register_afinfo(const struct nf_afinfo *afinfo); | 288 | extern int nf_register_afinfo(const struct nf_afinfo *afinfo); |
257 | extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo); | 289 | extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo); |
258 | 290 | ||
@@ -311,5 +343,56 @@ extern void (*nf_ct_destroy)(struct nf_conntrack *); | |||
311 | static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} | 343 | static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} |
312 | #endif | 344 | #endif |
313 | 345 | ||
346 | static inline struct net *nf_pre_routing_net(const struct net_device *in, | ||
347 | const struct net_device *out) | ||
348 | { | ||
349 | #ifdef CONFIG_NET_NS | ||
350 | return in->nd_net; | ||
351 | #else | ||
352 | return &init_net; | ||
353 | #endif | ||
354 | } | ||
355 | |||
356 | static inline struct net *nf_local_in_net(const struct net_device *in, | ||
357 | const struct net_device *out) | ||
358 | { | ||
359 | #ifdef CONFIG_NET_NS | ||
360 | return in->nd_net; | ||
361 | #else | ||
362 | return &init_net; | ||
363 | #endif | ||
364 | } | ||
365 | |||
366 | static inline struct net *nf_forward_net(const struct net_device *in, | ||
367 | const struct net_device *out) | ||
368 | { | ||
369 | #ifdef CONFIG_NET_NS | ||
370 | BUG_ON(in->nd_net != out->nd_net); | ||
371 | return in->nd_net; | ||
372 | #else | ||
373 | return &init_net; | ||
374 | #endif | ||
375 | } | ||
376 | |||
377 | static inline struct net *nf_local_out_net(const struct net_device *in, | ||
378 | const struct net_device *out) | ||
379 | { | ||
380 | #ifdef CONFIG_NET_NS | ||
381 | return out->nd_net; | ||
382 | #else | ||
383 | return &init_net; | ||
384 | #endif | ||
385 | } | ||
386 | |||
387 | static inline struct net *nf_post_routing_net(const struct net_device *in, | ||
388 | const struct net_device *out) | ||
389 | { | ||
390 | #ifdef CONFIG_NET_NS | ||
391 | return out->nd_net; | ||
392 | #else | ||
393 | return &init_net; | ||
394 | #endif | ||
395 | } | ||
396 | |||
314 | #endif /*__KERNEL__*/ | 397 | #endif /*__KERNEL__*/ |
315 | #endif /*__LINUX_NETFILTER_H*/ | 398 | #endif /*__LINUX_NETFILTER_H*/ |
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h new file mode 100644 index 000000000000..40dcc82058d1 --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_dccp.h | |||
@@ -0,0 +1,40 @@ | |||
1 | #ifndef _NF_CONNTRACK_DCCP_H | ||
2 | #define _NF_CONNTRACK_DCCP_H | ||
3 | |||
4 | /* Exposed to userspace over nfnetlink */ | ||
5 | enum ct_dccp_states { | ||
6 | CT_DCCP_NONE, | ||
7 | CT_DCCP_REQUEST, | ||
8 | CT_DCCP_RESPOND, | ||
9 | CT_DCCP_PARTOPEN, | ||
10 | CT_DCCP_OPEN, | ||
11 | CT_DCCP_CLOSEREQ, | ||
12 | CT_DCCP_CLOSING, | ||
13 | CT_DCCP_TIMEWAIT, | ||
14 | CT_DCCP_IGNORE, | ||
15 | CT_DCCP_INVALID, | ||
16 | __CT_DCCP_MAX | ||
17 | }; | ||
18 | #define CT_DCCP_MAX (__CT_DCCP_MAX - 1) | ||
19 | |||
20 | enum ct_dccp_roles { | ||
21 | CT_DCCP_ROLE_CLIENT, | ||
22 | CT_DCCP_ROLE_SERVER, | ||
23 | __CT_DCCP_ROLE_MAX | ||
24 | }; | ||
25 | #define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) | ||
26 | |||
27 | #ifdef __KERNEL__ | ||
28 | #include <net/netfilter/nf_conntrack_tuple.h> | ||
29 | |||
30 | struct nf_ct_dccp { | ||
31 | u_int8_t role[IP_CT_DIR_MAX]; | ||
32 | u_int8_t state; | ||
33 | u_int8_t last_pkt; | ||
34 | u_int8_t last_dir; | ||
35 | u_int64_t handshake_seq; | ||
36 | }; | ||
37 | |||
38 | #endif /* __KERNEL__ */ | ||
39 | |||
40 | #endif /* _NF_CONNTRACK_DCCP_H */ | ||
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index 8e5ce1ca7bfc..5da04e586a3f 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h | |||
@@ -5,37 +5,164 @@ | |||
5 | #define SIP_PORT 5060 | 5 | #define SIP_PORT 5060 |
6 | #define SIP_TIMEOUT 3600 | 6 | #define SIP_TIMEOUT 3600 |
7 | 7 | ||
8 | enum sip_header_pos { | 8 | struct nf_ct_sip_master { |
9 | POS_REG_REQ_URI, | 9 | unsigned int register_cseq; |
10 | POS_REQ_URI, | 10 | }; |
11 | POS_FROM, | 11 | |
12 | POS_TO, | 12 | enum sip_expectation_classes { |
13 | POS_VIA, | 13 | SIP_EXPECT_SIGNALLING, |
14 | POS_CONTACT, | 14 | SIP_EXPECT_AUDIO, |
15 | POS_CONTENT, | 15 | SIP_EXPECT_VIDEO, |
16 | POS_MEDIA, | 16 | __SIP_EXPECT_MAX |
17 | POS_OWNER_IP4, | 17 | }; |
18 | POS_CONNECTION_IP4, | 18 | #define SIP_EXPECT_MAX (__SIP_EXPECT_MAX - 1) |
19 | POS_OWNER_IP6, | 19 | |
20 | POS_CONNECTION_IP6, | 20 | struct sdp_media_type { |
21 | POS_SDP_HEADER, | 21 | const char *name; |
22 | unsigned int len; | ||
23 | enum sip_expectation_classes class; | ||
24 | }; | ||
25 | |||
26 | #define SDP_MEDIA_TYPE(__name, __class) \ | ||
27 | { \ | ||
28 | .name = (__name), \ | ||
29 | .len = sizeof(__name) - 1, \ | ||
30 | .class = (__class), \ | ||
31 | } | ||
32 | |||
33 | struct sip_handler { | ||
34 | const char *method; | ||
35 | unsigned int len; | ||
36 | int (*request)(struct sk_buff *skb, | ||
37 | const char **dptr, unsigned int *datalen, | ||
38 | unsigned int cseq); | ||
39 | int (*response)(struct sk_buff *skb, | ||
40 | const char **dptr, unsigned int *datalen, | ||
41 | unsigned int cseq, unsigned int code); | ||
42 | }; | ||
43 | |||
44 | #define SIP_HANDLER(__method, __request, __response) \ | ||
45 | { \ | ||
46 | .method = (__method), \ | ||
47 | .len = sizeof(__method) - 1, \ | ||
48 | .request = (__request), \ | ||
49 | .response = (__response), \ | ||
50 | } | ||
51 | |||
52 | struct sip_header { | ||
53 | const char *name; | ||
54 | const char *cname; | ||
55 | const char *search; | ||
56 | unsigned int len; | ||
57 | unsigned int clen; | ||
58 | unsigned int slen; | ||
59 | int (*match_len)(const struct nf_conn *ct, | ||
60 | const char *dptr, const char *limit, | ||
61 | int *shift); | ||
62 | }; | ||
63 | |||
64 | #define __SIP_HDR(__name, __cname, __search, __match) \ | ||
65 | { \ | ||
66 | .name = (__name), \ | ||
67 | .len = sizeof(__name) - 1, \ | ||
68 | .cname = (__cname), \ | ||
69 | .clen = (__cname) ? sizeof(__cname) - 1 : 0, \ | ||
70 | .search = (__search), \ | ||
71 | .slen = (__search) ? sizeof(__search) - 1 : 0, \ | ||
72 | .match_len = (__match), \ | ||
73 | } | ||
74 | |||
75 | #define SIP_HDR(__name, __cname, __search, __match) \ | ||
76 | __SIP_HDR(__name, __cname, __search, __match) | ||
77 | |||
78 | #define SDP_HDR(__name, __search, __match) \ | ||
79 | __SIP_HDR(__name, NULL, __search, __match) | ||
80 | |||
81 | enum sip_header_types { | ||
82 | SIP_HDR_CSEQ, | ||
83 | SIP_HDR_FROM, | ||
84 | SIP_HDR_TO, | ||
85 | SIP_HDR_CONTACT, | ||
86 | SIP_HDR_VIA, | ||
87 | SIP_HDR_EXPIRES, | ||
88 | SIP_HDR_CONTENT_LENGTH, | ||
89 | }; | ||
90 | |||
91 | enum sdp_header_types { | ||
92 | SDP_HDR_UNSPEC, | ||
93 | SDP_HDR_VERSION, | ||
94 | SDP_HDR_OWNER_IP4, | ||
95 | SDP_HDR_CONNECTION_IP4, | ||
96 | SDP_HDR_OWNER_IP6, | ||
97 | SDP_HDR_CONNECTION_IP6, | ||
98 | SDP_HDR_MEDIA, | ||
22 | }; | 99 | }; |
23 | 100 | ||
24 | extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, | 101 | extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, |
25 | enum ip_conntrack_info ctinfo, | 102 | const char **dptr, |
26 | struct nf_conn *ct, | 103 | unsigned int *datalen); |
27 | const char **dptr); | 104 | extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, |
28 | extern unsigned int (*nf_nat_sdp_hook)(struct sk_buff *skb, | 105 | const char **dptr, |
29 | enum ip_conntrack_info ctinfo, | 106 | unsigned int *datalen, |
30 | struct nf_conntrack_expect *exp, | 107 | struct nf_conntrack_expect *exp, |
31 | const char *dptr); | 108 | unsigned int matchoff, |
32 | 109 | unsigned int matchlen); | |
33 | extern int ct_sip_get_info(const struct nf_conn *ct, const char *dptr, | 110 | extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, |
34 | size_t dlen, unsigned int *matchoff, | 111 | const char **dptr, |
35 | unsigned int *matchlen, enum sip_header_pos pos); | 112 | unsigned int dataoff, |
36 | extern int ct_sip_lnlen(const char *line, const char *limit); | 113 | unsigned int *datalen, |
37 | extern const char *ct_sip_search(const char *needle, const char *haystack, | 114 | enum sdp_header_types type, |
38 | size_t needle_len, size_t haystack_len, | 115 | enum sdp_header_types term, |
39 | int case_sensitive); | 116 | const union nf_inet_addr *addr); |
117 | extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, | ||
118 | const char **dptr, | ||
119 | unsigned int *datalen, | ||
120 | unsigned int matchoff, | ||
121 | unsigned int matchlen, | ||
122 | u_int16_t port); | ||
123 | extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, | ||
124 | const char **dptr, | ||
125 | unsigned int dataoff, | ||
126 | unsigned int *datalen, | ||
127 | const union nf_inet_addr *addr); | ||
128 | extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, | ||
129 | const char **dptr, | ||
130 | unsigned int *datalen, | ||
131 | struct nf_conntrack_expect *rtp_exp, | ||
132 | struct nf_conntrack_expect *rtcp_exp, | ||
133 | unsigned int mediaoff, | ||
134 | unsigned int medialen, | ||
135 | union nf_inet_addr *rtp_addr); | ||
136 | |||
137 | extern int ct_sip_parse_request(const struct nf_conn *ct, | ||
138 | const char *dptr, unsigned int datalen, | ||
139 | unsigned int *matchoff, unsigned int *matchlen, | ||
140 | union nf_inet_addr *addr, __be16 *port); | ||
141 | extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr, | ||
142 | unsigned int dataoff, unsigned int datalen, | ||
143 | enum sip_header_types type, | ||
144 | unsigned int *matchoff, unsigned int *matchlen); | ||
145 | extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, | ||
146 | unsigned int *dataoff, unsigned int datalen, | ||
147 | enum sip_header_types type, int *in_header, | ||
148 | unsigned int *matchoff, unsigned int *matchlen, | ||
149 | union nf_inet_addr *addr, __be16 *port); | ||
150 | extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, | ||
151 | unsigned int dataoff, unsigned int datalen, | ||
152 | const char *name, | ||
153 | unsigned int *matchoff, unsigned int *matchlen, | ||
154 | union nf_inet_addr *addr); | ||
155 | extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, | ||
156 | unsigned int off, unsigned int datalen, | ||
157 | const char *name, | ||
158 | unsigned int *matchoff, unsigned int *matchen, | ||
159 | unsigned int *val); | ||
160 | |||
161 | extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, | ||
162 | unsigned int dataoff, unsigned int datalen, | ||
163 | enum sdp_header_types type, | ||
164 | enum sdp_header_types term, | ||
165 | unsigned int *matchoff, unsigned int *matchlen); | ||
166 | |||
40 | #endif /* __KERNEL__ */ | 167 | #endif /* __KERNEL__ */ |
41 | #endif /* __NF_CONNTRACK_SIP_H__ */ | 168 | #endif /* __NF_CONNTRACK_SIP_H__ */ |
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index e3e1533aba2d..0a383ac083cb 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h | |||
@@ -80,6 +80,7 @@ enum ctattr_l4proto { | |||
80 | enum ctattr_protoinfo { | 80 | enum ctattr_protoinfo { |
81 | CTA_PROTOINFO_UNSPEC, | 81 | CTA_PROTOINFO_UNSPEC, |
82 | CTA_PROTOINFO_TCP, | 82 | CTA_PROTOINFO_TCP, |
83 | CTA_PROTOINFO_DCCP, | ||
83 | __CTA_PROTOINFO_MAX | 84 | __CTA_PROTOINFO_MAX |
84 | }; | 85 | }; |
85 | #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) | 86 | #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) |
@@ -95,6 +96,13 @@ enum ctattr_protoinfo_tcp { | |||
95 | }; | 96 | }; |
96 | #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1) | 97 | #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1) |
97 | 98 | ||
99 | enum ctattr_protoinfo_dccp { | ||
100 | CTA_PROTOINFO_DCCP_UNSPEC, | ||
101 | CTA_PROTOINFO_DCCP_STATE, | ||
102 | __CTA_PROTOINFO_DCCP_MAX, | ||
103 | }; | ||
104 | #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) | ||
105 | |||
98 | enum ctattr_counters { | 106 | enum ctattr_counters { |
99 | CTA_COUNTERS_UNSPEC, | 107 | CTA_COUNTERS_UNSPEC, |
100 | CTA_COUNTERS_PACKETS, /* old 64bit counters */ | 108 | CTA_COUNTERS_PACKETS, /* old 64bit counters */ |
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index b2c62cc618f5..2326296b6f25 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -430,13 +430,13 @@ extern int xt_compat_add_offset(int af, unsigned int offset, short delta); | |||
430 | extern void xt_compat_flush_offsets(int af); | 430 | extern void xt_compat_flush_offsets(int af); |
431 | extern short xt_compat_calc_jump(int af, unsigned int offset); | 431 | extern short xt_compat_calc_jump(int af, unsigned int offset); |
432 | 432 | ||
433 | extern int xt_compat_match_offset(struct xt_match *match); | 433 | extern int xt_compat_match_offset(const struct xt_match *match); |
434 | extern int xt_compat_match_from_user(struct xt_entry_match *m, | 434 | extern int xt_compat_match_from_user(struct xt_entry_match *m, |
435 | void **dstptr, unsigned int *size); | 435 | void **dstptr, unsigned int *size); |
436 | extern int xt_compat_match_to_user(struct xt_entry_match *m, | 436 | extern int xt_compat_match_to_user(struct xt_entry_match *m, |
437 | void __user **dstptr, unsigned int *size); | 437 | void __user **dstptr, unsigned int *size); |
438 | 438 | ||
439 | extern int xt_compat_target_offset(struct xt_target *target); | 439 | extern int xt_compat_target_offset(const struct xt_target *target); |
440 | extern void xt_compat_target_from_user(struct xt_entry_target *t, | 440 | extern void xt_compat_target_from_user(struct xt_entry_target *t, |
441 | void **dstptr, unsigned int *size); | 441 | void **dstptr, unsigned int *size); |
442 | extern int xt_compat_target_to_user(struct xt_entry_target *t, | 442 | extern int xt_compat_target_to_user(struct xt_entry_target *t, |
diff --git a/include/linux/netfilter/xt_sctp.h b/include/linux/netfilter/xt_sctp.h index dd5a4fd4cfd3..32000ba6ecef 100644 --- a/include/linux/netfilter/xt_sctp.h +++ b/include/linux/netfilter/xt_sctp.h | |||
@@ -37,68 +37,54 @@ struct xt_sctp_info { | |||
37 | 37 | ||
38 | #define SCTP_CHUNKMAP_SET(chunkmap, type) \ | 38 | #define SCTP_CHUNKMAP_SET(chunkmap, type) \ |
39 | do { \ | 39 | do { \ |
40 | chunkmap[type / bytes(u_int32_t)] |= \ | 40 | (chunkmap)[type / bytes(u_int32_t)] |= \ |
41 | 1 << (type % bytes(u_int32_t)); \ | 41 | 1 << (type % bytes(u_int32_t)); \ |
42 | } while (0) | 42 | } while (0) |
43 | 43 | ||
44 | #define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \ | 44 | #define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \ |
45 | do { \ | 45 | do { \ |
46 | chunkmap[type / bytes(u_int32_t)] &= \ | 46 | (chunkmap)[type / bytes(u_int32_t)] &= \ |
47 | ~(1 << (type % bytes(u_int32_t))); \ | 47 | ~(1 << (type % bytes(u_int32_t))); \ |
48 | } while (0) | 48 | } while (0) |
49 | 49 | ||
50 | #define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \ | 50 | #define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \ |
51 | ({ \ | 51 | ({ \ |
52 | (chunkmap[type / bytes (u_int32_t)] & \ | 52 | ((chunkmap)[type / bytes (u_int32_t)] & \ |
53 | (1 << (type % bytes (u_int32_t)))) ? 1: 0; \ | 53 | (1 << (type % bytes (u_int32_t)))) ? 1: 0; \ |
54 | }) | 54 | }) |
55 | 55 | ||
56 | #define SCTP_CHUNKMAP_RESET(chunkmap) \ | 56 | #define SCTP_CHUNKMAP_RESET(chunkmap) \ |
57 | do { \ | 57 | memset((chunkmap), 0, sizeof(chunkmap)) |
58 | int i; \ | 58 | |
59 | for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \ | 59 | #define SCTP_CHUNKMAP_SET_ALL(chunkmap) \ |
60 | chunkmap[i] = 0; \ | 60 | memset((chunkmap), ~0U, sizeof(chunkmap)) |
61 | } while (0) | 61 | |
62 | 62 | #define SCTP_CHUNKMAP_COPY(destmap, srcmap) \ | |
63 | #define SCTP_CHUNKMAP_SET_ALL(chunkmap) \ | 63 | memcpy((destmap), (srcmap), sizeof(srcmap)) |
64 | do { \ | 64 | |
65 | int i; \ | 65 | #define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \ |
66 | for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \ | 66 | __sctp_chunkmap_is_clear((chunkmap), ARRAY_SIZE(chunkmap)) |
67 | chunkmap[i] = ~0; \ | 67 | static inline bool |
68 | } while (0) | 68 | __sctp_chunkmap_is_clear(const u_int32_t *chunkmap, unsigned int n) |
69 | 69 | { | |
70 | #define SCTP_CHUNKMAP_COPY(destmap, srcmap) \ | 70 | unsigned int i; |
71 | do { \ | 71 | for (i = 0; i < n; ++i) |
72 | int i; \ | 72 | if (chunkmap[i]) |
73 | for (i = 0; i < ARRAY_SIZE(srcmap); i++) \ | 73 | return false; |
74 | destmap[i] = srcmap[i]; \ | 74 | return true; |
75 | } while (0) | 75 | } |
76 | 76 | ||
77 | #define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \ | 77 | #define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \ |
78 | ({ \ | 78 | __sctp_chunkmap_is_all_set((chunkmap), ARRAY_SIZE(chunkmap)) |
79 | int i; \ | 79 | static inline bool |
80 | int flag = 1; \ | 80 | __sctp_chunkmap_is_all_set(const u_int32_t *chunkmap, unsigned int n) |
81 | for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \ | 81 | { |
82 | if (chunkmap[i]) { \ | 82 | unsigned int i; |
83 | flag = 0; \ | 83 | for (i = 0; i < n; ++i) |
84 | break; \ | 84 | if (chunkmap[i] != ~0U) |
85 | } \ | 85 | return false; |
86 | } \ | 86 | return true; |
87 | flag; \ | 87 | } |
88 | }) | ||
89 | |||
90 | #define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \ | ||
91 | ({ \ | ||
92 | int i; \ | ||
93 | int flag = 1; \ | ||
94 | for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \ | ||
95 | if (chunkmap[i] != ~0) { \ | ||
96 | flag = 0; \ | ||
97 | break; \ | ||
98 | } \ | ||
99 | } \ | ||
100 | flag; \ | ||
101 | }) | ||
102 | 88 | ||
103 | #endif /* _XT_SCTP_H_ */ | 89 | #endif /* _XT_SCTP_H_ */ |
104 | 90 | ||
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index db223ca92c8b..dd9c97f2d436 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h | |||
@@ -23,8 +23,6 @@ | |||
23 | 23 | ||
24 | #define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN | 24 | #define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN |
25 | #define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN | 25 | #define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN |
26 | #define arpt_target xt_target | ||
27 | #define arpt_table xt_table | ||
28 | 26 | ||
29 | #define ARPT_DEV_ADDR_LEN_MAX 16 | 27 | #define ARPT_DEV_ADDR_LEN_MAX 16 |
30 | 28 | ||
@@ -266,20 +264,15 @@ struct arpt_error | |||
266 | .target.errorname = "ERROR", \ | 264 | .target.errorname = "ERROR", \ |
267 | } | 265 | } |
268 | 266 | ||
269 | #define arpt_register_target(tgt) \ | 267 | extern struct xt_table *arpt_register_table(struct net *net, |
270 | ({ (tgt)->family = NF_ARP; \ | 268 | struct xt_table *table, |
271 | xt_register_target(tgt); }) | 269 | const struct arpt_replace *repl); |
272 | #define arpt_unregister_target(tgt) xt_unregister_target(tgt) | 270 | extern void arpt_unregister_table(struct xt_table *table); |
273 | |||
274 | extern struct arpt_table *arpt_register_table(struct net *net, | ||
275 | struct arpt_table *table, | ||
276 | const struct arpt_replace *repl); | ||
277 | extern void arpt_unregister_table(struct arpt_table *table); | ||
278 | extern unsigned int arpt_do_table(struct sk_buff *skb, | 271 | extern unsigned int arpt_do_table(struct sk_buff *skb, |
279 | unsigned int hook, | 272 | unsigned int hook, |
280 | const struct net_device *in, | 273 | const struct net_device *in, |
281 | const struct net_device *out, | 274 | const struct net_device *out, |
282 | struct arpt_table *table); | 275 | struct xt_table *table); |
283 | 276 | ||
284 | #define ARPT_ALIGN(s) XT_ALIGN(s) | 277 | #define ARPT_ALIGN(s) XT_ALIGN(s) |
285 | 278 | ||
diff --git a/include/linux/netfilter_bridge/ebt_nflog.h b/include/linux/netfilter_bridge/ebt_nflog.h new file mode 100644 index 000000000000..052817849b83 --- /dev/null +++ b/include/linux/netfilter_bridge/ebt_nflog.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef __LINUX_BRIDGE_EBT_NFLOG_H | ||
2 | #define __LINUX_BRIDGE_EBT_NFLOG_H | ||
3 | |||
4 | #define EBT_NFLOG_MASK 0x0 | ||
5 | |||
6 | #define EBT_NFLOG_PREFIX_SIZE 64 | ||
7 | #define EBT_NFLOG_WATCHER "nflog" | ||
8 | |||
9 | #define EBT_NFLOG_DEFAULT_GROUP 0x1 | ||
10 | #define EBT_NFLOG_DEFAULT_THRESHOLD 1 | ||
11 | |||
12 | struct ebt_nflog_info { | ||
13 | u_int32_t len; | ||
14 | u_int16_t group; | ||
15 | u_int16_t threshold; | ||
16 | u_int16_t flags; | ||
17 | u_int16_t pad; | ||
18 | char prefix[EBT_NFLOG_PREFIX_SIZE]; | ||
19 | }; | ||
20 | |||
21 | #endif /* __LINUX_BRIDGE_EBT_NFLOG_H */ | ||
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 9a10092e358c..650318b0c405 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h | |||
@@ -62,8 +62,6 @@ enum nf_ip_hook_priorities { | |||
62 | NF_IP_PRI_FILTER = 0, | 62 | NF_IP_PRI_FILTER = 0, |
63 | NF_IP_PRI_NAT_SRC = 100, | 63 | NF_IP_PRI_NAT_SRC = 100, |
64 | NF_IP_PRI_SELINUX_LAST = 225, | 64 | NF_IP_PRI_SELINUX_LAST = 225, |
65 | NF_IP_PRI_CONNTRACK_HELPER = INT_MAX - 2, | ||
66 | NF_IP_PRI_NAT_SEQ_ADJUST = INT_MAX - 1, | ||
67 | NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX, | 65 | NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX, |
68 | NF_IP_PRI_LAST = INT_MAX, | 66 | NF_IP_PRI_LAST = INT_MAX, |
69 | }; | 67 | }; |
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 9fecf902419c..ea6517e58b04 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h | |||
@@ -78,6 +78,18 @@ | |||
78 | * or, if no MAC address given, all stations, on the interface identified | 78 | * or, if no MAC address given, all stations, on the interface identified |
79 | * by %NL80211_ATTR_IFINDEX. | 79 | * by %NL80211_ATTR_IFINDEX. |
80 | * | 80 | * |
81 | * @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to | ||
82 | * destination %NL80211_ATTR_MAC on the interface identified by | ||
83 | * %NL80211_ATTR_IFINDEX. | ||
84 | * @NL80211_CMD_SET_MPATH: Set mesh path attributes for mesh path to | ||
85 | * destination %NL80211_ATTR_MAC on the interface identified by | ||
86 | * %NL80211_ATTR_IFINDEX. | ||
87 | * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the | ||
88 | * the interface identified by %NL80211_ATTR_IFINDEX. | ||
89 | * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC | ||
90 | * or, if no MAC address given, all mesh paths, on the interface identified | ||
91 | * by %NL80211_ATTR_IFINDEX. | ||
92 | * | ||
81 | * @NL80211_CMD_MAX: highest used command number | 93 | * @NL80211_CMD_MAX: highest used command number |
82 | * @__NL80211_CMD_AFTER_LAST: internal use | 94 | * @__NL80211_CMD_AFTER_LAST: internal use |
83 | */ | 95 | */ |
@@ -112,6 +124,11 @@ enum nl80211_commands { | |||
112 | 124 | ||
113 | /* add commands here */ | 125 | /* add commands here */ |
114 | 126 | ||
127 | NL80211_CMD_GET_MPATH, | ||
128 | NL80211_CMD_SET_MPATH, | ||
129 | NL80211_CMD_NEW_MPATH, | ||
130 | NL80211_CMD_DEL_MPATH, | ||
131 | |||
115 | /* used to define NL80211_CMD_MAX below */ | 132 | /* used to define NL80211_CMD_MAX below */ |
116 | __NL80211_CMD_AFTER_LAST, | 133 | __NL80211_CMD_AFTER_LAST, |
117 | NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 | 134 | NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 |
@@ -157,9 +174,23 @@ enum nl80211_commands { | |||
157 | * restriction (at most %NL80211_MAX_SUPP_RATES). | 174 | * restriction (at most %NL80211_MAX_SUPP_RATES). |
158 | * @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station | 175 | * @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station |
159 | * to, or the AP interface the station was originally added to to. | 176 | * to, or the AP interface the station was originally added to to. |
160 | * @NL80211_ATTR_STA_STATS: statistics for a station, part of station info | 177 | * @NL80211_ATTR_STA_INFO: information about a station, part of station info |
161 | * given for %NL80211_CMD_GET_STATION, nested attribute containing | 178 | * given for %NL80211_CMD_GET_STATION, nested attribute containing |
162 | * info as possible, see &enum nl80211_sta_stats. | 179 | * info as possible, see &enum nl80211_sta_info. |
180 | * | ||
181 | * @NL80211_ATTR_WIPHY_BANDS: Information about an operating bands, | ||
182 | * consisting of a nested array. | ||
183 | * | ||
184 | * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes). | ||
185 | * @NL80211_ATTR_PLINK_ACTION: action to perform on the mesh peer link. | ||
186 | * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path. | ||
187 | * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path | ||
188 | * info given for %NL80211_CMD_GET_MPATH, nested attribute described at | ||
189 | * &enum nl80211_mpath_info. | ||
190 | * | ||
191 | * | ||
192 | * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of | ||
193 | * &enum nl80211_mntr_flags. | ||
163 | * | 194 | * |
164 | * @NL80211_ATTR_MAX: highest attribute number currently defined | 195 | * @NL80211_ATTR_MAX: highest attribute number currently defined |
165 | * @__NL80211_ATTR_AFTER_LAST: internal use | 196 | * @__NL80211_ATTR_AFTER_LAST: internal use |
@@ -193,10 +224,19 @@ enum nl80211_attrs { | |||
193 | NL80211_ATTR_STA_LISTEN_INTERVAL, | 224 | NL80211_ATTR_STA_LISTEN_INTERVAL, |
194 | NL80211_ATTR_STA_SUPPORTED_RATES, | 225 | NL80211_ATTR_STA_SUPPORTED_RATES, |
195 | NL80211_ATTR_STA_VLAN, | 226 | NL80211_ATTR_STA_VLAN, |
196 | NL80211_ATTR_STA_STATS, | 227 | NL80211_ATTR_STA_INFO, |
228 | |||
229 | NL80211_ATTR_WIPHY_BANDS, | ||
230 | |||
231 | NL80211_ATTR_MNTR_FLAGS, | ||
197 | 232 | ||
198 | /* add attributes here, update the policy in nl80211.c */ | 233 | /* add attributes here, update the policy in nl80211.c */ |
199 | 234 | ||
235 | NL80211_ATTR_MESH_ID, | ||
236 | NL80211_ATTR_STA_PLINK_ACTION, | ||
237 | NL80211_ATTR_MPATH_NEXT_HOP, | ||
238 | NL80211_ATTR_MPATH_INFO, | ||
239 | |||
200 | __NL80211_ATTR_AFTER_LAST, | 240 | __NL80211_ATTR_AFTER_LAST, |
201 | NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 | 241 | NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 |
202 | }; | 242 | }; |
@@ -213,6 +253,7 @@ enum nl80211_attrs { | |||
213 | * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points | 253 | * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points |
214 | * @NL80211_IFTYPE_WDS: wireless distribution interface | 254 | * @NL80211_IFTYPE_WDS: wireless distribution interface |
215 | * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames | 255 | * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames |
256 | * @NL80211_IFTYPE_MESH_POINT: mesh point | ||
216 | * @NL80211_IFTYPE_MAX: highest interface type number currently defined | 257 | * @NL80211_IFTYPE_MAX: highest interface type number currently defined |
217 | * @__NL80211_IFTYPE_AFTER_LAST: internal use | 258 | * @__NL80211_IFTYPE_AFTER_LAST: internal use |
218 | * | 259 | * |
@@ -228,6 +269,7 @@ enum nl80211_iftype { | |||
228 | NL80211_IFTYPE_AP_VLAN, | 269 | NL80211_IFTYPE_AP_VLAN, |
229 | NL80211_IFTYPE_WDS, | 270 | NL80211_IFTYPE_WDS, |
230 | NL80211_IFTYPE_MONITOR, | 271 | NL80211_IFTYPE_MONITOR, |
272 | NL80211_IFTYPE_MESH_POINT, | ||
231 | 273 | ||
232 | /* keep last */ | 274 | /* keep last */ |
233 | __NL80211_IFTYPE_AFTER_LAST, | 275 | __NL80211_IFTYPE_AFTER_LAST, |
@@ -257,27 +299,167 @@ enum nl80211_sta_flags { | |||
257 | }; | 299 | }; |
258 | 300 | ||
259 | /** | 301 | /** |
260 | * enum nl80211_sta_stats - station statistics | 302 | * enum nl80211_sta_info - station information |
261 | * | 303 | * |
262 | * These attribute types are used with %NL80211_ATTR_STA_STATS | 304 | * These attribute types are used with %NL80211_ATTR_STA_INFO |
263 | * when getting information about a station. | 305 | * when getting information about a station. |
264 | * | 306 | * |
265 | * @__NL80211_STA_STAT_INVALID: attribute number 0 is reserved | 307 | * @__NL80211_STA_INFO_INVALID: attribute number 0 is reserved |
266 | * @NL80211_STA_STAT_INACTIVE_TIME: time since last activity (u32, msecs) | 308 | * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs) |
267 | * @NL80211_STA_STAT_RX_BYTES: total received bytes (u32, from this station) | 309 | * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station) |
268 | * @NL80211_STA_STAT_TX_BYTES: total transmitted bytes (u32, to this station) | 310 | * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station) |
269 | * @__NL80211_STA_STAT_AFTER_LAST: internal | 311 | * @__NL80211_STA_INFO_AFTER_LAST: internal |
270 | * @NL80211_STA_STAT_MAX: highest possible station stats attribute | 312 | * @NL80211_STA_INFO_MAX: highest possible station info attribute |
313 | */ | ||
314 | enum nl80211_sta_info { | ||
315 | __NL80211_STA_INFO_INVALID, | ||
316 | NL80211_STA_INFO_INACTIVE_TIME, | ||
317 | NL80211_STA_INFO_RX_BYTES, | ||
318 | NL80211_STA_INFO_TX_BYTES, | ||
319 | NL80211_STA_INFO_LLID, | ||
320 | NL80211_STA_INFO_PLID, | ||
321 | NL80211_STA_INFO_PLINK_STATE, | ||
322 | |||
323 | /* keep last */ | ||
324 | __NL80211_STA_INFO_AFTER_LAST, | ||
325 | NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1 | ||
326 | }; | ||
327 | |||
328 | /** | ||
329 | * enum nl80211_mpath_flags - nl80211 mesh path flags | ||
330 | * | ||
331 | * @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active | ||
332 | * @NL80211_MPATH_FLAG_RESOLVING: the mesh path discovery process is running | ||
333 | * @NL80211_MPATH_FLAG_DSN_VALID: the mesh path contains a valid DSN | ||
334 | * @NL80211_MPATH_FLAG_FIXED: the mesh path has been manually set | ||
335 | * @NL80211_MPATH_FLAG_RESOLVED: the mesh path discovery process succeeded | ||
336 | */ | ||
337 | enum nl80211_mpath_flags { | ||
338 | NL80211_MPATH_FLAG_ACTIVE = 1<<0, | ||
339 | NL80211_MPATH_FLAG_RESOLVING = 1<<1, | ||
340 | NL80211_MPATH_FLAG_DSN_VALID = 1<<2, | ||
341 | NL80211_MPATH_FLAG_FIXED = 1<<3, | ||
342 | NL80211_MPATH_FLAG_RESOLVED = 1<<4, | ||
343 | }; | ||
344 | |||
345 | /** | ||
346 | * enum nl80211_mpath_info - mesh path information | ||
347 | * | ||
348 | * These attribute types are used with %NL80211_ATTR_MPATH_INFO when getting | ||
349 | * information about a mesh path. | ||
350 | * | ||
351 | * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved | ||
352 | * @NL80211_ATTR_MPATH_FRAME_QLEN: number of queued frames for this destination | ||
353 | * @NL80211_ATTR_MPATH_DSN: destination sequence number | ||
354 | * @NL80211_ATTR_MPATH_METRIC: metric (cost) of this mesh path | ||
355 | * @NL80211_ATTR_MPATH_EXPTIME: expiration time for the path, in msec from now | ||
356 | * @NL80211_ATTR_MPATH_FLAGS: mesh path flags, enumerated in | ||
357 | * &enum nl80211_mpath_flags; | ||
358 | * @NL80211_ATTR_MPATH_DISCOVERY_TIMEOUT: total path discovery timeout, in msec | ||
359 | * @NL80211_ATTR_MPATH_DISCOVERY_RETRIES: mesh path discovery retries | ||
360 | */ | ||
361 | enum nl80211_mpath_info { | ||
362 | __NL80211_MPATH_INFO_INVALID, | ||
363 | NL80211_MPATH_INFO_FRAME_QLEN, | ||
364 | NL80211_MPATH_INFO_DSN, | ||
365 | NL80211_MPATH_INFO_METRIC, | ||
366 | NL80211_MPATH_INFO_EXPTIME, | ||
367 | NL80211_MPATH_INFO_FLAGS, | ||
368 | NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, | ||
369 | NL80211_MPATH_INFO_DISCOVERY_RETRIES, | ||
370 | |||
371 | /* keep last */ | ||
372 | __NL80211_MPATH_INFO_AFTER_LAST, | ||
373 | NL80211_MPATH_INFO_MAX = __NL80211_MPATH_INFO_AFTER_LAST - 1 | ||
374 | }; | ||
375 | |||
376 | /** | ||
377 | * enum nl80211_band_attr - band attributes | ||
378 | * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved | ||
379 | * @NL80211_BAND_ATTR_FREQS: supported frequencies in this band, | ||
380 | * an array of nested frequency attributes | ||
381 | * @NL80211_BAND_ATTR_RATES: supported bitrates in this band, | ||
382 | * an array of nested bitrate attributes | ||
383 | */ | ||
384 | enum nl80211_band_attr { | ||
385 | __NL80211_BAND_ATTR_INVALID, | ||
386 | NL80211_BAND_ATTR_FREQS, | ||
387 | NL80211_BAND_ATTR_RATES, | ||
388 | |||
389 | /* keep last */ | ||
390 | __NL80211_BAND_ATTR_AFTER_LAST, | ||
391 | NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 | ||
392 | }; | ||
393 | |||
394 | /** | ||
395 | * enum nl80211_frequency_attr - frequency attributes | ||
396 | * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz | ||
397 | * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current | ||
398 | * regulatory domain. | ||
399 | * @NL80211_FREQUENCY_ATTR_PASSIVE_SCAN: Only passive scanning is | ||
400 | * permitted on this channel in current regulatory domain. | ||
401 | * @NL80211_FREQUENCY_ATTR_NO_IBSS: IBSS networks are not permitted | ||
402 | * on this channel in current regulatory domain. | ||
403 | * @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory | ||
404 | * on this channel in current regulatory domain. | ||
405 | */ | ||
406 | enum nl80211_frequency_attr { | ||
407 | __NL80211_FREQUENCY_ATTR_INVALID, | ||
408 | NL80211_FREQUENCY_ATTR_FREQ, | ||
409 | NL80211_FREQUENCY_ATTR_DISABLED, | ||
410 | NL80211_FREQUENCY_ATTR_PASSIVE_SCAN, | ||
411 | NL80211_FREQUENCY_ATTR_NO_IBSS, | ||
412 | NL80211_FREQUENCY_ATTR_RADAR, | ||
413 | |||
414 | /* keep last */ | ||
415 | __NL80211_FREQUENCY_ATTR_AFTER_LAST, | ||
416 | NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1 | ||
417 | }; | ||
418 | |||
419 | /** | ||
420 | * enum nl80211_bitrate_attr - bitrate attributes | ||
421 | * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps | ||
422 | * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported | ||
423 | * in 2.4 GHz band. | ||
424 | */ | ||
425 | enum nl80211_bitrate_attr { | ||
426 | __NL80211_BITRATE_ATTR_INVALID, | ||
427 | NL80211_BITRATE_ATTR_RATE, | ||
428 | NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE, | ||
429 | |||
430 | /* keep last */ | ||
431 | __NL80211_BITRATE_ATTR_AFTER_LAST, | ||
432 | NL80211_BITRATE_ATTR_MAX = __NL80211_BITRATE_ATTR_AFTER_LAST - 1 | ||
433 | }; | ||
434 | |||
435 | /** | ||
436 | * enum nl80211_mntr_flags - monitor configuration flags | ||
437 | * | ||
438 | * Monitor configuration flags. | ||
439 | * | ||
440 | * @__NL80211_MNTR_FLAG_INVALID: reserved | ||
441 | * | ||
442 | * @NL80211_MNTR_FLAG_FCSFAIL: pass frames with bad FCS | ||
443 | * @NL80211_MNTR_FLAG_PLCPFAIL: pass frames with bad PLCP | ||
444 | * @NL80211_MNTR_FLAG_CONTROL: pass control frames | ||
445 | * @NL80211_MNTR_FLAG_OTHER_BSS: disable BSSID filtering | ||
446 | * @NL80211_MNTR_FLAG_COOK_FRAMES: report frames after processing. | ||
447 | * overrides all other flags. | ||
448 | * | ||
449 | * @__NL80211_MNTR_FLAG_AFTER_LAST: internal use | ||
450 | * @NL80211_MNTR_FLAG_MAX: highest possible monitor flag | ||
271 | */ | 451 | */ |
272 | enum nl80211_sta_stats { | 452 | enum nl80211_mntr_flags { |
273 | __NL80211_STA_STAT_INVALID, | 453 | __NL80211_MNTR_FLAG_INVALID, |
274 | NL80211_STA_STAT_INACTIVE_TIME, | 454 | NL80211_MNTR_FLAG_FCSFAIL, |
275 | NL80211_STA_STAT_RX_BYTES, | 455 | NL80211_MNTR_FLAG_PLCPFAIL, |
276 | NL80211_STA_STAT_TX_BYTES, | 456 | NL80211_MNTR_FLAG_CONTROL, |
457 | NL80211_MNTR_FLAG_OTHER_BSS, | ||
458 | NL80211_MNTR_FLAG_COOK_FRAMES, | ||
277 | 459 | ||
278 | /* keep last */ | 460 | /* keep last */ |
279 | __NL80211_STA_STAT_AFTER_LAST, | 461 | __NL80211_MNTR_FLAG_AFTER_LAST, |
280 | NL80211_STA_STAT_MAX = __NL80211_STA_STAT_AFTER_LAST - 1 | 462 | NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1 |
281 | }; | 463 | }; |
282 | 464 | ||
283 | #endif /* __LINUX_NL80211_H */ | 465 | #endif /* __LINUX_NL80211_H */ |
diff --git a/include/linux/pcounter.h b/include/linux/pcounter.h deleted file mode 100644 index a82d9f2628ca..000000000000 --- a/include/linux/pcounter.h +++ /dev/null | |||
@@ -1,74 +0,0 @@ | |||
1 | #ifndef __LINUX_PCOUNTER_H | ||
2 | #define __LINUX_PCOUNTER_H | ||
3 | /* | ||
4 | * Using a dynamic percpu 'int' variable has a cost : | ||
5 | * 1) Extra dereference | ||
6 | * Current per_cpu_ptr() implementation uses an array per 'percpu variable'. | ||
7 | * 2) memory cost of NR_CPUS*(32+sizeof(void *)) instead of num_possible_cpus()*4 | ||
8 | * | ||
9 | * This pcounter implementation is an abstraction to be able to use | ||
10 | * either a static or a dynamic per cpu variable. | ||
11 | * One dynamic per cpu variable gets a fast & cheap implementation, we can | ||
12 | * change pcounter implementation too. | ||
13 | */ | ||
14 | struct pcounter { | ||
15 | #ifdef CONFIG_SMP | ||
16 | void (*add)(struct pcounter *self, int inc); | ||
17 | int (*getval)(const struct pcounter *self, int cpu); | ||
18 | int *per_cpu_values; | ||
19 | #else | ||
20 | int val; | ||
21 | #endif | ||
22 | }; | ||
23 | |||
24 | #ifdef CONFIG_SMP | ||
25 | #include <linux/percpu.h> | ||
26 | |||
27 | #define DEFINE_PCOUNTER(NAME) \ | ||
28 | static DEFINE_PER_CPU(int, NAME##_pcounter_values); \ | ||
29 | static void NAME##_pcounter_add(struct pcounter *self, int val) \ | ||
30 | { \ | ||
31 | __get_cpu_var(NAME##_pcounter_values) += val; \ | ||
32 | } \ | ||
33 | static int NAME##_pcounter_getval(const struct pcounter *self, int cpu) \ | ||
34 | { \ | ||
35 | return per_cpu(NAME##_pcounter_values, cpu); \ | ||
36 | } \ | ||
37 | |||
38 | #define PCOUNTER_MEMBER_INITIALIZER(NAME, MEMBER) \ | ||
39 | MEMBER = { \ | ||
40 | .add = NAME##_pcounter_add, \ | ||
41 | .getval = NAME##_pcounter_getval, \ | ||
42 | } | ||
43 | |||
44 | |||
45 | static inline void pcounter_add(struct pcounter *self, int inc) | ||
46 | { | ||
47 | self->add(self, inc); | ||
48 | } | ||
49 | |||
50 | extern int pcounter_getval(const struct pcounter *self); | ||
51 | extern int pcounter_alloc(struct pcounter *self); | ||
52 | extern void pcounter_free(struct pcounter *self); | ||
53 | |||
54 | |||
55 | #else /* CONFIG_SMP */ | ||
56 | |||
57 | static inline void pcounter_add(struct pcounter *self, int inc) | ||
58 | { | ||
59 | self->val += inc; | ||
60 | } | ||
61 | |||
62 | static inline int pcounter_getval(const struct pcounter *self) | ||
63 | { | ||
64 | return self->val; | ||
65 | } | ||
66 | |||
67 | #define DEFINE_PCOUNTER(NAME) | ||
68 | #define PCOUNTER_MEMBER_INITIALIZER(NAME, MEMBER) | ||
69 | #define pcounter_alloc(self) 0 | ||
70 | #define pcounter_free(self) | ||
71 | |||
72 | #endif /* CONFIG_SMP */ | ||
73 | |||
74 | #endif /* __LINUX_PCOUNTER_H */ | ||
diff --git a/include/linux/phy.h b/include/linux/phy.h index 5e43ae751412..779cbcd65f62 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -39,7 +39,8 @@ | |||
39 | SUPPORTED_1000baseT_Half | \ | 39 | SUPPORTED_1000baseT_Half | \ |
40 | SUPPORTED_1000baseT_Full) | 40 | SUPPORTED_1000baseT_Full) |
41 | 41 | ||
42 | /* Set phydev->irq to PHY_POLL if interrupts are not supported, | 42 | /* |
43 | * Set phydev->irq to PHY_POLL if interrupts are not supported, | ||
43 | * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if | 44 | * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if |
44 | * the attached driver handles the interrupt | 45 | * the attached driver handles the interrupt |
45 | */ | 46 | */ |
@@ -63,8 +64,6 @@ typedef enum { | |||
63 | PHY_INTERFACE_MODE_RTBI | 64 | PHY_INTERFACE_MODE_RTBI |
64 | } phy_interface_t; | 65 | } phy_interface_t; |
65 | 66 | ||
66 | #define MII_BUS_MAX 4 | ||
67 | |||
68 | 67 | ||
69 | #define PHY_INIT_TIMEOUT 100000 | 68 | #define PHY_INIT_TIMEOUT 100000 |
70 | #define PHY_STATE_TIME 1 | 69 | #define PHY_STATE_TIME 1 |
@@ -74,20 +73,30 @@ typedef enum { | |||
74 | #define PHY_MAX_ADDR 32 | 73 | #define PHY_MAX_ADDR 32 |
75 | 74 | ||
76 | /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ | 75 | /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ |
77 | #define PHY_ID_FMT "%x:%02x" | 76 | #define PHY_ID_FMT "%s:%02x" |
78 | 77 | ||
79 | /* The Bus class for PHYs. Devices which provide access to | 78 | /* |
80 | * PHYs should register using this structure */ | 79 | * Need to be a little smaller than phydev->dev.bus_id to leave room |
80 | * for the ":%02x" | ||
81 | */ | ||
82 | #define MII_BUS_ID_SIZE (BUS_ID_SIZE - 3) | ||
83 | |||
84 | /* | ||
85 | * The Bus class for PHYs. Devices which provide access to | ||
86 | * PHYs should register using this structure | ||
87 | */ | ||
81 | struct mii_bus { | 88 | struct mii_bus { |
82 | const char *name; | 89 | const char *name; |
83 | int id; | 90 | char id[MII_BUS_ID_SIZE]; |
84 | void *priv; | 91 | void *priv; |
85 | int (*read)(struct mii_bus *bus, int phy_id, int regnum); | 92 | int (*read)(struct mii_bus *bus, int phy_id, int regnum); |
86 | int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val); | 93 | int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val); |
87 | int (*reset)(struct mii_bus *bus); | 94 | int (*reset)(struct mii_bus *bus); |
88 | 95 | ||
89 | /* A lock to ensure that only one thing can read/write | 96 | /* |
90 | * the MDIO bus at a time */ | 97 | * A lock to ensure that only one thing can read/write |
98 | * the MDIO bus at a time | ||
99 | */ | ||
91 | struct mutex mdio_lock; | 100 | struct mutex mdio_lock; |
92 | 101 | ||
93 | struct device *dev; | 102 | struct device *dev; |
@@ -98,8 +107,10 @@ struct mii_bus { | |||
98 | /* Phy addresses to be ignored when probing */ | 107 | /* Phy addresses to be ignored when probing */ |
99 | u32 phy_mask; | 108 | u32 phy_mask; |
100 | 109 | ||
101 | /* Pointer to an array of interrupts, each PHY's | 110 | /* |
102 | * interrupt at the index matching its address */ | 111 | * Pointer to an array of interrupts, each PHY's |
112 | * interrupt at the index matching its address | ||
113 | */ | ||
103 | int *irq; | 114 | int *irq; |
104 | }; | 115 | }; |
105 | 116 | ||
@@ -251,7 +262,8 @@ struct phy_device { | |||
251 | /* Bus address of the PHY (0-32) */ | 262 | /* Bus address of the PHY (0-32) */ |
252 | int addr; | 263 | int addr; |
253 | 264 | ||
254 | /* forced speed & duplex (no autoneg) | 265 | /* |
266 | * forced speed & duplex (no autoneg) | ||
255 | * partner speed & duplex & pause (autoneg) | 267 | * partner speed & duplex & pause (autoneg) |
256 | */ | 268 | */ |
257 | int speed; | 269 | int speed; |
@@ -274,8 +286,10 @@ struct phy_device { | |||
274 | 286 | ||
275 | int link_timeout; | 287 | int link_timeout; |
276 | 288 | ||
277 | /* Interrupt number for this PHY | 289 | /* |
278 | * -1 means no interrupt */ | 290 | * Interrupt number for this PHY |
291 | * -1 means no interrupt | ||
292 | */ | ||
279 | int irq; | 293 | int irq; |
280 | 294 | ||
281 | /* private data pointer */ | 295 | /* private data pointer */ |
@@ -325,22 +339,28 @@ struct phy_driver { | |||
325 | u32 features; | 339 | u32 features; |
326 | u32 flags; | 340 | u32 flags; |
327 | 341 | ||
328 | /* Called to initialize the PHY, | 342 | /* |
329 | * including after a reset */ | 343 | * Called to initialize the PHY, |
344 | * including after a reset | ||
345 | */ | ||
330 | int (*config_init)(struct phy_device *phydev); | 346 | int (*config_init)(struct phy_device *phydev); |
331 | 347 | ||
332 | /* Called during discovery. Used to set | 348 | /* |
333 | * up device-specific structures, if any */ | 349 | * Called during discovery. Used to set |
350 | * up device-specific structures, if any | ||
351 | */ | ||
334 | int (*probe)(struct phy_device *phydev); | 352 | int (*probe)(struct phy_device *phydev); |
335 | 353 | ||
336 | /* PHY Power Management */ | 354 | /* PHY Power Management */ |
337 | int (*suspend)(struct phy_device *phydev); | 355 | int (*suspend)(struct phy_device *phydev); |
338 | int (*resume)(struct phy_device *phydev); | 356 | int (*resume)(struct phy_device *phydev); |
339 | 357 | ||
340 | /* Configures the advertisement and resets | 358 | /* |
359 | * Configures the advertisement and resets | ||
341 | * autonegotiation if phydev->autoneg is on, | 360 | * autonegotiation if phydev->autoneg is on, |
342 | * forces the speed to the current settings in phydev | 361 | * forces the speed to the current settings in phydev |
343 | * if phydev->autoneg is off */ | 362 | * if phydev->autoneg is off |
363 | */ | ||
344 | int (*config_aneg)(struct phy_device *phydev); | 364 | int (*config_aneg)(struct phy_device *phydev); |
345 | 365 | ||
346 | /* Determines the negotiated speed and duplex */ | 366 | /* Determines the negotiated speed and duplex */ |
@@ -361,6 +381,7 @@ struct phy_driver { | |||
361 | 381 | ||
362 | int phy_read(struct phy_device *phydev, u16 regnum); | 382 | int phy_read(struct phy_device *phydev, u16 regnum); |
363 | int phy_write(struct phy_device *phydev, u16 regnum, u16 val); | 383 | int phy_write(struct phy_device *phydev, u16 regnum, u16 val); |
384 | int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); | ||
364 | struct phy_device* get_phy_device(struct mii_bus *bus, int addr); | 385 | struct phy_device* get_phy_device(struct mii_bus *bus, int addr); |
365 | int phy_clear_interrupt(struct phy_device *phydev); | 386 | int phy_clear_interrupt(struct phy_device *phydev); |
366 | int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); | 387 | int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); |
diff --git a/include/linux/pim.h b/include/linux/pim.h new file mode 100644 index 000000000000..236ffd317394 --- /dev/null +++ b/include/linux/pim.h | |||
@@ -0,0 +1,45 @@ | |||
1 | #ifndef __LINUX_PIM_H | ||
2 | #define __LINUX_PIM_H | ||
3 | |||
4 | #include <asm/byteorder.h> | ||
5 | |||
6 | #ifndef __KERNEL__ | ||
7 | struct pim { | ||
8 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
9 | __u8 pim_type:4, /* PIM message type */ | ||
10 | pim_ver:4; /* PIM version */ | ||
11 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
12 | __u8 pim_ver:4; /* PIM version */ | ||
13 | pim_type:4; /* PIM message type */ | ||
14 | #endif | ||
15 | __u8 pim_rsv; /* Reserved */ | ||
16 | __be16 pim_cksum; /* Checksum */ | ||
17 | }; | ||
18 | |||
19 | #define PIM_MINLEN 8 | ||
20 | #endif | ||
21 | |||
22 | /* Message types - V1 */ | ||
23 | #define PIM_V1_VERSION __constant_htonl(0x10000000) | ||
24 | #define PIM_V1_REGISTER 1 | ||
25 | |||
26 | /* Message types - V2 */ | ||
27 | #define PIM_VERSION 2 | ||
28 | #define PIM_REGISTER 1 | ||
29 | |||
30 | #if defined(__KERNEL__) | ||
31 | #define PIM_NULL_REGISTER __constant_htonl(0x40000000) | ||
32 | |||
33 | /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ | ||
34 | struct pimreghdr | ||
35 | { | ||
36 | __u8 type; | ||
37 | __u8 reserved; | ||
38 | __be16 csum; | ||
39 | __be32 flags; | ||
40 | }; | ||
41 | |||
42 | struct sk_buff; | ||
43 | extern int pim_rcv_v1(struct sk_buff *); | ||
44 | #endif | ||
45 | #endif | ||
diff --git a/include/linux/quota.h b/include/linux/quota.h index 6e0393a5b2ea..eb560d031acd 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -160,14 +160,18 @@ enum { | |||
160 | 160 | ||
161 | 161 | ||
162 | #ifdef __KERNEL__ | 162 | #ifdef __KERNEL__ |
163 | #include <linux/spinlock.h> | 163 | #include <linux/list.h> |
164 | #include <linux/rwsem.h> | ||
165 | #include <linux/mutex.h> | 164 | #include <linux/mutex.h> |
165 | #include <linux/rwsem.h> | ||
166 | #include <linux/spinlock.h> | ||
167 | #include <linux/wait.h> | ||
166 | 168 | ||
167 | #include <linux/dqblk_xfs.h> | 169 | #include <linux/dqblk_xfs.h> |
168 | #include <linux/dqblk_v1.h> | 170 | #include <linux/dqblk_v1.h> |
169 | #include <linux/dqblk_v2.h> | 171 | #include <linux/dqblk_v2.h> |
170 | 172 | ||
173 | #include <asm/atomic.h> | ||
174 | |||
171 | extern spinlock_t dq_data_lock; | 175 | extern spinlock_t dq_data_lock; |
172 | 176 | ||
173 | /* Maximal numbers of writes for quota operation (insert/delete/update) | 177 | /* Maximal numbers of writes for quota operation (insert/delete/update) |
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index a3d567a974e8..71fc81360048 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -213,6 +213,11 @@ int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t, | |||
213 | sg_alloc_fn *); | 213 | sg_alloc_fn *); |
214 | int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); | 214 | int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); |
215 | 215 | ||
216 | size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, | ||
217 | void *buf, size_t buflen); | ||
218 | size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, | ||
219 | void *buf, size_t buflen); | ||
220 | |||
216 | /* | 221 | /* |
217 | * Maximum number of entries that will be allocated in one piece, if | 222 | * Maximum number of entries that will be allocated in one piece, if |
218 | * a list larger than this is required then chaining will be utilized. | 223 | * a list larger than this is required then chaining will be utilized. |
diff --git a/include/linux/security.h b/include/linux/security.h index c673dfd4dffc..fea1f4aa4dd5 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -36,7 +36,11 @@ | |||
36 | 36 | ||
37 | extern unsigned securebits; | 37 | extern unsigned securebits; |
38 | 38 | ||
39 | /* Maximum number of letters for an LSM name string */ | ||
40 | #define SECURITY_NAME_MAX 10 | ||
41 | |||
39 | struct ctl_table; | 42 | struct ctl_table; |
43 | struct audit_krule; | ||
40 | 44 | ||
41 | /* | 45 | /* |
42 | * These functions are in security/capability.c and are used | 46 | * These functions are in security/capability.c and are used |
@@ -136,6 +140,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
136 | /** | 140 | /** |
137 | * struct security_operations - main security structure | 141 | * struct security_operations - main security structure |
138 | * | 142 | * |
143 | * Security module identifier. | ||
144 | * | ||
145 | * @name: | ||
146 | * A string that acts as a unique identifeir for the LSM with max number | ||
147 | * of characters = SECURITY_NAME_MAX. | ||
148 | * | ||
139 | * Security hooks for program execution operations. | 149 | * Security hooks for program execution operations. |
140 | * | 150 | * |
141 | * @bprm_alloc_security: | 151 | * @bprm_alloc_security: |
@@ -468,6 +478,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
468 | * @dentry is the dentry being changed. | 478 | * @dentry is the dentry being changed. |
469 | * Return 0 on success. If error is returned, then the operation | 479 | * Return 0 on success. If error is returned, then the operation |
470 | * causing setuid bit removal is failed. | 480 | * causing setuid bit removal is failed. |
481 | * @inode_getsecid: | ||
482 | * Get the secid associated with the node. | ||
483 | * @inode contains a pointer to the inode. | ||
484 | * @secid contains a pointer to the location where result will be saved. | ||
485 | * In case of failure, @secid will be set to zero. | ||
471 | * | 486 | * |
472 | * Security hooks for file operations | 487 | * Security hooks for file operations |
473 | * | 488 | * |
@@ -636,6 +651,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
636 | * @task_getsecid: | 651 | * @task_getsecid: |
637 | * Retrieve the security identifier of the process @p. | 652 | * Retrieve the security identifier of the process @p. |
638 | * @p contains the task_struct for the process and place is into @secid. | 653 | * @p contains the task_struct for the process and place is into @secid. |
654 | * In case of failure, @secid will be set to zero. | ||
655 | * | ||
639 | * @task_setgroups: | 656 | * @task_setgroups: |
640 | * Check permission before setting the supplementary group set of the | 657 | * Check permission before setting the supplementary group set of the |
641 | * current process. | 658 | * current process. |
@@ -910,24 +927,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
910 | * Security hooks for XFRM operations. | 927 | * Security hooks for XFRM operations. |
911 | * | 928 | * |
912 | * @xfrm_policy_alloc_security: | 929 | * @xfrm_policy_alloc_security: |
913 | * @xp contains the xfrm_policy being added to Security Policy Database | 930 | * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy |
914 | * used by the XFRM system. | 931 | * Database used by the XFRM system. |
915 | * @sec_ctx contains the security context information being provided by | 932 | * @sec_ctx contains the security context information being provided by |
916 | * the user-level policy update program (e.g., setkey). | 933 | * the user-level policy update program (e.g., setkey). |
917 | * Allocate a security structure to the xp->security field; the security | 934 | * Allocate a security structure to the xp->security field; the security |
918 | * field is initialized to NULL when the xfrm_policy is allocated. | 935 | * field is initialized to NULL when the xfrm_policy is allocated. |
919 | * Return 0 if operation was successful (memory to allocate, legal context) | 936 | * Return 0 if operation was successful (memory to allocate, legal context) |
920 | * @xfrm_policy_clone_security: | 937 | * @xfrm_policy_clone_security: |
921 | * @old contains an existing xfrm_policy in the SPD. | 938 | * @old_ctx contains an existing xfrm_sec_ctx. |
922 | * @new contains a new xfrm_policy being cloned from old. | 939 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. |
923 | * Allocate a security structure to the new->security field | 940 | * Allocate a security structure in new_ctxp that contains the |
924 | * that contains the information from the old->security field. | 941 | * information from the old_ctx structure. |
925 | * Return 0 if operation was successful (memory to allocate). | 942 | * Return 0 if operation was successful (memory to allocate). |
926 | * @xfrm_policy_free_security: | 943 | * @xfrm_policy_free_security: |
927 | * @xp contains the xfrm_policy | 944 | * @ctx contains the xfrm_sec_ctx |
928 | * Deallocate xp->security. | 945 | * Deallocate xp->security. |
929 | * @xfrm_policy_delete_security: | 946 | * @xfrm_policy_delete_security: |
930 | * @xp contains the xfrm_policy. | 947 | * @ctx contains the xfrm_sec_ctx. |
931 | * Authorize deletion of xp->security. | 948 | * Authorize deletion of xp->security. |
932 | * @xfrm_state_alloc_security: | 949 | * @xfrm_state_alloc_security: |
933 | * @x contains the xfrm_state being added to the Security Association | 950 | * @x contains the xfrm_state being added to the Security Association |
@@ -947,7 +964,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
947 | * @x contains the xfrm_state. | 964 | * @x contains the xfrm_state. |
948 | * Authorize deletion of x->security. | 965 | * Authorize deletion of x->security. |
949 | * @xfrm_policy_lookup: | 966 | * @xfrm_policy_lookup: |
950 | * @xp contains the xfrm_policy for which the access control is being | 967 | * @ctx contains the xfrm_sec_ctx for which the access control is being |
951 | * checked. | 968 | * checked. |
952 | * @fl_secid contains the flow security label that is used to authorize | 969 | * @fl_secid contains the flow security label that is used to authorize |
953 | * access to the policy xp. | 970 | * access to the policy xp. |
@@ -997,6 +1014,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
997 | * @ipcp contains the kernel IPC permission structure | 1014 | * @ipcp contains the kernel IPC permission structure |
998 | * @flag contains the desired (requested) permission set | 1015 | * @flag contains the desired (requested) permission set |
999 | * Return 0 if permission is granted. | 1016 | * Return 0 if permission is granted. |
1017 | * @ipc_getsecid: | ||
1018 | * Get the secid associated with the ipc object. | ||
1019 | * @ipcp contains the kernel IPC permission structure. | ||
1020 | * @secid contains a pointer to the location where result will be saved. | ||
1021 | * In case of failure, @secid will be set to zero. | ||
1000 | * | 1022 | * |
1001 | * Security hooks for individual messages held in System V IPC message queues | 1023 | * Security hooks for individual messages held in System V IPC message queues |
1002 | * @msg_msg_alloc_security: | 1024 | * @msg_msg_alloc_security: |
@@ -1223,9 +1245,42 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1223 | * @secdata contains the security context. | 1245 | * @secdata contains the security context. |
1224 | * @seclen contains the length of the security context. | 1246 | * @seclen contains the length of the security context. |
1225 | * | 1247 | * |
1248 | * Security hooks for Audit | ||
1249 | * | ||
1250 | * @audit_rule_init: | ||
1251 | * Allocate and initialize an LSM audit rule structure. | ||
1252 | * @field contains the required Audit action. Fields flags are defined in include/linux/audit.h | ||
1253 | * @op contains the operator the rule uses. | ||
1254 | * @rulestr contains the context where the rule will be applied to. | ||
1255 | * @lsmrule contains a pointer to receive the result. | ||
1256 | * Return 0 if @lsmrule has been successfully set, | ||
1257 | * -EINVAL in case of an invalid rule. | ||
1258 | * | ||
1259 | * @audit_rule_known: | ||
1260 | * Specifies whether given @rule contains any fields related to current LSM. | ||
1261 | * @rule contains the audit rule of interest. | ||
1262 | * Return 1 in case of relation found, 0 otherwise. | ||
1263 | * | ||
1264 | * @audit_rule_match: | ||
1265 | * Determine if given @secid matches a rule previously approved | ||
1266 | * by @audit_rule_known. | ||
1267 | * @secid contains the security id in question. | ||
1268 | * @field contains the field which relates to current LSM. | ||
1269 | * @op contains the operator that will be used for matching. | ||
1270 | * @rule points to the audit rule that will be checked against. | ||
1271 | * @actx points to the audit context associated with the check. | ||
1272 | * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. | ||
1273 | * | ||
1274 | * @audit_rule_free: | ||
1275 | * Deallocate the LSM audit rule structure previously allocated by | ||
1276 | * audit_rule_init. | ||
1277 | * @rule contains the allocated rule | ||
1278 | * | ||
1226 | * This is the main security structure. | 1279 | * This is the main security structure. |
1227 | */ | 1280 | */ |
1228 | struct security_operations { | 1281 | struct security_operations { |
1282 | char name[SECURITY_NAME_MAX + 1]; | ||
1283 | |||
1229 | int (*ptrace) (struct task_struct * parent, struct task_struct * child); | 1284 | int (*ptrace) (struct task_struct * parent, struct task_struct * child); |
1230 | int (*capget) (struct task_struct * target, | 1285 | int (*capget) (struct task_struct * target, |
1231 | kernel_cap_t * effective, | 1286 | kernel_cap_t * effective, |
@@ -1317,6 +1372,7 @@ struct security_operations { | |||
1317 | int (*inode_getsecurity)(const struct inode *inode, const char *name, void **buffer, bool alloc); | 1372 | int (*inode_getsecurity)(const struct inode *inode, const char *name, void **buffer, bool alloc); |
1318 | int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags); | 1373 | int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags); |
1319 | int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size); | 1374 | int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size); |
1375 | void (*inode_getsecid)(const struct inode *inode, u32 *secid); | ||
1320 | 1376 | ||
1321 | int (*file_permission) (struct file * file, int mask); | 1377 | int (*file_permission) (struct file * file, int mask); |
1322 | int (*file_alloc_security) (struct file * file); | 1378 | int (*file_alloc_security) (struct file * file); |
@@ -1369,6 +1425,7 @@ struct security_operations { | |||
1369 | void (*task_to_inode)(struct task_struct *p, struct inode *inode); | 1425 | void (*task_to_inode)(struct task_struct *p, struct inode *inode); |
1370 | 1426 | ||
1371 | int (*ipc_permission) (struct kern_ipc_perm * ipcp, short flag); | 1427 | int (*ipc_permission) (struct kern_ipc_perm * ipcp, short flag); |
1428 | void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid); | ||
1372 | 1429 | ||
1373 | int (*msg_msg_alloc_security) (struct msg_msg * msg); | 1430 | int (*msg_msg_alloc_security) (struct msg_msg * msg); |
1374 | void (*msg_msg_free_security) (struct msg_msg * msg); | 1431 | void (*msg_msg_free_security) (struct msg_msg * msg); |
@@ -1454,17 +1511,17 @@ struct security_operations { | |||
1454 | #endif /* CONFIG_SECURITY_NETWORK */ | 1511 | #endif /* CONFIG_SECURITY_NETWORK */ |
1455 | 1512 | ||
1456 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1513 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
1457 | int (*xfrm_policy_alloc_security) (struct xfrm_policy *xp, | 1514 | int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, |
1458 | struct xfrm_user_sec_ctx *sec_ctx); | 1515 | struct xfrm_user_sec_ctx *sec_ctx); |
1459 | int (*xfrm_policy_clone_security) (struct xfrm_policy *old, struct xfrm_policy *new); | 1516 | int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); |
1460 | void (*xfrm_policy_free_security) (struct xfrm_policy *xp); | 1517 | void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); |
1461 | int (*xfrm_policy_delete_security) (struct xfrm_policy *xp); | 1518 | int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); |
1462 | int (*xfrm_state_alloc_security) (struct xfrm_state *x, | 1519 | int (*xfrm_state_alloc_security) (struct xfrm_state *x, |
1463 | struct xfrm_user_sec_ctx *sec_ctx, | 1520 | struct xfrm_user_sec_ctx *sec_ctx, |
1464 | u32 secid); | 1521 | u32 secid); |
1465 | void (*xfrm_state_free_security) (struct xfrm_state *x); | 1522 | void (*xfrm_state_free_security) (struct xfrm_state *x); |
1466 | int (*xfrm_state_delete_security) (struct xfrm_state *x); | 1523 | int (*xfrm_state_delete_security) (struct xfrm_state *x); |
1467 | int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 fl_secid, u8 dir); | 1524 | int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); |
1468 | int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, | 1525 | int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, |
1469 | struct xfrm_policy *xp, struct flowi *fl); | 1526 | struct xfrm_policy *xp, struct flowi *fl); |
1470 | int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); | 1527 | int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); |
@@ -1480,10 +1537,18 @@ struct security_operations { | |||
1480 | 1537 | ||
1481 | #endif /* CONFIG_KEYS */ | 1538 | #endif /* CONFIG_KEYS */ |
1482 | 1539 | ||
1540 | #ifdef CONFIG_AUDIT | ||
1541 | int (*audit_rule_init)(u32 field, u32 op, char *rulestr, void **lsmrule); | ||
1542 | int (*audit_rule_known)(struct audit_krule *krule); | ||
1543 | int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule, | ||
1544 | struct audit_context *actx); | ||
1545 | void (*audit_rule_free)(void *lsmrule); | ||
1546 | #endif /* CONFIG_AUDIT */ | ||
1483 | }; | 1547 | }; |
1484 | 1548 | ||
1485 | /* prototypes */ | 1549 | /* prototypes */ |
1486 | extern int security_init (void); | 1550 | extern int security_init (void); |
1551 | extern int security_module_enable(struct security_operations *ops); | ||
1487 | extern int register_security (struct security_operations *ops); | 1552 | extern int register_security (struct security_operations *ops); |
1488 | extern int mod_reg_security (const char *name, struct security_operations *ops); | 1553 | extern int mod_reg_security (const char *name, struct security_operations *ops); |
1489 | extern struct dentry *securityfs_create_file(const char *name, mode_t mode, | 1554 | extern struct dentry *securityfs_create_file(const char *name, mode_t mode, |
@@ -1578,6 +1643,7 @@ int security_inode_killpriv(struct dentry *dentry); | |||
1578 | int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc); | 1643 | int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc); |
1579 | int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); | 1644 | int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); |
1580 | int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); | 1645 | int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); |
1646 | void security_inode_getsecid(const struct inode *inode, u32 *secid); | ||
1581 | int security_file_permission(struct file *file, int mask); | 1647 | int security_file_permission(struct file *file, int mask); |
1582 | int security_file_alloc(struct file *file); | 1648 | int security_file_alloc(struct file *file); |
1583 | void security_file_free(struct file *file); | 1649 | void security_file_free(struct file *file); |
@@ -1622,6 +1688,7 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
1622 | void security_task_reparent_to_init(struct task_struct *p); | 1688 | void security_task_reparent_to_init(struct task_struct *p); |
1623 | void security_task_to_inode(struct task_struct *p, struct inode *inode); | 1689 | void security_task_to_inode(struct task_struct *p, struct inode *inode); |
1624 | int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); | 1690 | int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); |
1691 | void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); | ||
1625 | int security_msg_msg_alloc(struct msg_msg *msg); | 1692 | int security_msg_msg_alloc(struct msg_msg *msg); |
1626 | void security_msg_msg_free(struct msg_msg *msg); | 1693 | void security_msg_msg_free(struct msg_msg *msg); |
1627 | int security_msg_queue_alloc(struct msg_queue *msq); | 1694 | int security_msg_queue_alloc(struct msg_queue *msq); |
@@ -2022,6 +2089,11 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer, | |||
2022 | return 0; | 2089 | return 0; |
2023 | } | 2090 | } |
2024 | 2091 | ||
2092 | static inline void security_inode_getsecid(const struct inode *inode, u32 *secid) | ||
2093 | { | ||
2094 | *secid = 0; | ||
2095 | } | ||
2096 | |||
2025 | static inline int security_file_permission (struct file *file, int mask) | 2097 | static inline int security_file_permission (struct file *file, int mask) |
2026 | { | 2098 | { |
2027 | return 0; | 2099 | return 0; |
@@ -2137,7 +2209,9 @@ static inline int security_task_getsid (struct task_struct *p) | |||
2137 | } | 2209 | } |
2138 | 2210 | ||
2139 | static inline void security_task_getsecid (struct task_struct *p, u32 *secid) | 2211 | static inline void security_task_getsecid (struct task_struct *p, u32 *secid) |
2140 | { } | 2212 | { |
2213 | *secid = 0; | ||
2214 | } | ||
2141 | 2215 | ||
2142 | static inline int security_task_setgroups (struct group_info *group_info) | 2216 | static inline int security_task_setgroups (struct group_info *group_info) |
2143 | { | 2217 | { |
@@ -2216,6 +2290,11 @@ static inline int security_ipc_permission (struct kern_ipc_perm *ipcp, | |||
2216 | return 0; | 2290 | return 0; |
2217 | } | 2291 | } |
2218 | 2292 | ||
2293 | static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid) | ||
2294 | { | ||
2295 | *secid = 0; | ||
2296 | } | ||
2297 | |||
2219 | static inline int security_msg_msg_alloc (struct msg_msg * msg) | 2298 | static inline int security_msg_msg_alloc (struct msg_msg * msg) |
2220 | { | 2299 | { |
2221 | return 0; | 2300 | return 0; |
@@ -2562,16 +2641,16 @@ static inline void security_inet_conn_established(struct sock *sk, | |||
2562 | 2641 | ||
2563 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 2642 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
2564 | 2643 | ||
2565 | int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx); | 2644 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx); |
2566 | int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new); | 2645 | int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); |
2567 | void security_xfrm_policy_free(struct xfrm_policy *xp); | 2646 | void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); |
2568 | int security_xfrm_policy_delete(struct xfrm_policy *xp); | 2647 | int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); |
2569 | int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx); | 2648 | int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx); |
2570 | int security_xfrm_state_alloc_acquire(struct xfrm_state *x, | 2649 | int security_xfrm_state_alloc_acquire(struct xfrm_state *x, |
2571 | struct xfrm_sec_ctx *polsec, u32 secid); | 2650 | struct xfrm_sec_ctx *polsec, u32 secid); |
2572 | int security_xfrm_state_delete(struct xfrm_state *x); | 2651 | int security_xfrm_state_delete(struct xfrm_state *x); |
2573 | void security_xfrm_state_free(struct xfrm_state *x); | 2652 | void security_xfrm_state_free(struct xfrm_state *x); |
2574 | int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir); | 2653 | int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); |
2575 | int security_xfrm_state_pol_flow_match(struct xfrm_state *x, | 2654 | int security_xfrm_state_pol_flow_match(struct xfrm_state *x, |
2576 | struct xfrm_policy *xp, struct flowi *fl); | 2655 | struct xfrm_policy *xp, struct flowi *fl); |
2577 | int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); | 2656 | int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); |
@@ -2579,21 +2658,21 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); | |||
2579 | 2658 | ||
2580 | #else /* CONFIG_SECURITY_NETWORK_XFRM */ | 2659 | #else /* CONFIG_SECURITY_NETWORK_XFRM */ |
2581 | 2660 | ||
2582 | static inline int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx) | 2661 | static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) |
2583 | { | 2662 | { |
2584 | return 0; | 2663 | return 0; |
2585 | } | 2664 | } |
2586 | 2665 | ||
2587 | static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new) | 2666 | static inline int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp) |
2588 | { | 2667 | { |
2589 | return 0; | 2668 | return 0; |
2590 | } | 2669 | } |
2591 | 2670 | ||
2592 | static inline void security_xfrm_policy_free(struct xfrm_policy *xp) | 2671 | static inline void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) |
2593 | { | 2672 | { |
2594 | } | 2673 | } |
2595 | 2674 | ||
2596 | static inline int security_xfrm_policy_delete(struct xfrm_policy *xp) | 2675 | static inline int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) |
2597 | { | 2676 | { |
2598 | return 0; | 2677 | return 0; |
2599 | } | 2678 | } |
@@ -2619,7 +2698,7 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x) | |||
2619 | return 0; | 2698 | return 0; |
2620 | } | 2699 | } |
2621 | 2700 | ||
2622 | static inline int security_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir) | 2701 | static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) |
2623 | { | 2702 | { |
2624 | return 0; | 2703 | return 0; |
2625 | } | 2704 | } |
@@ -2672,5 +2751,38 @@ static inline int security_key_permission(key_ref_t key_ref, | |||
2672 | #endif | 2751 | #endif |
2673 | #endif /* CONFIG_KEYS */ | 2752 | #endif /* CONFIG_KEYS */ |
2674 | 2753 | ||
2754 | #ifdef CONFIG_AUDIT | ||
2755 | #ifdef CONFIG_SECURITY | ||
2756 | int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); | ||
2757 | int security_audit_rule_known(struct audit_krule *krule); | ||
2758 | int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, | ||
2759 | struct audit_context *actx); | ||
2760 | void security_audit_rule_free(void *lsmrule); | ||
2761 | |||
2762 | #else | ||
2763 | |||
2764 | static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr, | ||
2765 | void **lsmrule) | ||
2766 | { | ||
2767 | return 0; | ||
2768 | } | ||
2769 | |||
2770 | static inline int security_audit_rule_known(struct audit_krule *krule) | ||
2771 | { | ||
2772 | return 0; | ||
2773 | } | ||
2774 | |||
2775 | static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, | ||
2776 | void *lsmrule, struct audit_context *actx) | ||
2777 | { | ||
2778 | return 0; | ||
2779 | } | ||
2780 | |||
2781 | static inline void security_audit_rule_free(void *lsmrule) | ||
2782 | { } | ||
2783 | |||
2784 | #endif /* CONFIG_SECURITY */ | ||
2785 | #endif /* CONFIG_AUDIT */ | ||
2786 | |||
2675 | #endif /* ! __LINUX_SECURITY_H */ | 2787 | #endif /* ! __LINUX_SECURITY_H */ |
2676 | 2788 | ||
diff --git a/include/linux/selinux.h b/include/linux/selinux.h index 8c2cc4c02526..20f965d4b041 100644 --- a/include/linux/selinux.h +++ b/include/linux/selinux.h | |||
@@ -16,99 +16,11 @@ | |||
16 | 16 | ||
17 | struct selinux_audit_rule; | 17 | struct selinux_audit_rule; |
18 | struct audit_context; | 18 | struct audit_context; |
19 | struct inode; | ||
20 | struct kern_ipc_perm; | 19 | struct kern_ipc_perm; |
21 | 20 | ||
22 | #ifdef CONFIG_SECURITY_SELINUX | 21 | #ifdef CONFIG_SECURITY_SELINUX |
23 | 22 | ||
24 | /** | 23 | /** |
25 | * selinux_audit_rule_init - alloc/init an selinux audit rule structure. | ||
26 | * @field: the field this rule refers to | ||
27 | * @op: the operater the rule uses | ||
28 | * @rulestr: the text "target" of the rule | ||
29 | * @rule: pointer to the new rule structure returned via this | ||
30 | * | ||
31 | * Returns 0 if successful, -errno if not. On success, the rule structure | ||
32 | * will be allocated internally. The caller must free this structure with | ||
33 | * selinux_audit_rule_free() after use. | ||
34 | */ | ||
35 | int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, | ||
36 | struct selinux_audit_rule **rule); | ||
37 | |||
38 | /** | ||
39 | * selinux_audit_rule_free - free an selinux audit rule structure. | ||
40 | * @rule: pointer to the audit rule to be freed | ||
41 | * | ||
42 | * This will free all memory associated with the given rule. | ||
43 | * If @rule is NULL, no operation is performed. | ||
44 | */ | ||
45 | void selinux_audit_rule_free(struct selinux_audit_rule *rule); | ||
46 | |||
47 | /** | ||
48 | * selinux_audit_rule_match - determine if a context ID matches a rule. | ||
49 | * @sid: the context ID to check | ||
50 | * @field: the field this rule refers to | ||
51 | * @op: the operater the rule uses | ||
52 | * @rule: pointer to the audit rule to check against | ||
53 | * @actx: the audit context (can be NULL) associated with the check | ||
54 | * | ||
55 | * Returns 1 if the context id matches the rule, 0 if it does not, and | ||
56 | * -errno on failure. | ||
57 | */ | ||
58 | int selinux_audit_rule_match(u32 sid, u32 field, u32 op, | ||
59 | struct selinux_audit_rule *rule, | ||
60 | struct audit_context *actx); | ||
61 | |||
62 | /** | ||
63 | * selinux_audit_set_callback - set the callback for policy reloads. | ||
64 | * @callback: the function to call when the policy is reloaded | ||
65 | * | ||
66 | * This sets the function callback function that will update the rules | ||
67 | * upon policy reloads. This callback should rebuild all existing rules | ||
68 | * using selinux_audit_rule_init(). | ||
69 | */ | ||
70 | void selinux_audit_set_callback(int (*callback)(void)); | ||
71 | |||
72 | /** | ||
73 | * selinux_sid_to_string - map a security context ID to a string | ||
74 | * @sid: security context ID to be converted. | ||
75 | * @ctx: address of context string to be returned | ||
76 | * @ctxlen: length of returned context string. | ||
77 | * | ||
78 | * Returns 0 if successful, -errno if not. On success, the context | ||
79 | * string will be allocated internally, and the caller must call | ||
80 | * kfree() on it after use. | ||
81 | */ | ||
82 | int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen); | ||
83 | |||
84 | /** | ||
85 | * selinux_get_inode_sid - get the inode's security context ID | ||
86 | * @inode: inode structure to get the sid from. | ||
87 | * @sid: pointer to security context ID to be filled in. | ||
88 | * | ||
89 | * Returns nothing | ||
90 | */ | ||
91 | void selinux_get_inode_sid(const struct inode *inode, u32 *sid); | ||
92 | |||
93 | /** | ||
94 | * selinux_get_ipc_sid - get the ipc security context ID | ||
95 | * @ipcp: ipc structure to get the sid from. | ||
96 | * @sid: pointer to security context ID to be filled in. | ||
97 | * | ||
98 | * Returns nothing | ||
99 | */ | ||
100 | void selinux_get_ipc_sid(const struct kern_ipc_perm *ipcp, u32 *sid); | ||
101 | |||
102 | /** | ||
103 | * selinux_get_task_sid - return the SID of task | ||
104 | * @tsk: the task whose SID will be returned | ||
105 | * @sid: pointer to security context ID to be filled in. | ||
106 | * | ||
107 | * Returns nothing | ||
108 | */ | ||
109 | void selinux_get_task_sid(struct task_struct *tsk, u32 *sid); | ||
110 | |||
111 | /** | ||
112 | * selinux_string_to_sid - map a security context string to a security ID | 24 | * selinux_string_to_sid - map a security context string to a security ID |
113 | * @str: the security context string to be mapped | 25 | * @str: the security context string to be mapped |
114 | * @sid: ID value returned via this. | 26 | * @sid: ID value returned via this. |
@@ -151,52 +63,6 @@ void selinux_secmark_refcount_inc(void); | |||
151 | void selinux_secmark_refcount_dec(void); | 63 | void selinux_secmark_refcount_dec(void); |
152 | #else | 64 | #else |
153 | 65 | ||
154 | static inline int selinux_audit_rule_init(u32 field, u32 op, | ||
155 | char *rulestr, | ||
156 | struct selinux_audit_rule **rule) | ||
157 | { | ||
158 | return -EOPNOTSUPP; | ||
159 | } | ||
160 | |||
161 | static inline void selinux_audit_rule_free(struct selinux_audit_rule *rule) | ||
162 | { | ||
163 | return; | ||
164 | } | ||
165 | |||
166 | static inline int selinux_audit_rule_match(u32 sid, u32 field, u32 op, | ||
167 | struct selinux_audit_rule *rule, | ||
168 | struct audit_context *actx) | ||
169 | { | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static inline void selinux_audit_set_callback(int (*callback)(void)) | ||
174 | { | ||
175 | return; | ||
176 | } | ||
177 | |||
178 | static inline int selinux_sid_to_string(u32 sid, char **ctx, u32 *ctxlen) | ||
179 | { | ||
180 | *ctx = NULL; | ||
181 | *ctxlen = 0; | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static inline void selinux_get_inode_sid(const struct inode *inode, u32 *sid) | ||
186 | { | ||
187 | *sid = 0; | ||
188 | } | ||
189 | |||
190 | static inline void selinux_get_ipc_sid(const struct kern_ipc_perm *ipcp, u32 *sid) | ||
191 | { | ||
192 | *sid = 0; | ||
193 | } | ||
194 | |||
195 | static inline void selinux_get_task_sid(struct task_struct *tsk, u32 *sid) | ||
196 | { | ||
197 | *sid = 0; | ||
198 | } | ||
199 | |||
200 | static inline int selinux_string_to_sid(const char *str, u32 *sid) | 66 | static inline int selinux_string_to_sid(const char *str, u32 *sid) |
201 | { | 67 | { |
202 | *sid = 0; | 68 | *sid = 0; |
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h new file mode 100644 index 000000000000..9cae64b00d6b --- /dev/null +++ b/include/linux/semaphore.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008 Intel Corporation | ||
3 | * Author: Matthew Wilcox <willy@linux.intel.com> | ||
4 | * | ||
5 | * Distributed under the terms of the GNU GPL, version 2 | ||
6 | * | ||
7 | * Please see kernel/semaphore.c for documentation of these functions | ||
8 | */ | ||
9 | #ifndef __LINUX_SEMAPHORE_H | ||
10 | #define __LINUX_SEMAPHORE_H | ||
11 | |||
12 | #include <linux/list.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | |||
15 | /* Please don't access any members of this structure directly */ | ||
16 | struct semaphore { | ||
17 | spinlock_t lock; | ||
18 | unsigned int count; | ||
19 | struct list_head wait_list; | ||
20 | }; | ||
21 | |||
22 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
23 | { \ | ||
24 | .lock = __SPIN_LOCK_UNLOCKED((name).lock), \ | ||
25 | .count = n, \ | ||
26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ | ||
27 | } | ||
28 | |||
29 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) | ||
31 | |||
32 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
33 | |||
34 | static inline void sema_init(struct semaphore *sem, int val) | ||
35 | { | ||
36 | static struct lock_class_key __key; | ||
37 | *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); | ||
38 | lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); | ||
39 | } | ||
40 | |||
41 | #define init_MUTEX(sem) sema_init(sem, 1) | ||
42 | #define init_MUTEX_LOCKED(sem) sema_init(sem, 0) | ||
43 | |||
44 | extern void down(struct semaphore *sem); | ||
45 | extern int __must_check down_interruptible(struct semaphore *sem); | ||
46 | extern int __must_check down_killable(struct semaphore *sem); | ||
47 | extern int __must_check down_trylock(struct semaphore *sem); | ||
48 | extern int __must_check down_timeout(struct semaphore *sem, long jiffies); | ||
49 | extern void up(struct semaphore *sem); | ||
50 | |||
51 | #endif /* __LINUX_SEMAPHORE_H */ | ||
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 67c2563961f3..1da1e6208a0a 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
@@ -62,18 +62,5 @@ extern struct list_head *seq_list_start_head(struct list_head *head, | |||
62 | extern struct list_head *seq_list_next(void *v, struct list_head *head, | 62 | extern struct list_head *seq_list_next(void *v, struct list_head *head, |
63 | loff_t *ppos); | 63 | loff_t *ppos); |
64 | 64 | ||
65 | struct net; | ||
66 | struct seq_net_private { | ||
67 | struct net *net; | ||
68 | }; | ||
69 | |||
70 | int seq_open_net(struct inode *, struct file *, | ||
71 | const struct seq_operations *, int); | ||
72 | int seq_release_net(struct inode *, struct file *); | ||
73 | static inline struct net *seq_file_net(struct seq_file *seq) | ||
74 | { | ||
75 | return ((struct seq_net_private *)seq->private)->net; | ||
76 | } | ||
77 | |||
78 | #endif | 65 | #endif |
79 | #endif | 66 | #endif |
diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h new file mode 100644 index 000000000000..4ac52542a563 --- /dev/null +++ b/include/linux/seq_file_net.h | |||
@@ -0,0 +1,27 @@ | |||
1 | #ifndef __SEQ_FILE_NET_H__ | ||
2 | #define __SEQ_FILE_NET_H__ | ||
3 | |||
4 | #include <linux/seq_file.h> | ||
5 | |||
6 | struct net; | ||
7 | extern struct net init_net; | ||
8 | |||
9 | struct seq_net_private { | ||
10 | #ifdef CONFIG_NET_NS | ||
11 | struct net *net; | ||
12 | #endif | ||
13 | }; | ||
14 | |||
15 | int seq_open_net(struct inode *, struct file *, | ||
16 | const struct seq_operations *, int); | ||
17 | int seq_release_net(struct inode *, struct file *); | ||
18 | static inline struct net *seq_file_net(struct seq_file *seq) | ||
19 | { | ||
20 | #ifdef CONFIG_NET_NS | ||
21 | return ((struct seq_net_private *)seq->private)->net; | ||
22 | #else | ||
23 | return &init_net; | ||
24 | #endif | ||
25 | } | ||
26 | |||
27 | #endif | ||
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 289942fc6655..7cb094a82456 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -213,6 +213,10 @@ struct uart_ops { | |||
213 | void (*config_port)(struct uart_port *, int); | 213 | void (*config_port)(struct uart_port *, int); |
214 | int (*verify_port)(struct uart_port *, struct serial_struct *); | 214 | int (*verify_port)(struct uart_port *, struct serial_struct *); |
215 | int (*ioctl)(struct uart_port *, unsigned int, unsigned long); | 215 | int (*ioctl)(struct uart_port *, unsigned int, unsigned long); |
216 | #ifdef CONFIG_CONSOLE_POLL | ||
217 | void (*poll_put_char)(struct uart_port *, unsigned char); | ||
218 | int (*poll_get_char)(struct uart_port *); | ||
219 | #endif | ||
216 | }; | 220 | }; |
217 | 221 | ||
218 | #define UART_CONFIG_TYPE (1 << 0) | 222 | #define UART_CONFIG_TYPE (1 << 0) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bbd8d0027e2f..11fd9f2c4093 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -256,7 +256,10 @@ struct sk_buff { | |||
256 | ktime_t tstamp; | 256 | ktime_t tstamp; |
257 | struct net_device *dev; | 257 | struct net_device *dev; |
258 | 258 | ||
259 | struct dst_entry *dst; | 259 | union { |
260 | struct dst_entry *dst; | ||
261 | struct rtable *rtable; | ||
262 | }; | ||
260 | struct sec_path *sp; | 263 | struct sec_path *sp; |
261 | 264 | ||
262 | /* | 265 | /* |
@@ -310,7 +313,10 @@ struct sk_buff { | |||
310 | __u16 tc_verd; /* traffic control verdict */ | 313 | __u16 tc_verd; /* traffic control verdict */ |
311 | #endif | 314 | #endif |
312 | #endif | 315 | #endif |
313 | /* 2 byte hole */ | 316 | #ifdef CONFIG_IPV6_NDISC_NODETYPE |
317 | __u8 ndisc_nodetype:2; | ||
318 | #endif | ||
319 | /* 14 bit hole */ | ||
314 | 320 | ||
315 | #ifdef CONFIG_NET_DMA | 321 | #ifdef CONFIG_NET_DMA |
316 | dma_cookie_t dma_cookie; | 322 | dma_cookie_t dma_cookie; |
@@ -657,11 +663,21 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list, | |||
657 | } | 663 | } |
658 | 664 | ||
659 | /* | 665 | /* |
660 | * Insert an sk_buff at the start of a list. | 666 | * Insert an sk_buff on a list. |
661 | * | 667 | * |
662 | * The "__skb_xxxx()" functions are the non-atomic ones that | 668 | * The "__skb_xxxx()" functions are the non-atomic ones that |
663 | * can only be called with interrupts disabled. | 669 | * can only be called with interrupts disabled. |
664 | */ | 670 | */ |
671 | extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); | ||
672 | static inline void __skb_insert(struct sk_buff *newsk, | ||
673 | struct sk_buff *prev, struct sk_buff *next, | ||
674 | struct sk_buff_head *list) | ||
675 | { | ||
676 | newsk->next = next; | ||
677 | newsk->prev = prev; | ||
678 | next->prev = prev->next = newsk; | ||
679 | list->qlen++; | ||
680 | } | ||
665 | 681 | ||
666 | /** | 682 | /** |
667 | * __skb_queue_after - queue a buffer at the list head | 683 | * __skb_queue_after - queue a buffer at the list head |
@@ -678,13 +694,17 @@ static inline void __skb_queue_after(struct sk_buff_head *list, | |||
678 | struct sk_buff *prev, | 694 | struct sk_buff *prev, |
679 | struct sk_buff *newsk) | 695 | struct sk_buff *newsk) |
680 | { | 696 | { |
681 | struct sk_buff *next; | 697 | __skb_insert(newsk, prev, prev->next, list); |
682 | list->qlen++; | 698 | } |
683 | 699 | ||
684 | next = prev->next; | 700 | extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, |
685 | newsk->next = next; | 701 | struct sk_buff_head *list); |
686 | newsk->prev = prev; | 702 | |
687 | next->prev = prev->next = newsk; | 703 | static inline void __skb_queue_before(struct sk_buff_head *list, |
704 | struct sk_buff *next, | ||
705 | struct sk_buff *newsk) | ||
706 | { | ||
707 | __skb_insert(newsk, next->prev, next, list); | ||
688 | } | 708 | } |
689 | 709 | ||
690 | /** | 710 | /** |
@@ -718,66 +738,7 @@ extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); | |||
718 | static inline void __skb_queue_tail(struct sk_buff_head *list, | 738 | static inline void __skb_queue_tail(struct sk_buff_head *list, |
719 | struct sk_buff *newsk) | 739 | struct sk_buff *newsk) |
720 | { | 740 | { |
721 | struct sk_buff *prev, *next; | 741 | __skb_queue_before(list, (struct sk_buff *)list, newsk); |
722 | |||
723 | list->qlen++; | ||
724 | next = (struct sk_buff *)list; | ||
725 | prev = next->prev; | ||
726 | newsk->next = next; | ||
727 | newsk->prev = prev; | ||
728 | next->prev = prev->next = newsk; | ||
729 | } | ||
730 | |||
731 | |||
732 | /** | ||
733 | * __skb_dequeue - remove from the head of the queue | ||
734 | * @list: list to dequeue from | ||
735 | * | ||
736 | * Remove the head of the list. This function does not take any locks | ||
737 | * so must be used with appropriate locks held only. The head item is | ||
738 | * returned or %NULL if the list is empty. | ||
739 | */ | ||
740 | extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); | ||
741 | static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) | ||
742 | { | ||
743 | struct sk_buff *next, *prev, *result; | ||
744 | |||
745 | prev = (struct sk_buff *) list; | ||
746 | next = prev->next; | ||
747 | result = NULL; | ||
748 | if (next != prev) { | ||
749 | result = next; | ||
750 | next = next->next; | ||
751 | list->qlen--; | ||
752 | next->prev = prev; | ||
753 | prev->next = next; | ||
754 | result->next = result->prev = NULL; | ||
755 | } | ||
756 | return result; | ||
757 | } | ||
758 | |||
759 | |||
760 | /* | ||
761 | * Insert a packet on a list. | ||
762 | */ | ||
763 | extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); | ||
764 | static inline void __skb_insert(struct sk_buff *newsk, | ||
765 | struct sk_buff *prev, struct sk_buff *next, | ||
766 | struct sk_buff_head *list) | ||
767 | { | ||
768 | newsk->next = next; | ||
769 | newsk->prev = prev; | ||
770 | next->prev = prev->next = newsk; | ||
771 | list->qlen++; | ||
772 | } | ||
773 | |||
774 | /* | ||
775 | * Place a packet after a given packet in a list. | ||
776 | */ | ||
777 | extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); | ||
778 | static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) | ||
779 | { | ||
780 | __skb_insert(newsk, old, old->next, list); | ||
781 | } | 742 | } |
782 | 743 | ||
783 | /* | 744 | /* |
@@ -797,8 +758,22 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) | |||
797 | prev->next = next; | 758 | prev->next = next; |
798 | } | 759 | } |
799 | 760 | ||
800 | 761 | /** | |
801 | /* XXX: more streamlined implementation */ | 762 | * __skb_dequeue - remove from the head of the queue |
763 | * @list: list to dequeue from | ||
764 | * | ||
765 | * Remove the head of the list. This function does not take any locks | ||
766 | * so must be used with appropriate locks held only. The head item is | ||
767 | * returned or %NULL if the list is empty. | ||
768 | */ | ||
769 | extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); | ||
770 | static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) | ||
771 | { | ||
772 | struct sk_buff *skb = skb_peek(list); | ||
773 | if (skb) | ||
774 | __skb_unlink(skb, list); | ||
775 | return skb; | ||
776 | } | ||
802 | 777 | ||
803 | /** | 778 | /** |
804 | * __skb_dequeue_tail - remove from the tail of the queue | 779 | * __skb_dequeue_tail - remove from the tail of the queue |
@@ -889,6 +864,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) | |||
889 | /* | 864 | /* |
890 | * Add data to an sk_buff | 865 | * Add data to an sk_buff |
891 | */ | 866 | */ |
867 | extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len); | ||
892 | static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) | 868 | static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) |
893 | { | 869 | { |
894 | unsigned char *tmp = skb_tail_pointer(skb); | 870 | unsigned char *tmp = skb_tail_pointer(skb); |
@@ -898,26 +874,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) | |||
898 | return tmp; | 874 | return tmp; |
899 | } | 875 | } |
900 | 876 | ||
901 | /** | 877 | extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len); |
902 | * skb_put - add data to a buffer | ||
903 | * @skb: buffer to use | ||
904 | * @len: amount of data to add | ||
905 | * | ||
906 | * This function extends the used data area of the buffer. If this would | ||
907 | * exceed the total buffer size the kernel will panic. A pointer to the | ||
908 | * first byte of the extra data is returned. | ||
909 | */ | ||
910 | static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len) | ||
911 | { | ||
912 | unsigned char *tmp = skb_tail_pointer(skb); | ||
913 | SKB_LINEAR_ASSERT(skb); | ||
914 | skb->tail += len; | ||
915 | skb->len += len; | ||
916 | if (unlikely(skb->tail > skb->end)) | ||
917 | skb_over_panic(skb, len, current_text_addr()); | ||
918 | return tmp; | ||
919 | } | ||
920 | |||
921 | static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) | 878 | static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) |
922 | { | 879 | { |
923 | skb->data -= len; | 880 | skb->data -= len; |
@@ -925,24 +882,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) | |||
925 | return skb->data; | 882 | return skb->data; |
926 | } | 883 | } |
927 | 884 | ||
928 | /** | 885 | extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); |
929 | * skb_push - add data to the start of a buffer | ||
930 | * @skb: buffer to use | ||
931 | * @len: amount of data to add | ||
932 | * | ||
933 | * This function extends the used data area of the buffer at the buffer | ||
934 | * start. If this would exceed the total buffer headroom the kernel will | ||
935 | * panic. A pointer to the first byte of the extra data is returned. | ||
936 | */ | ||
937 | static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len) | ||
938 | { | ||
939 | skb->data -= len; | ||
940 | skb->len += len; | ||
941 | if (unlikely(skb->data<skb->head)) | ||
942 | skb_under_panic(skb, len, current_text_addr()); | ||
943 | return skb->data; | ||
944 | } | ||
945 | |||
946 | static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) | 886 | static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) |
947 | { | 887 | { |
948 | skb->len -= len; | 888 | skb->len -= len; |
@@ -950,21 +890,6 @@ static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) | |||
950 | return skb->data += len; | 890 | return skb->data += len; |
951 | } | 891 | } |
952 | 892 | ||
953 | /** | ||
954 | * skb_pull - remove data from the start of a buffer | ||
955 | * @skb: buffer to use | ||
956 | * @len: amount of data to remove | ||
957 | * | ||
958 | * This function removes data from the start of a buffer, returning | ||
959 | * the memory to the headroom. A pointer to the next data in the buffer | ||
960 | * is returned. Once the data has been pulled future pushes will overwrite | ||
961 | * the old data. | ||
962 | */ | ||
963 | static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) | ||
964 | { | ||
965 | return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); | ||
966 | } | ||
967 | |||
968 | extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); | 893 | extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); |
969 | 894 | ||
970 | static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) | 895 | static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) |
@@ -1205,21 +1130,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len) | |||
1205 | skb_set_tail_pointer(skb, len); | 1130 | skb_set_tail_pointer(skb, len); |
1206 | } | 1131 | } |
1207 | 1132 | ||
1208 | /** | 1133 | extern void skb_trim(struct sk_buff *skb, unsigned int len); |
1209 | * skb_trim - remove end from a buffer | ||
1210 | * @skb: buffer to alter | ||
1211 | * @len: new length | ||
1212 | * | ||
1213 | * Cut the length of a buffer down by removing data from the tail. If | ||
1214 | * the buffer is already under the length specified it is not modified. | ||
1215 | * The skb must be linear. | ||
1216 | */ | ||
1217 | static inline void skb_trim(struct sk_buff *skb, unsigned int len) | ||
1218 | { | ||
1219 | if (skb->len > len) | ||
1220 | __skb_trim(skb, len); | ||
1221 | } | ||
1222 | |||
1223 | 1134 | ||
1224 | static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) | 1135 | static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) |
1225 | { | 1136 | { |
@@ -1302,22 +1213,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, | |||
1302 | return skb; | 1213 | return skb; |
1303 | } | 1214 | } |
1304 | 1215 | ||
1305 | /** | 1216 | extern struct sk_buff *dev_alloc_skb(unsigned int length); |
1306 | * dev_alloc_skb - allocate an skbuff for receiving | ||
1307 | * @length: length to allocate | ||
1308 | * | ||
1309 | * Allocate a new &sk_buff and assign it a usage count of one. The | ||
1310 | * buffer has unspecified headroom built in. Users should allocate | ||
1311 | * the headroom they think they need without accounting for the | ||
1312 | * built in space. The built in space is used for optimisations. | ||
1313 | * | ||
1314 | * %NULL is returned if there is no free memory. Although this function | ||
1315 | * allocates memory it can be called from an interrupt. | ||
1316 | */ | ||
1317 | static inline struct sk_buff *dev_alloc_skb(unsigned int length) | ||
1318 | { | ||
1319 | return __dev_alloc_skb(length, GFP_ATOMIC); | ||
1320 | } | ||
1321 | 1217 | ||
1322 | extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, | 1218 | extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, |
1323 | unsigned int length, gfp_t gfp_mask); | 1219 | unsigned int length, gfp_t gfp_mask); |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index b00c1c73eb0a..79d59c937fac 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -45,9 +45,9 @@ struct kmem_cache_cpu { | |||
45 | struct kmem_cache_node { | 45 | struct kmem_cache_node { |
46 | spinlock_t list_lock; /* Protect partial list and nr_partial */ | 46 | spinlock_t list_lock; /* Protect partial list and nr_partial */ |
47 | unsigned long nr_partial; | 47 | unsigned long nr_partial; |
48 | atomic_long_t nr_slabs; | ||
49 | struct list_head partial; | 48 | struct list_head partial; |
50 | #ifdef CONFIG_SLUB_DEBUG | 49 | #ifdef CONFIG_SLUB_DEBUG |
50 | atomic_long_t nr_slabs; | ||
51 | struct list_head full; | 51 | struct list_head full; |
52 | #endif | 52 | #endif |
53 | }; | 53 | }; |
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h new file mode 100644 index 000000000000..8e0556b8781c --- /dev/null +++ b/include/linux/smc91x.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __SMC91X_H__ | ||
2 | #define __SMC91X_H__ | ||
3 | |||
4 | #define SMC91X_USE_8BIT (1 << 0) | ||
5 | #define SMC91X_USE_16BIT (1 << 1) | ||
6 | #define SMC91X_USE_32BIT (1 << 2) | ||
7 | |||
8 | struct smc91x_platdata { | ||
9 | unsigned long flags; | ||
10 | unsigned long irq_flags; /* IRQF_... */ | ||
11 | }; | ||
12 | |||
13 | #endif /* __SMC91X_H__ */ | ||
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 1129ee0a7180..d311a090fae7 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -296,43 +296,6 @@ do { \ | |||
296 | }) | 296 | }) |
297 | 297 | ||
298 | /* | 298 | /* |
299 | * Locks two spinlocks l1 and l2. | ||
300 | * l1_first indicates if spinlock l1 should be taken first. | ||
301 | */ | ||
302 | static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2, | ||
303 | bool l1_first) | ||
304 | __acquires(l1) | ||
305 | __acquires(l2) | ||
306 | { | ||
307 | if (l1_first) { | ||
308 | spin_lock(l1); | ||
309 | spin_lock(l2); | ||
310 | } else { | ||
311 | spin_lock(l2); | ||
312 | spin_lock(l1); | ||
313 | } | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * Unlocks two spinlocks l1 and l2. | ||
318 | * l1_taken_first indicates if spinlock l1 was taken first and therefore | ||
319 | * should be released after spinlock l2. | ||
320 | */ | ||
321 | static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2, | ||
322 | bool l1_taken_first) | ||
323 | __releases(l1) | ||
324 | __releases(l2) | ||
325 | { | ||
326 | if (l1_taken_first) { | ||
327 | spin_unlock(l2); | ||
328 | spin_unlock(l1); | ||
329 | } else { | ||
330 | spin_unlock(l1); | ||
331 | spin_unlock(l2); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Pull the atomic_t declaration: | 299 | * Pull the atomic_t declaration: |
337 | * (asm-mips/atomic.h needs above definitions) | 300 | * (asm-mips/atomic.h needs above definitions) |
338 | */ | 301 | */ |
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 20add65215af..50dfd0dc4093 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h | |||
@@ -72,10 +72,18 @@ struct ssb_device; | |||
72 | /* Lowlevel read/write operations on the device MMIO. | 72 | /* Lowlevel read/write operations on the device MMIO. |
73 | * Internal, don't use that outside of ssb. */ | 73 | * Internal, don't use that outside of ssb. */ |
74 | struct ssb_bus_ops { | 74 | struct ssb_bus_ops { |
75 | u8 (*read8)(struct ssb_device *dev, u16 offset); | ||
75 | u16 (*read16)(struct ssb_device *dev, u16 offset); | 76 | u16 (*read16)(struct ssb_device *dev, u16 offset); |
76 | u32 (*read32)(struct ssb_device *dev, u16 offset); | 77 | u32 (*read32)(struct ssb_device *dev, u16 offset); |
78 | void (*write8)(struct ssb_device *dev, u16 offset, u8 value); | ||
77 | void (*write16)(struct ssb_device *dev, u16 offset, u16 value); | 79 | void (*write16)(struct ssb_device *dev, u16 offset, u16 value); |
78 | void (*write32)(struct ssb_device *dev, u16 offset, u32 value); | 80 | void (*write32)(struct ssb_device *dev, u16 offset, u32 value); |
81 | #ifdef CONFIG_SSB_BLOCKIO | ||
82 | void (*block_read)(struct ssb_device *dev, void *buffer, | ||
83 | size_t count, u16 offset, u8 reg_width); | ||
84 | void (*block_write)(struct ssb_device *dev, const void *buffer, | ||
85 | size_t count, u16 offset, u8 reg_width); | ||
86 | #endif | ||
79 | }; | 87 | }; |
80 | 88 | ||
81 | 89 | ||
@@ -129,6 +137,10 @@ struct ssb_device { | |||
129 | const struct ssb_bus_ops *ops; | 137 | const struct ssb_bus_ops *ops; |
130 | 138 | ||
131 | struct device *dev; | 139 | struct device *dev; |
140 | /* Pointer to the device that has to be used for | ||
141 | * any DMA related operation. */ | ||
142 | struct device *dma_dev; | ||
143 | |||
132 | struct ssb_bus *bus; | 144 | struct ssb_bus *bus; |
133 | struct ssb_device_id id; | 145 | struct ssb_device_id id; |
134 | 146 | ||
@@ -243,9 +255,9 @@ struct ssb_bus { | |||
243 | /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */ | 255 | /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */ |
244 | struct pcmcia_device *host_pcmcia; | 256 | struct pcmcia_device *host_pcmcia; |
245 | 257 | ||
246 | #ifdef CONFIG_SSB_PCIHOST | 258 | #ifdef CONFIG_SSB_SPROM |
247 | /* Mutex to protect the SPROM writing. */ | 259 | /* Mutex to protect the SPROM writing. */ |
248 | struct mutex pci_sprom_mutex; | 260 | struct mutex sprom_mutex; |
249 | #endif | 261 | #endif |
250 | 262 | ||
251 | /* ID information about the Chip. */ | 263 | /* ID information about the Chip. */ |
@@ -258,9 +270,6 @@ struct ssb_bus { | |||
258 | struct ssb_device devices[SSB_MAX_NR_CORES]; | 270 | struct ssb_device devices[SSB_MAX_NR_CORES]; |
259 | u8 nr_devices; | 271 | u8 nr_devices; |
260 | 272 | ||
261 | /* Reference count. Number of suspended devices. */ | ||
262 | u8 suspend_cnt; | ||
263 | |||
264 | /* Software ID number for this bus. */ | 273 | /* Software ID number for this bus. */ |
265 | unsigned int busnumber; | 274 | unsigned int busnumber; |
266 | 275 | ||
@@ -332,6 +341,13 @@ extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus, | |||
332 | 341 | ||
333 | extern void ssb_bus_unregister(struct ssb_bus *bus); | 342 | extern void ssb_bus_unregister(struct ssb_bus *bus); |
334 | 343 | ||
344 | /* Suspend a SSB bus. | ||
345 | * Call this from the parent bus suspend routine. */ | ||
346 | extern int ssb_bus_suspend(struct ssb_bus *bus); | ||
347 | /* Resume a SSB bus. | ||
348 | * Call this from the parent bus resume routine. */ | ||
349 | extern int ssb_bus_resume(struct ssb_bus *bus); | ||
350 | |||
335 | extern u32 ssb_clockspeed(struct ssb_bus *bus); | 351 | extern u32 ssb_clockspeed(struct ssb_bus *bus); |
336 | 352 | ||
337 | /* Is the device enabled in hardware? */ | 353 | /* Is the device enabled in hardware? */ |
@@ -344,6 +360,10 @@ void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags); | |||
344 | 360 | ||
345 | 361 | ||
346 | /* Device MMIO register read/write functions. */ | 362 | /* Device MMIO register read/write functions. */ |
363 | static inline u8 ssb_read8(struct ssb_device *dev, u16 offset) | ||
364 | { | ||
365 | return dev->ops->read8(dev, offset); | ||
366 | } | ||
347 | static inline u16 ssb_read16(struct ssb_device *dev, u16 offset) | 367 | static inline u16 ssb_read16(struct ssb_device *dev, u16 offset) |
348 | { | 368 | { |
349 | return dev->ops->read16(dev, offset); | 369 | return dev->ops->read16(dev, offset); |
@@ -352,6 +372,10 @@ static inline u32 ssb_read32(struct ssb_device *dev, u16 offset) | |||
352 | { | 372 | { |
353 | return dev->ops->read32(dev, offset); | 373 | return dev->ops->read32(dev, offset); |
354 | } | 374 | } |
375 | static inline void ssb_write8(struct ssb_device *dev, u16 offset, u8 value) | ||
376 | { | ||
377 | dev->ops->write8(dev, offset, value); | ||
378 | } | ||
355 | static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value) | 379 | static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value) |
356 | { | 380 | { |
357 | dev->ops->write16(dev, offset, value); | 381 | dev->ops->write16(dev, offset, value); |
@@ -360,6 +384,19 @@ static inline void ssb_write32(struct ssb_device *dev, u16 offset, u32 value) | |||
360 | { | 384 | { |
361 | dev->ops->write32(dev, offset, value); | 385 | dev->ops->write32(dev, offset, value); |
362 | } | 386 | } |
387 | #ifdef CONFIG_SSB_BLOCKIO | ||
388 | static inline void ssb_block_read(struct ssb_device *dev, void *buffer, | ||
389 | size_t count, u16 offset, u8 reg_width) | ||
390 | { | ||
391 | dev->ops->block_read(dev, buffer, count, offset, reg_width); | ||
392 | } | ||
393 | |||
394 | static inline void ssb_block_write(struct ssb_device *dev, const void *buffer, | ||
395 | size_t count, u16 offset, u8 reg_width) | ||
396 | { | ||
397 | dev->ops->block_write(dev, buffer, count, offset, reg_width); | ||
398 | } | ||
399 | #endif /* CONFIG_SSB_BLOCKIO */ | ||
363 | 400 | ||
364 | 401 | ||
365 | /* Translation (routing) bits that need to be ORed to DMA | 402 | /* Translation (routing) bits that need to be ORed to DMA |
@@ -412,5 +449,12 @@ extern int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl); | |||
412 | extern u32 ssb_admatch_base(u32 adm); | 449 | extern u32 ssb_admatch_base(u32 adm); |
413 | extern u32 ssb_admatch_size(u32 adm); | 450 | extern u32 ssb_admatch_size(u32 adm); |
414 | 451 | ||
452 | /* PCI device mapping and fixup routines. | ||
453 | * Called from the architecture pcibios init code. | ||
454 | * These are only available on SSB_EMBEDDED configurations. */ | ||
455 | #ifdef CONFIG_SSB_EMBEDDED | ||
456 | int ssb_pcibios_plat_dev_init(struct pci_dev *dev); | ||
457 | int ssb_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); | ||
458 | #endif /* CONFIG_SSB_EMBEDDED */ | ||
415 | 459 | ||
416 | #endif /* LINUX_SSB_H_ */ | 460 | #endif /* LINUX_SSB_H_ */ |
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h index 536851b946f6..7d7e03dcf77c 100644 --- a/include/linux/ssb/ssb_driver_chipcommon.h +++ b/include/linux/ssb/ssb_driver_chipcommon.h | |||
@@ -367,8 +367,7 @@ static inline bool ssb_chipco_available(struct ssb_chipcommon *cc) | |||
367 | 367 | ||
368 | extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); | 368 | extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); |
369 | 369 | ||
370 | #include <linux/pm.h> | 370 | extern void ssb_chipco_suspend(struct ssb_chipcommon *cc); |
371 | extern void ssb_chipco_suspend(struct ssb_chipcommon *cc, pm_message_t state); | ||
372 | extern void ssb_chipco_resume(struct ssb_chipcommon *cc); | 371 | extern void ssb_chipco_resume(struct ssb_chipcommon *cc); |
373 | 372 | ||
374 | extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, | 373 | extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, |
@@ -390,6 +389,10 @@ extern void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, | |||
390 | extern void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, | 389 | extern void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, |
391 | u32 ticks); | 390 | u32 ticks); |
392 | 391 | ||
392 | void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value); | ||
393 | |||
394 | u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask); | ||
395 | |||
393 | /* Chipcommon GPIO pin access. */ | 396 | /* Chipcommon GPIO pin access. */ |
394 | u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask); | 397 | u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask); |
395 | u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value); | 398 | u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value); |
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h new file mode 100644 index 000000000000..01fbdf5fef22 --- /dev/null +++ b/include/linux/ssb/ssb_driver_gige.h | |||
@@ -0,0 +1,174 @@ | |||
1 | #ifndef LINUX_SSB_DRIVER_GIGE_H_ | ||
2 | #define LINUX_SSB_DRIVER_GIGE_H_ | ||
3 | |||
4 | #include <linux/ssb/ssb.h> | ||
5 | #include <linux/pci.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | |||
8 | |||
9 | #ifdef CONFIG_SSB_DRIVER_GIGE | ||
10 | |||
11 | |||
12 | #define SSB_GIGE_PCIIO 0x0000 /* PCI I/O Registers (1024 bytes) */ | ||
13 | #define SSB_GIGE_RESERVED 0x0400 /* Reserved (1024 bytes) */ | ||
14 | #define SSB_GIGE_PCICFG 0x0800 /* PCI config space (256 bytes) */ | ||
15 | #define SSB_GIGE_SHIM_FLUSHSTAT 0x0C00 /* PCI to OCP: Flush status control (32bit) */ | ||
16 | #define SSB_GIGE_SHIM_FLUSHRDA 0x0C04 /* PCI to OCP: Flush read address (32bit) */ | ||
17 | #define SSB_GIGE_SHIM_FLUSHTO 0x0C08 /* PCI to OCP: Flush timeout counter (32bit) */ | ||
18 | #define SSB_GIGE_SHIM_BARRIER 0x0C0C /* PCI to OCP: Barrier register (32bit) */ | ||
19 | #define SSB_GIGE_SHIM_MAOCPSI 0x0C10 /* PCI to OCP: MaocpSI Control (32bit) */ | ||
20 | #define SSB_GIGE_SHIM_SIOCPMA 0x0C14 /* PCI to OCP: SiocpMa Control (32bit) */ | ||
21 | |||
22 | /* TM Status High flags */ | ||
23 | #define SSB_GIGE_TMSHIGH_RGMII 0x00010000 /* Have an RGMII PHY-bus */ | ||
24 | /* TM Status Low flags */ | ||
25 | #define SSB_GIGE_TMSLOW_TXBYPASS 0x00080000 /* TX bypass (no delay) */ | ||
26 | #define SSB_GIGE_TMSLOW_RXBYPASS 0x00100000 /* RX bypass (no delay) */ | ||
27 | #define SSB_GIGE_TMSLOW_DLLEN 0x01000000 /* Enable DLL controls */ | ||
28 | |||
29 | /* Boardflags (low) */ | ||
30 | #define SSB_GIGE_BFL_ROBOSWITCH 0x0010 | ||
31 | |||
32 | |||
33 | #define SSB_GIGE_MEM_RES_NAME "SSB Broadcom 47xx GigE memory" | ||
34 | #define SSB_GIGE_IO_RES_NAME "SSB Broadcom 47xx GigE I/O" | ||
35 | |||
36 | struct ssb_gige { | ||
37 | struct ssb_device *dev; | ||
38 | |||
39 | spinlock_t lock; | ||
40 | |||
41 | /* True, if the device has an RGMII bus. | ||
42 | * False, if the device has a GMII bus. */ | ||
43 | bool has_rgmii; | ||
44 | |||
45 | /* The PCI controller device. */ | ||
46 | struct pci_controller pci_controller; | ||
47 | struct pci_ops pci_ops; | ||
48 | struct resource mem_resource; | ||
49 | struct resource io_resource; | ||
50 | }; | ||
51 | |||
52 | /* Check whether a PCI device is a SSB Gigabit Ethernet core. */ | ||
53 | extern bool pdev_is_ssb_gige_core(struct pci_dev *pdev); | ||
54 | |||
55 | /* Convert a pci_dev pointer to a ssb_gige pointer. */ | ||
56 | static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) | ||
57 | { | ||
58 | if (!pdev_is_ssb_gige_core(pdev)) | ||
59 | return NULL; | ||
60 | return container_of(pdev->bus->ops, struct ssb_gige, pci_ops); | ||
61 | } | ||
62 | |||
63 | /* Returns whether the PHY is connected by an RGMII bus. */ | ||
64 | static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) | ||
65 | { | ||
66 | struct ssb_gige *dev = pdev_to_ssb_gige(pdev); | ||
67 | return (dev ? dev->has_rgmii : 0); | ||
68 | } | ||
69 | |||
70 | /* Returns whether we have a Roboswitch. */ | ||
71 | static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) | ||
72 | { | ||
73 | struct ssb_gige *dev = pdev_to_ssb_gige(pdev); | ||
74 | if (dev) | ||
75 | return !!(dev->dev->bus->sprom.boardflags_lo & | ||
76 | SSB_GIGE_BFL_ROBOSWITCH); | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | /* Returns whether we can only do one DMA at once. */ | ||
81 | static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) | ||
82 | { | ||
83 | struct ssb_gige *dev = pdev_to_ssb_gige(pdev); | ||
84 | if (dev) | ||
85 | return ((dev->dev->bus->chip_id == 0x4785) && | ||
86 | (dev->dev->bus->chip_rev < 2)); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | /* Returns whether we must flush posted writes. */ | ||
91 | static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) | ||
92 | { | ||
93 | struct ssb_gige *dev = pdev_to_ssb_gige(pdev); | ||
94 | if (dev) | ||
95 | return (dev->dev->bus->chip_id == 0x4785); | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | extern char * nvram_get(const char *name); | ||
100 | /* Get the device MAC address */ | ||
101 | static inline void ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr) | ||
102 | { | ||
103 | #ifdef CONFIG_BCM947XX | ||
104 | char *res = nvram_get("et0macaddr"); | ||
105 | if (res) | ||
106 | memcpy(macaddr, res, 6); | ||
107 | #endif | ||
108 | } | ||
109 | |||
110 | extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, | ||
111 | struct pci_dev *pdev); | ||
112 | extern int ssb_gige_map_irq(struct ssb_device *sdev, | ||
113 | const struct pci_dev *pdev); | ||
114 | |||
115 | /* The GigE driver is not a standalone module, because we don't have support | ||
116 | * for unregistering the driver. So we could not unload the module anyway. */ | ||
117 | extern int ssb_gige_init(void); | ||
118 | static inline void ssb_gige_exit(void) | ||
119 | { | ||
120 | /* Currently we can not unregister the GigE driver, | ||
121 | * because we can not unregister the PCI bridge. */ | ||
122 | BUG(); | ||
123 | } | ||
124 | |||
125 | |||
126 | #else /* CONFIG_SSB_DRIVER_GIGE */ | ||
127 | /* Gigabit Ethernet driver disabled */ | ||
128 | |||
129 | |||
130 | static inline int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev, | ||
131 | struct pci_dev *pdev) | ||
132 | { | ||
133 | return -ENOSYS; | ||
134 | } | ||
135 | static inline int ssb_gige_map_irq(struct ssb_device *sdev, | ||
136 | const struct pci_dev *pdev) | ||
137 | { | ||
138 | return -ENOSYS; | ||
139 | } | ||
140 | static inline int ssb_gige_init(void) | ||
141 | { | ||
142 | return 0; | ||
143 | } | ||
144 | static inline void ssb_gige_exit(void) | ||
145 | { | ||
146 | } | ||
147 | |||
148 | static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev) | ||
149 | { | ||
150 | return 0; | ||
151 | } | ||
152 | static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev) | ||
153 | { | ||
154 | return NULL; | ||
155 | } | ||
156 | static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev) | ||
157 | { | ||
158 | return 0; | ||
159 | } | ||
160 | static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev) | ||
161 | { | ||
162 | return 0; | ||
163 | } | ||
164 | static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev) | ||
165 | { | ||
166 | return 0; | ||
167 | } | ||
168 | static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) | ||
169 | { | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | #endif /* CONFIG_SSB_DRIVER_GIGE */ | ||
174 | #endif /* LINUX_SSB_DRIVER_GIGE_H_ */ | ||
diff --git a/include/linux/ssb/ssb_driver_pci.h b/include/linux/ssb/ssb_driver_pci.h index 5e25bac4ed31..41e330e51c2a 100644 --- a/include/linux/ssb/ssb_driver_pci.h +++ b/include/linux/ssb/ssb_driver_pci.h | |||
@@ -1,6 +1,11 @@ | |||
1 | #ifndef LINUX_SSB_PCICORE_H_ | 1 | #ifndef LINUX_SSB_PCICORE_H_ |
2 | #define LINUX_SSB_PCICORE_H_ | 2 | #define LINUX_SSB_PCICORE_H_ |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | |||
6 | struct pci_dev; | ||
7 | |||
8 | |||
4 | #ifdef CONFIG_SSB_DRIVER_PCICORE | 9 | #ifdef CONFIG_SSB_DRIVER_PCICORE |
5 | 10 | ||
6 | /* PCI core registers. */ | 11 | /* PCI core registers. */ |
@@ -88,6 +93,9 @@ extern void ssb_pcicore_init(struct ssb_pcicore *pc); | |||
88 | extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, | 93 | extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, |
89 | struct ssb_device *dev); | 94 | struct ssb_device *dev); |
90 | 95 | ||
96 | int ssb_pcicore_plat_dev_init(struct pci_dev *d); | ||
97 | int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); | ||
98 | |||
91 | 99 | ||
92 | #else /* CONFIG_SSB_DRIVER_PCICORE */ | 100 | #else /* CONFIG_SSB_DRIVER_PCICORE */ |
93 | 101 | ||
@@ -107,5 +115,16 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, | |||
107 | return 0; | 115 | return 0; |
108 | } | 116 | } |
109 | 117 | ||
118 | static inline | ||
119 | int ssb_pcicore_plat_dev_init(struct pci_dev *d) | ||
120 | { | ||
121 | return -ENODEV; | ||
122 | } | ||
123 | static inline | ||
124 | int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
125 | { | ||
126 | return -ENODEV; | ||
127 | } | ||
128 | |||
110 | #endif /* CONFIG_SSB_DRIVER_PCICORE */ | 129 | #endif /* CONFIG_SSB_DRIVER_PCICORE */ |
111 | #endif /* LINUX_SSB_PCICORE_H_ */ | 130 | #endif /* LINUX_SSB_PCICORE_H_ */ |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 08027f1d7f31..d96d9b122304 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -239,6 +239,11 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) | |||
239 | return (struct tcp_request_sock *)req; | 239 | return (struct tcp_request_sock *)req; |
240 | } | 240 | } |
241 | 241 | ||
242 | struct tcp_deferred_accept_info { | ||
243 | struct sock *listen_sk; | ||
244 | struct request_sock *request; | ||
245 | }; | ||
246 | |||
242 | struct tcp_sock { | 247 | struct tcp_sock { |
243 | /* inet_connection_sock has to be the first member of tcp_sock */ | 248 | /* inet_connection_sock has to be the first member of tcp_sock */ |
244 | struct inet_connection_sock inet_conn; | 249 | struct inet_connection_sock inet_conn; |
@@ -374,6 +379,8 @@ struct tcp_sock { | |||
374 | unsigned int keepalive_intvl; /* time interval between keep alive probes */ | 379 | unsigned int keepalive_intvl; /* time interval between keep alive probes */ |
375 | int linger2; | 380 | int linger2; |
376 | 381 | ||
382 | struct tcp_deferred_accept_info defer_tcp_accept; | ||
383 | |||
377 | unsigned long last_synq_overflow; | 384 | unsigned long last_synq_overflow; |
378 | 385 | ||
379 | u32 tso_deferred; | 386 | u32 tso_deferred; |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 818ca1cf0b6d..90c1c191ea69 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -50,7 +50,7 @@ struct thermal_cooling_device_ops { | |||
50 | }; | 50 | }; |
51 | 51 | ||
52 | #define THERMAL_TRIPS_NONE -1 | 52 | #define THERMAL_TRIPS_NONE -1 |
53 | #define THERMAL_MAX_TRIPS 10 | 53 | #define THERMAL_MAX_TRIPS 12 |
54 | #define THERMAL_NAME_LENGTH 20 | 54 | #define THERMAL_NAME_LENGTH 20 |
55 | struct thermal_cooling_device { | 55 | struct thermal_cooling_device { |
56 | int id; | 56 | int id; |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 421323e5a2d6..accd7bad35b0 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -9,6 +9,9 @@ | |||
9 | 9 | ||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | 11 | ||
12 | struct timespec; | ||
13 | struct compat_timespec; | ||
14 | |||
12 | /* | 15 | /* |
13 | * System call restart block. | 16 | * System call restart block. |
14 | */ | 17 | */ |
@@ -26,6 +29,15 @@ struct restart_block { | |||
26 | u32 bitset; | 29 | u32 bitset; |
27 | u64 time; | 30 | u64 time; |
28 | } futex; | 31 | } futex; |
32 | /* For nanosleep */ | ||
33 | struct { | ||
34 | clockid_t index; | ||
35 | struct timespec __user *rmtp; | ||
36 | #ifdef CONFIG_COMPAT | ||
37 | struct compat_timespec __user *compat_rmtp; | ||
38 | #endif | ||
39 | u64 expires; | ||
40 | } nanosleep; | ||
29 | }; | 41 | }; |
30 | }; | 42 | }; |
31 | 43 | ||
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h index 1d6cc22e5f42..6696cf79c4f7 100644 --- a/include/linux/transport_class.h +++ b/include/linux/transport_class.h | |||
@@ -86,9 +86,10 @@ static inline int transport_container_register(struct transport_container *tc) | |||
86 | return attribute_container_register(&tc->ac); | 86 | return attribute_container_register(&tc->ac); |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline int transport_container_unregister(struct transport_container *tc) | 89 | static inline void transport_container_unregister(struct transport_container *tc) |
90 | { | 90 | { |
91 | return attribute_container_unregister(&tc->ac); | 91 | if (unlikely(attribute_container_unregister(&tc->ac))) |
92 | BUG(); | ||
92 | } | 93 | } |
93 | 94 | ||
94 | int transport_class_register(struct transport_class *); | 95 | int transport_class_register(struct transport_class *); |
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 85c95cd39bc3..21f69aca4505 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -125,6 +125,7 @@ | |||
125 | #include <linux/cdev.h> | 125 | #include <linux/cdev.h> |
126 | 126 | ||
127 | struct tty_struct; | 127 | struct tty_struct; |
128 | struct tty_driver; | ||
128 | 129 | ||
129 | struct tty_operations { | 130 | struct tty_operations { |
130 | int (*open)(struct tty_struct * tty, struct file * filp); | 131 | int (*open)(struct tty_struct * tty, struct file * filp); |
@@ -157,6 +158,11 @@ struct tty_operations { | |||
157 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 158 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
158 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 159 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
159 | unsigned int set, unsigned int clear); | 160 | unsigned int set, unsigned int clear); |
161 | #ifdef CONFIG_CONSOLE_POLL | ||
162 | int (*poll_init)(struct tty_driver *driver, int line, char *options); | ||
163 | int (*poll_get_char)(struct tty_driver *driver, int line); | ||
164 | void (*poll_put_char)(struct tty_driver *driver, int line, char ch); | ||
165 | #endif | ||
160 | }; | 166 | }; |
161 | 167 | ||
162 | struct tty_driver { | 168 | struct tty_driver { |
@@ -220,6 +226,11 @@ struct tty_driver { | |||
220 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 226 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
221 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 227 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
222 | unsigned int set, unsigned int clear); | 228 | unsigned int set, unsigned int clear); |
229 | #ifdef CONFIG_CONSOLE_POLL | ||
230 | int (*poll_init)(struct tty_driver *driver, int line, char *options); | ||
231 | int (*poll_get_char)(struct tty_driver *driver, int line); | ||
232 | void (*poll_put_char)(struct tty_driver *driver, int line, char ch); | ||
233 | #endif | ||
223 | 234 | ||
224 | struct list_head tty_drivers; | 235 | struct list_head tty_drivers; |
225 | }; | 236 | }; |
@@ -230,6 +241,7 @@ struct tty_driver *alloc_tty_driver(int lines); | |||
230 | void put_tty_driver(struct tty_driver *driver); | 241 | void put_tty_driver(struct tty_driver *driver); |
231 | void tty_set_operations(struct tty_driver *driver, | 242 | void tty_set_operations(struct tty_driver *driver, |
232 | const struct tty_operations *op); | 243 | const struct tty_operations *op); |
244 | extern struct tty_driver *tty_find_polling_driver(char *name, int *line); | ||
233 | 245 | ||
234 | /* tty driver magic number */ | 246 | /* tty driver magic number */ |
235 | #define TTY_DRIVER_MAGIC 0x5402 | 247 | #define TTY_DRIVER_MAGIC 0x5402 |
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 975c963e5789..fec6decfb983 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
@@ -84,4 +84,26 @@ static inline unsigned long __copy_from_user_nocache(void *to, | |||
84 | ret; \ | 84 | ret; \ |
85 | }) | 85 | }) |
86 | 86 | ||
87 | /* | ||
88 | * probe_kernel_read(): safely attempt to read from a location | ||
89 | * @dst: pointer to the buffer that shall take the data | ||
90 | * @src: address to read from | ||
91 | * @size: size of the data chunk | ||
92 | * | ||
93 | * Safely read from address @src to the buffer at @dst. If a kernel fault | ||
94 | * happens, handle that and return -EFAULT. | ||
95 | */ | ||
96 | extern long probe_kernel_read(void *dst, void *src, size_t size); | ||
97 | |||
98 | /* | ||
99 | * probe_kernel_write(): safely attempt to write to a location | ||
100 | * @dst: address to write to | ||
101 | * @src: pointer to the data that shall be written | ||
102 | * @size: size of the data chunk | ||
103 | * | ||
104 | * Safely write to address @dst from the buffer at @src. If a kernel fault | ||
105 | * happens, handle that and return -EFAULT. | ||
106 | */ | ||
107 | extern long probe_kernel_write(void *dst, void *src, size_t size); | ||
108 | |||
87 | #endif /* __LINUX_UACCESS_H__ */ | 109 | #endif /* __LINUX_UACCESS_H__ */ |
diff --git a/include/linux/udp.h b/include/linux/udp.h index 8ec703f462da..581ca2c14c52 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h | |||
@@ -26,15 +26,6 @@ struct udphdr { | |||
26 | __sum16 check; | 26 | __sum16 check; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | #ifdef __KERNEL__ | ||
30 | #include <linux/skbuff.h> | ||
31 | |||
32 | static inline struct udphdr *udp_hdr(const struct sk_buff *skb) | ||
33 | { | ||
34 | return (struct udphdr *)skb_transport_header(skb); | ||
35 | } | ||
36 | #endif | ||
37 | |||
38 | /* UDP socket options */ | 29 | /* UDP socket options */ |
39 | #define UDP_CORK 1 /* Never send partially complete segments */ | 30 | #define UDP_CORK 1 /* Never send partially complete segments */ |
40 | #define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */ | 31 | #define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */ |
@@ -45,9 +36,14 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb) | |||
45 | #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ | 36 | #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ |
46 | 37 | ||
47 | #ifdef __KERNEL__ | 38 | #ifdef __KERNEL__ |
48 | #include <linux/types.h> | ||
49 | |||
50 | #include <net/inet_sock.h> | 39 | #include <net/inet_sock.h> |
40 | #include <linux/skbuff.h> | ||
41 | |||
42 | static inline struct udphdr *udp_hdr(const struct sk_buff *skb) | ||
43 | { | ||
44 | return (struct udphdr *)skb_transport_header(skb); | ||
45 | } | ||
46 | |||
51 | #define UDP_HTABLE_SIZE 128 | 47 | #define UDP_HTABLE_SIZE 128 |
52 | 48 | ||
53 | struct udp_sock { | 49 | struct udp_sock { |
@@ -82,6 +78,7 @@ static inline struct udp_sock *udp_sk(const struct sock *sk) | |||
82 | { | 78 | { |
83 | return (struct udp_sock *)sk; | 79 | return (struct udp_sock *)sk; |
84 | } | 80 | } |
81 | |||
85 | #define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag) | 82 | #define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag) |
86 | 83 | ||
87 | #endif | 84 | #endif |
diff --git a/include/linux/wireless.h b/include/linux/wireless.h index 3160dfed73ca..2864b1699ecc 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h | |||
@@ -455,6 +455,7 @@ | |||
455 | #define IW_MODE_REPEAT 4 /* Wireless Repeater (forwarder) */ | 455 | #define IW_MODE_REPEAT 4 /* Wireless Repeater (forwarder) */ |
456 | #define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */ | 456 | #define IW_MODE_SECOND 5 /* Secondary master/repeater (backup) */ |
457 | #define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */ | 457 | #define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */ |
458 | #define IW_MODE_MESH 7 /* Mesh (IEEE 802.11s) network */ | ||
458 | 459 | ||
459 | /* Statistics flags (bitmask in updated) */ | 460 | /* Statistics flags (bitmask in updated) */ |
460 | #define IW_QUAL_QUAL_UPDATED 0x01 /* Value was updated since last read */ | 461 | #define IW_QUAL_QUAL_UPDATED 0x01 /* Value was updated since last read */ |
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index e31b8c84f2c9..0c82c80b277f 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h | |||
@@ -113,7 +113,8 @@ enum | |||
113 | { | 113 | { |
114 | XFRM_POLICY_TYPE_MAIN = 0, | 114 | XFRM_POLICY_TYPE_MAIN = 0, |
115 | XFRM_POLICY_TYPE_SUB = 1, | 115 | XFRM_POLICY_TYPE_SUB = 1, |
116 | XFRM_POLICY_TYPE_MAX = 2 | 116 | XFRM_POLICY_TYPE_MAX = 2, |
117 | XFRM_POLICY_TYPE_ANY = 255 | ||
117 | }; | 118 | }; |
118 | 119 | ||
119 | enum | 120 | enum |
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 496503c03846..0a2f0372df31 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h | |||
@@ -55,9 +55,12 @@ struct prefix_info { | |||
55 | extern int addrconf_init(void); | 55 | extern int addrconf_init(void); |
56 | extern void addrconf_cleanup(void); | 56 | extern void addrconf_cleanup(void); |
57 | 57 | ||
58 | extern int addrconf_add_ifaddr(void __user *arg); | 58 | extern int addrconf_add_ifaddr(struct net *net, |
59 | extern int addrconf_del_ifaddr(void __user *arg); | 59 | void __user *arg); |
60 | extern int addrconf_set_dstaddr(void __user *arg); | 60 | extern int addrconf_del_ifaddr(struct net *net, |
61 | void __user *arg); | ||
62 | extern int addrconf_set_dstaddr(struct net *net, | ||
63 | void __user *arg); | ||
61 | 64 | ||
62 | extern int ipv6_chk_addr(struct net *net, | 65 | extern int ipv6_chk_addr(struct net *net, |
63 | struct in6_addr *addr, | 66 | struct in6_addr *addr, |
@@ -68,16 +71,18 @@ extern int ipv6_chk_addr(struct net *net, | |||
68 | extern int ipv6_chk_home_addr(struct net *net, | 71 | extern int ipv6_chk_home_addr(struct net *net, |
69 | struct in6_addr *addr); | 72 | struct in6_addr *addr); |
70 | #endif | 73 | #endif |
74 | |||
75 | extern int ipv6_chk_prefix(struct in6_addr *addr, | ||
76 | struct net_device *dev); | ||
77 | |||
71 | extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, | 78 | extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, |
72 | struct in6_addr *addr, | 79 | const struct in6_addr *addr, |
73 | struct net_device *dev, | 80 | struct net_device *dev, |
74 | int strict); | 81 | int strict); |
75 | 82 | ||
76 | extern int ipv6_get_saddr(struct dst_entry *dst, | ||
77 | struct in6_addr *daddr, | ||
78 | struct in6_addr *saddr); | ||
79 | extern int ipv6_dev_get_saddr(struct net_device *dev, | 83 | extern int ipv6_dev_get_saddr(struct net_device *dev, |
80 | struct in6_addr *daddr, | 84 | const struct in6_addr *daddr, |
85 | unsigned int srcprefs, | ||
81 | struct in6_addr *saddr); | 86 | struct in6_addr *saddr); |
82 | extern int ipv6_get_lladdr(struct net_device *dev, | 87 | extern int ipv6_get_lladdr(struct net_device *dev, |
83 | struct in6_addr *addr, | 88 | struct in6_addr *addr, |
@@ -100,31 +105,31 @@ extern u32 ipv6_addr_label(const struct in6_addr *addr, | |||
100 | /* | 105 | /* |
101 | * multicast prototypes (mcast.c) | 106 | * multicast prototypes (mcast.c) |
102 | */ | 107 | */ |
103 | extern int ipv6_sock_mc_join(struct sock *sk, int ifindex, | 108 | extern int ipv6_sock_mc_join(struct sock *sk, int ifindex, |
104 | struct in6_addr *addr); | 109 | const struct in6_addr *addr); |
105 | extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex, | 110 | extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex, |
106 | struct in6_addr *addr); | 111 | const struct in6_addr *addr); |
107 | extern void ipv6_sock_mc_close(struct sock *sk); | 112 | extern void ipv6_sock_mc_close(struct sock *sk); |
108 | extern int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr, | 113 | extern int inet6_mc_check(struct sock *sk, |
109 | struct in6_addr *src_addr); | 114 | const struct in6_addr *mc_addr, |
115 | const struct in6_addr *src_addr); | ||
110 | 116 | ||
111 | extern int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr); | 117 | extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr); |
112 | extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr); | 118 | extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr); |
113 | extern int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr); | 119 | extern int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr); |
114 | extern void ipv6_mc_up(struct inet6_dev *idev); | 120 | extern void ipv6_mc_up(struct inet6_dev *idev); |
115 | extern void ipv6_mc_down(struct inet6_dev *idev); | 121 | extern void ipv6_mc_down(struct inet6_dev *idev); |
116 | extern void ipv6_mc_init_dev(struct inet6_dev *idev); | 122 | extern void ipv6_mc_init_dev(struct inet6_dev *idev); |
117 | extern void ipv6_mc_destroy_dev(struct inet6_dev *idev); | 123 | extern void ipv6_mc_destroy_dev(struct inet6_dev *idev); |
118 | extern void addrconf_dad_failure(struct inet6_ifaddr *ifp); | 124 | extern void addrconf_dad_failure(struct inet6_ifaddr *ifp); |
119 | 125 | ||
120 | extern int ipv6_chk_mcast_addr(struct net_device *dev, struct in6_addr *group, | 126 | extern int ipv6_chk_mcast_addr(struct net_device *dev, |
121 | struct in6_addr *src_addr); | 127 | const struct in6_addr *group, |
128 | const struct in6_addr *src_addr); | ||
122 | extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr); | 129 | extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr); |
123 | 130 | ||
124 | extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len); | 131 | extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len); |
125 | 132 | ||
126 | extern int ipv6_get_hoplimit(struct net_device *dev); | ||
127 | |||
128 | /* | 133 | /* |
129 | * anycast prototypes (anycast.c) | 134 | * anycast prototypes (anycast.c) |
130 | */ | 135 | */ |
@@ -135,7 +140,8 @@ extern int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex); | |||
135 | 140 | ||
136 | extern int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr); | 141 | extern int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr); |
137 | extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr); | 142 | extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr); |
138 | extern int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr); | 143 | extern int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, |
144 | struct in6_addr *addr); | ||
139 | 145 | ||
140 | 146 | ||
141 | /* Device notifier */ | 147 | /* Device notifier */ |
@@ -185,26 +191,6 @@ static inline void in6_ifa_put(struct inet6_ifaddr *ifp) | |||
185 | #define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt) | 191 | #define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt) |
186 | 192 | ||
187 | 193 | ||
188 | extern void addrconf_forwarding_on(void); | ||
189 | /* | ||
190 | * Hash function taken from net_alias.c | ||
191 | */ | ||
192 | |||
193 | static __inline__ u8 ipv6_addr_hash(const struct in6_addr *addr) | ||
194 | { | ||
195 | __u32 word; | ||
196 | |||
197 | /* | ||
198 | * We perform the hash function over the last 64 bits of the address | ||
199 | * This will include the IEEE address token on links that support it. | ||
200 | */ | ||
201 | |||
202 | word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]); | ||
203 | word ^= (word >> 16); | ||
204 | word ^= (word >> 8); | ||
205 | |||
206 | return ((word ^ (word >> 4)) & 0x0f); | ||
207 | } | ||
208 | 194 | ||
209 | /* | 195 | /* |
210 | * compute link-local solicited-node multicast address | 196 | * compute link-local solicited-node multicast address |
@@ -214,61 +200,31 @@ static inline void addrconf_addr_solict_mult(const struct in6_addr *addr, | |||
214 | struct in6_addr *solicited) | 200 | struct in6_addr *solicited) |
215 | { | 201 | { |
216 | ipv6_addr_set(solicited, | 202 | ipv6_addr_set(solicited, |
217 | __constant_htonl(0xFF020000), 0, | 203 | htonl(0xFF020000), 0, |
218 | __constant_htonl(0x1), | 204 | htonl(0x1), |
219 | __constant_htonl(0xFF000000) | addr->s6_addr32[3]); | 205 | htonl(0xFF000000) | addr->s6_addr32[3]); |
220 | } | ||
221 | |||
222 | |||
223 | static inline void ipv6_addr_all_nodes(struct in6_addr *addr) | ||
224 | { | ||
225 | ipv6_addr_set(addr, | ||
226 | __constant_htonl(0xFF020000), 0, 0, | ||
227 | __constant_htonl(0x1)); | ||
228 | } | ||
229 | |||
230 | static inline void ipv6_addr_all_routers(struct in6_addr *addr) | ||
231 | { | ||
232 | ipv6_addr_set(addr, | ||
233 | __constant_htonl(0xFF020000), 0, 0, | ||
234 | __constant_htonl(0x2)); | ||
235 | } | 206 | } |
236 | 207 | ||
237 | static inline int ipv6_addr_is_multicast(const struct in6_addr *addr) | 208 | static inline int ipv6_addr_is_multicast(const struct in6_addr *addr) |
238 | { | 209 | { |
239 | return (addr->s6_addr32[0] & __constant_htonl(0xFF000000)) == __constant_htonl(0xFF000000); | 210 | return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000); |
240 | } | 211 | } |
241 | 212 | ||
242 | static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr) | 213 | static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr) |
243 | { | 214 | { |
244 | return (addr->s6_addr32[0] == htonl(0xff020000) && | 215 | return (((addr->s6_addr32[0] ^ htonl(0xff020000)) | |
245 | addr->s6_addr32[1] == 0 && | 216 | addr->s6_addr32[1] | addr->s6_addr32[2] | |
246 | addr->s6_addr32[2] == 0 && | 217 | (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0); |
247 | addr->s6_addr32[3] == htonl(0x00000001)); | ||
248 | } | 218 | } |
249 | 219 | ||
250 | static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr) | 220 | static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr) |
251 | { | 221 | { |
252 | return (addr->s6_addr32[0] == htonl(0xff020000) && | 222 | return (((addr->s6_addr32[0] ^ htonl(0xff020000)) | |
253 | addr->s6_addr32[1] == 0 && | 223 | addr->s6_addr32[1] | addr->s6_addr32[2] | |
254 | addr->s6_addr32[2] == 0 && | 224 | (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0); |
255 | addr->s6_addr32[3] == htonl(0x00000002)); | ||
256 | } | 225 | } |
257 | 226 | ||
258 | static inline int ipv6_isatap_eui64(u8 *eui, __be32 addr) | 227 | extern int __ipv6_isatap_ifid(u8 *eui, __be32 addr); |
259 | { | ||
260 | eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) || | ||
261 | ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) || | ||
262 | ipv4_is_private_172(addr) || ipv4_is_test_192(addr) || | ||
263 | ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) || | ||
264 | ipv4_is_test_198(addr) || ipv4_is_multicast(addr) || | ||
265 | ipv4_is_lbcast(addr)) ? 0x00 : 0x02; | ||
266 | eui[1] = 0; | ||
267 | eui[2] = 0x5E; | ||
268 | eui[3] = 0xFE; | ||
269 | memcpy (eui+4, &addr, 4); | ||
270 | return 0; | ||
271 | } | ||
272 | 228 | ||
273 | static inline int ipv6_addr_is_isatap(const struct in6_addr *addr) | 229 | static inline int ipv6_addr_is_isatap(const struct in6_addr *addr) |
274 | { | 230 | { |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index bcc480b8892a..e00750836ba5 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -12,6 +12,16 @@ | |||
12 | * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> | 12 | * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> |
13 | */ | 13 | */ |
14 | 14 | ||
15 | /** | ||
16 | * struct vif_params - describes virtual interface parameters | ||
17 | * @mesh_id: mesh ID to use | ||
18 | * @mesh_id_len: length of the mesh ID | ||
19 | */ | ||
20 | struct vif_params { | ||
21 | u8 *mesh_id; | ||
22 | int mesh_id_len; | ||
23 | }; | ||
24 | |||
15 | /* Radiotap header iteration | 25 | /* Radiotap header iteration |
16 | * implemented in net/wireless/radiotap.c | 26 | * implemented in net/wireless/radiotap.c |
17 | * docs in Documentation/networking/radiotap-headers.txt | 27 | * docs in Documentation/networking/radiotap-headers.txt |
@@ -109,6 +119,19 @@ enum station_flags { | |||
109 | }; | 119 | }; |
110 | 120 | ||
111 | /** | 121 | /** |
122 | * enum plink_action - actions to perform in mesh peers | ||
123 | * | ||
124 | * @PLINK_ACTION_INVALID: action 0 is reserved | ||
125 | * @PLINK_ACTION_OPEN: start mesh peer link establishment | ||
126 | * @PLINK_ACTION_BLOCK: block traffic from this mesh peer | ||
127 | */ | ||
128 | enum plink_actions { | ||
129 | PLINK_ACTION_INVALID, | ||
130 | PLINK_ACTION_OPEN, | ||
131 | PLINK_ACTION_BLOCK, | ||
132 | }; | ||
133 | |||
134 | /** | ||
112 | * struct station_parameters - station parameters | 135 | * struct station_parameters - station parameters |
113 | * | 136 | * |
114 | * Used to change and create a new station. | 137 | * Used to change and create a new station. |
@@ -128,41 +151,124 @@ struct station_parameters { | |||
128 | int listen_interval; | 151 | int listen_interval; |
129 | u16 aid; | 152 | u16 aid; |
130 | u8 supported_rates_len; | 153 | u8 supported_rates_len; |
154 | u8 plink_action; | ||
131 | }; | 155 | }; |
132 | 156 | ||
133 | /** | 157 | /** |
134 | * enum station_stats_flags - station statistics flags | 158 | * enum station_info_flags - station information flags |
135 | * | 159 | * |
136 | * Used by the driver to indicate which info in &struct station_stats | 160 | * Used by the driver to indicate which info in &struct station_info |
137 | * it has filled in during get_station(). | 161 | * it has filled in during get_station() or dump_station(). |
138 | * | 162 | * |
139 | * @STATION_STAT_INACTIVE_TIME: @inactive_time filled | 163 | * @STATION_INFO_INACTIVE_TIME: @inactive_time filled |
140 | * @STATION_STAT_RX_BYTES: @rx_bytes filled | 164 | * @STATION_INFO_RX_BYTES: @rx_bytes filled |
141 | * @STATION_STAT_TX_BYTES: @tx_bytes filled | 165 | * @STATION_INFO_TX_BYTES: @tx_bytes filled |
166 | * @STATION_INFO_LLID: @llid filled | ||
167 | * @STATION_INFO_PLID: @plid filled | ||
168 | * @STATION_INFO_PLINK_STATE: @plink_state filled | ||
142 | */ | 169 | */ |
143 | enum station_stats_flags { | 170 | enum station_info_flags { |
144 | STATION_STAT_INACTIVE_TIME = 1<<0, | 171 | STATION_INFO_INACTIVE_TIME = 1<<0, |
145 | STATION_STAT_RX_BYTES = 1<<1, | 172 | STATION_INFO_RX_BYTES = 1<<1, |
146 | STATION_STAT_TX_BYTES = 1<<2, | 173 | STATION_INFO_TX_BYTES = 1<<2, |
174 | STATION_INFO_LLID = 1<<3, | ||
175 | STATION_INFO_PLID = 1<<4, | ||
176 | STATION_INFO_PLINK_STATE = 1<<5, | ||
147 | }; | 177 | }; |
148 | 178 | ||
149 | /** | 179 | /** |
150 | * struct station_stats - station statistics | 180 | * struct station_info - station information |
151 | * | 181 | * |
152 | * Station information filled by driver for get_station(). | 182 | * Station information filled by driver for get_station() and dump_station. |
153 | * | 183 | * |
154 | * @filled: bitflag of flags from &enum station_stats_flags | 184 | * @filled: bitflag of flags from &enum station_info_flags |
155 | * @inactive_time: time since last station activity (tx/rx) in milliseconds | 185 | * @inactive_time: time since last station activity (tx/rx) in milliseconds |
156 | * @rx_bytes: bytes received from this station | 186 | * @rx_bytes: bytes received from this station |
157 | * @tx_bytes: bytes transmitted to this station | 187 | * @tx_bytes: bytes transmitted to this station |
188 | * @llid: mesh local link id | ||
189 | * @plid: mesh peer link id | ||
190 | * @plink_state: mesh peer link state | ||
158 | */ | 191 | */ |
159 | struct station_stats { | 192 | struct station_info { |
160 | u32 filled; | 193 | u32 filled; |
161 | u32 inactive_time; | 194 | u32 inactive_time; |
162 | u32 rx_bytes; | 195 | u32 rx_bytes; |
163 | u32 tx_bytes; | 196 | u32 tx_bytes; |
197 | u16 llid; | ||
198 | u16 plid; | ||
199 | u8 plink_state; | ||
200 | }; | ||
201 | |||
202 | /** | ||
203 | * enum monitor_flags - monitor flags | ||
204 | * | ||
205 | * Monitor interface configuration flags. Note that these must be the bits | ||
206 | * according to the nl80211 flags. | ||
207 | * | ||
208 | * @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS | ||
209 | * @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP | ||
210 | * @MONITOR_FLAG_CONTROL: pass control frames | ||
211 | * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering | ||
212 | * @MONITOR_FLAG_COOK_FRAMES: report frames after processing | ||
213 | */ | ||
214 | enum monitor_flags { | ||
215 | MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL, | ||
216 | MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL, | ||
217 | MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL, | ||
218 | MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS, | ||
219 | MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES, | ||
164 | }; | 220 | }; |
165 | 221 | ||
222 | /** | ||
223 | * enum mpath_info_flags - mesh path information flags | ||
224 | * | ||
225 | * Used by the driver to indicate which info in &struct mpath_info it has filled | ||
226 | * in during get_mpath() or dump_mpath(). | ||
227 | * | ||
228 | * MPATH_INFO_FRAME_QLEN: @frame_qlen filled | ||
229 | * MPATH_INFO_DSN: @dsn filled | ||
230 | * MPATH_INFO_METRIC: @metric filled | ||
231 | * MPATH_INFO_EXPTIME: @exptime filled | ||
232 | * MPATH_INFO_DISCOVERY_TIMEOUT: @discovery_timeout filled | ||
233 | * MPATH_INFO_DISCOVERY_RETRIES: @discovery_retries filled | ||
234 | * MPATH_INFO_FLAGS: @flags filled | ||
235 | */ | ||
236 | enum mpath_info_flags { | ||
237 | MPATH_INFO_FRAME_QLEN = BIT(0), | ||
238 | MPATH_INFO_DSN = BIT(1), | ||
239 | MPATH_INFO_METRIC = BIT(2), | ||
240 | MPATH_INFO_EXPTIME = BIT(3), | ||
241 | MPATH_INFO_DISCOVERY_TIMEOUT = BIT(4), | ||
242 | MPATH_INFO_DISCOVERY_RETRIES = BIT(5), | ||
243 | MPATH_INFO_FLAGS = BIT(6), | ||
244 | }; | ||
245 | |||
246 | /** | ||
247 | * struct mpath_info - mesh path information | ||
248 | * | ||
249 | * Mesh path information filled by driver for get_mpath() and dump_mpath(). | ||
250 | * | ||
251 | * @filled: bitfield of flags from &enum mpath_info_flags | ||
252 | * @frame_qlen: number of queued frames for this destination | ||
253 | * @dsn: destination sequence number | ||
254 | * @metric: metric (cost) of this mesh path | ||
255 | * @exptime: expiration time for the mesh path from now, in msecs | ||
256 | * @flags: mesh path flags | ||
257 | * @discovery_timeout: total mesh path discovery timeout, in msecs | ||
258 | * @discovery_retries: mesh path discovery retries | ||
259 | */ | ||
260 | struct mpath_info { | ||
261 | u32 filled; | ||
262 | u32 frame_qlen; | ||
263 | u32 dsn; | ||
264 | u32 metric; | ||
265 | u32 exptime; | ||
266 | u32 discovery_timeout; | ||
267 | u8 discovery_retries; | ||
268 | u8 flags; | ||
269 | }; | ||
270 | |||
271 | |||
166 | /* from net/wireless.h */ | 272 | /* from net/wireless.h */ |
167 | struct wiphy; | 273 | struct wiphy; |
168 | 274 | ||
@@ -210,13 +316,17 @@ struct wiphy; | |||
210 | * @del_station: Remove a station; @mac may be NULL to remove all stations. | 316 | * @del_station: Remove a station; @mac may be NULL to remove all stations. |
211 | * | 317 | * |
212 | * @change_station: Modify a given station. | 318 | * @change_station: Modify a given station. |
319 | * | ||
320 | * @set_mesh_cfg: set mesh parameters (by now, just mesh id) | ||
213 | */ | 321 | */ |
214 | struct cfg80211_ops { | 322 | struct cfg80211_ops { |
215 | int (*add_virtual_intf)(struct wiphy *wiphy, char *name, | 323 | int (*add_virtual_intf)(struct wiphy *wiphy, char *name, |
216 | enum nl80211_iftype type); | 324 | enum nl80211_iftype type, u32 *flags, |
325 | struct vif_params *params); | ||
217 | int (*del_virtual_intf)(struct wiphy *wiphy, int ifindex); | 326 | int (*del_virtual_intf)(struct wiphy *wiphy, int ifindex); |
218 | int (*change_virtual_intf)(struct wiphy *wiphy, int ifindex, | 327 | int (*change_virtual_intf)(struct wiphy *wiphy, int ifindex, |
219 | enum nl80211_iftype type); | 328 | enum nl80211_iftype type, u32 *flags, |
329 | struct vif_params *params); | ||
220 | 330 | ||
221 | int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, | 331 | int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, |
222 | u8 key_index, u8 *mac_addr, | 332 | u8 key_index, u8 *mac_addr, |
@@ -244,7 +354,22 @@ struct cfg80211_ops { | |||
244 | int (*change_station)(struct wiphy *wiphy, struct net_device *dev, | 354 | int (*change_station)(struct wiphy *wiphy, struct net_device *dev, |
245 | u8 *mac, struct station_parameters *params); | 355 | u8 *mac, struct station_parameters *params); |
246 | int (*get_station)(struct wiphy *wiphy, struct net_device *dev, | 356 | int (*get_station)(struct wiphy *wiphy, struct net_device *dev, |
247 | u8 *mac, struct station_stats *stats); | 357 | u8 *mac, struct station_info *sinfo); |
358 | int (*dump_station)(struct wiphy *wiphy, struct net_device *dev, | ||
359 | int idx, u8 *mac, struct station_info *sinfo); | ||
360 | |||
361 | int (*add_mpath)(struct wiphy *wiphy, struct net_device *dev, | ||
362 | u8 *dst, u8 *next_hop); | ||
363 | int (*del_mpath)(struct wiphy *wiphy, struct net_device *dev, | ||
364 | u8 *dst); | ||
365 | int (*change_mpath)(struct wiphy *wiphy, struct net_device *dev, | ||
366 | u8 *dst, u8 *next_hop); | ||
367 | int (*get_mpath)(struct wiphy *wiphy, struct net_device *dev, | ||
368 | u8 *dst, u8 *next_hop, | ||
369 | struct mpath_info *pinfo); | ||
370 | int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, | ||
371 | int idx, u8 *dst, u8 *next_hop, | ||
372 | struct mpath_info *pinfo); | ||
248 | }; | 373 | }; |
249 | 374 | ||
250 | #endif /* __NET_CFG80211_H */ | 375 | #endif /* __NET_CFG80211_H */ |
diff --git a/include/net/dst.h b/include/net/dst.h index ae13370e8484..002500e631f5 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -163,15 +163,7 @@ struct dst_entry * dst_clone(struct dst_entry * dst) | |||
163 | return dst; | 163 | return dst; |
164 | } | 164 | } |
165 | 165 | ||
166 | static inline | 166 | extern void dst_release(struct dst_entry *dst); |
167 | void dst_release(struct dst_entry * dst) | ||
168 | { | ||
169 | if (dst) { | ||
170 | WARN_ON(atomic_read(&dst->__refcnt) < 1); | ||
171 | smp_mb__before_atomic_dec(); | ||
172 | atomic_dec(&dst->__refcnt); | ||
173 | } | ||
174 | } | ||
175 | 167 | ||
176 | /* Children define the path of the packet through the | 168 | /* Children define the path of the packet through the |
177 | * Linux networking. Thus, destinations are stackable. | 169 | * Linux networking. Thus, destinations are stackable. |
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 34349f9f4331..a5c6ccc5bb19 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h | |||
@@ -87,6 +87,7 @@ static inline void fib_rule_get(struct fib_rule *rule) | |||
87 | static inline void fib_rule_put_rcu(struct rcu_head *head) | 87 | static inline void fib_rule_put_rcu(struct rcu_head *head) |
88 | { | 88 | { |
89 | struct fib_rule *rule = container_of(head, struct fib_rule, rcu); | 89 | struct fib_rule *rule = container_of(head, struct fib_rule, rcu); |
90 | release_net(rule->fr_net); | ||
90 | kfree(rule); | 91 | kfree(rule); |
91 | } | 92 | } |
92 | 93 | ||
diff --git a/include/net/icmp.h b/include/net/icmp.h index 9f7ef3c8baef..dddb839ff4b5 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h | |||
@@ -48,7 +48,7 @@ struct sk_buff; | |||
48 | extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); | 48 | extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); |
49 | extern int icmp_rcv(struct sk_buff *skb); | 49 | extern int icmp_rcv(struct sk_buff *skb); |
50 | extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg); | 50 | extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg); |
51 | extern void icmp_init(struct net_proto_family *ops); | 51 | extern int icmp_init(void); |
52 | extern void icmp_out_count(unsigned char type); | 52 | extern void icmp_out_count(unsigned char type); |
53 | 53 | ||
54 | /* Move into dst.h ? */ | 54 | /* Move into dst.h ? */ |
@@ -65,11 +65,4 @@ static inline struct raw_sock *raw_sk(const struct sock *sk) | |||
65 | return (struct raw_sock *)sk; | 65 | return (struct raw_sock *)sk; |
66 | } | 66 | } |
67 | 67 | ||
68 | extern int sysctl_icmp_echo_ignore_all; | ||
69 | extern int sysctl_icmp_echo_ignore_broadcasts; | ||
70 | extern int sysctl_icmp_ignore_bogus_error_responses; | ||
71 | extern int sysctl_icmp_errors_use_inbound_ifaddr; | ||
72 | extern int sysctl_icmp_ratelimit; | ||
73 | extern int sysctl_icmp_ratemask; | ||
74 | |||
75 | #endif /* _ICMP_H */ | 68 | #endif /* _ICMP_H */ |
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index 285b2adfa648..529816bfbc52 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h | |||
@@ -183,7 +183,6 @@ const char *escape_essid(const char *essid, u8 essid_len); | |||
183 | #define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a) | 183 | #define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a) |
184 | #define IEEE80211_DEBUG_QOS(f, a...) IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a) | 184 | #define IEEE80211_DEBUG_QOS(f, a...) IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a) |
185 | #include <linux/netdevice.h> | 185 | #include <linux/netdevice.h> |
186 | #include <linux/wireless.h> | ||
187 | #include <linux/if_arp.h> /* ARPHRD_ETHER */ | 186 | #include <linux/if_arp.h> /* ARPHRD_ETHER */ |
188 | 187 | ||
189 | #ifndef WIRELESS_SPY | 188 | #ifndef WIRELESS_SPY |
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h deleted file mode 100644 index 1ef6282fdded..000000000000 --- a/include/net/ieee80211softmac.h +++ /dev/null | |||
@@ -1,373 +0,0 @@ | |||
1 | /* | ||
2 | * ieee80211softmac.h - public interface to the softmac | ||
3 | * | ||
4 | * Copyright (c) 2005 Johannes Berg <johannes@sipsolutions.net> | ||
5 | * Joseph Jezak <josejx@gentoo.org> | ||
6 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
7 | * Danny van Dyk <kugelfang@gentoo.org> | ||
8 | * Michael Buesch <mbuesch@freenet.de> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * | ||
23 | * The full GNU General Public License is included in this distribution in the | ||
24 | * file called COPYING. | ||
25 | */ | ||
26 | |||
27 | #ifndef IEEE80211SOFTMAC_H_ | ||
28 | #define IEEE80211SOFTMAC_H_ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/spinlock.h> | ||
32 | #include <linux/workqueue.h> | ||
33 | #include <linux/list.h> | ||
34 | #include <net/ieee80211.h> | ||
35 | |||
36 | /* Once the API is considered more or less stable, | ||
37 | * this should be incremented on API incompatible changes. | ||
38 | */ | ||
39 | #define IEEE80211SOFTMAC_API 0 | ||
40 | |||
41 | #define IEEE80211SOFTMAC_MAX_RATES_LEN 8 | ||
42 | #define IEEE80211SOFTMAC_MAX_EX_RATES_LEN 255 | ||
43 | |||
44 | struct ieee80211softmac_ratesinfo { | ||
45 | u8 count; | ||
46 | u8 rates[IEEE80211SOFTMAC_MAX_RATES_LEN + IEEE80211SOFTMAC_MAX_EX_RATES_LEN]; | ||
47 | }; | ||
48 | |||
49 | /* internal structures */ | ||
50 | struct ieee80211softmac_network; | ||
51 | struct ieee80211softmac_scaninfo; | ||
52 | |||
53 | struct ieee80211softmac_essid { | ||
54 | u8 len; | ||
55 | char data[IW_ESSID_MAX_SIZE+1]; | ||
56 | }; | ||
57 | |||
58 | struct ieee80211softmac_wpa { | ||
59 | char *IE; | ||
60 | int IElen; | ||
61 | int IEbuflen; | ||
62 | }; | ||
63 | |||
64 | /* | ||
65 | * Information about association | ||
66 | */ | ||
67 | struct ieee80211softmac_assoc_info { | ||
68 | |||
69 | struct mutex mutex; | ||
70 | |||
71 | /* | ||
72 | * This is the requested ESSID. It is written | ||
73 | * only by the WX handlers. | ||
74 | * | ||
75 | */ | ||
76 | struct ieee80211softmac_essid req_essid; | ||
77 | /* | ||
78 | * the ESSID of the network we're currently | ||
79 | * associated (or trying) to. This is | ||
80 | * updated to the network's actual ESSID | ||
81 | * even if the requested ESSID was 'ANY' | ||
82 | */ | ||
83 | struct ieee80211softmac_essid associate_essid; | ||
84 | |||
85 | /* BSSID we're trying to associate to */ | ||
86 | char bssid[ETH_ALEN]; | ||
87 | |||
88 | /* some flags. | ||
89 | * static_essid is valid if the essid is constant, | ||
90 | * this is for use by the wx handlers only. | ||
91 | * | ||
92 | * associating is true, if the network has been | ||
93 | * auth'ed on and we are in the process of associating. | ||
94 | * | ||
95 | * bssvalid is true if we found a matching network | ||
96 | * and saved it's BSSID into the bssid above. | ||
97 | * | ||
98 | * bssfixed is used for SIOCSIWAP. | ||
99 | */ | ||
100 | u8 static_essid; | ||
101 | u8 short_preamble_available; | ||
102 | u8 associating; | ||
103 | u8 associated; | ||
104 | u8 assoc_wait; | ||
105 | u8 bssvalid; | ||
106 | u8 bssfixed; | ||
107 | |||
108 | /* Scan retries remaining */ | ||
109 | int scan_retry; | ||
110 | |||
111 | struct delayed_work work; | ||
112 | struct delayed_work timeout; | ||
113 | }; | ||
114 | |||
115 | struct ieee80211softmac_bss_info { | ||
116 | /* Rates supported by the network */ | ||
117 | struct ieee80211softmac_ratesinfo supported_rates; | ||
118 | |||
119 | /* This indicates whether frames can currently be transmitted with | ||
120 | * short preamble (only use this variable during TX at CCK rates) */ | ||
121 | u8 short_preamble:1; | ||
122 | |||
123 | /* This indicates whether protection (e.g. self-CTS) should be used | ||
124 | * when transmitting with OFDM modulation */ | ||
125 | u8 use_protection:1; | ||
126 | }; | ||
127 | |||
128 | enum { | ||
129 | IEEE80211SOFTMAC_AUTH_OPEN_REQUEST = 1, | ||
130 | IEEE80211SOFTMAC_AUTH_OPEN_RESPONSE = 2, | ||
131 | }; | ||
132 | |||
133 | enum { | ||
134 | IEEE80211SOFTMAC_AUTH_SHARED_REQUEST = 1, | ||
135 | IEEE80211SOFTMAC_AUTH_SHARED_CHALLENGE = 2, | ||
136 | IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE = 3, | ||
137 | IEEE80211SOFTMAC_AUTH_SHARED_PASS = 4, | ||
138 | }; | ||
139 | |||
140 | /* We should make these tunable | ||
141 | * AUTH_TIMEOUT seems really long, but that's what it is in BSD */ | ||
142 | #define IEEE80211SOFTMAC_AUTH_TIMEOUT (12 * HZ) | ||
143 | #define IEEE80211SOFTMAC_AUTH_RETRY_LIMIT 5 | ||
144 | #define IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT 3 | ||
145 | |||
146 | struct ieee80211softmac_txrates { | ||
147 | /* The Bit-Rate to be used for multicast frames. */ | ||
148 | u8 mcast_rate; | ||
149 | |||
150 | /* The Bit-Rate to be used for multicast management frames. */ | ||
151 | u8 mgt_mcast_rate; | ||
152 | |||
153 | /* The Bit-Rate to be used for any other (normal) data packet. */ | ||
154 | u8 default_rate; | ||
155 | /* The Bit-Rate to be used for default fallback | ||
156 | * (If the device supports fallback and hardware-retry) | ||
157 | */ | ||
158 | u8 default_fallback; | ||
159 | |||
160 | /* This is the rate that the user asked for */ | ||
161 | u8 user_rate; | ||
162 | }; | ||
163 | |||
164 | /* Bits for txrates_change callback. */ | ||
165 | #define IEEE80211SOFTMAC_TXRATECHG_DEFAULT (1 << 0) /* default_rate */ | ||
166 | #define IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK (1 << 1) /* default_fallback */ | ||
167 | #define IEEE80211SOFTMAC_TXRATECHG_MCAST (1 << 2) /* mcast_rate */ | ||
168 | #define IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST (1 << 3) /* mgt_mcast_rate */ | ||
169 | |||
170 | #define IEEE80211SOFTMAC_BSSINFOCHG_RATES (1 << 0) /* supported_rates */ | ||
171 | #define IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE (1 << 1) /* short_preamble */ | ||
172 | #define IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION (1 << 2) /* use_protection */ | ||
173 | |||
174 | struct ieee80211softmac_device { | ||
175 | /* 802.11 structure for data stuff */ | ||
176 | struct ieee80211_device *ieee; | ||
177 | struct net_device *dev; | ||
178 | |||
179 | /* only valid if associated, then holds the Association ID */ | ||
180 | u16 association_id; | ||
181 | |||
182 | /* the following methods are callbacks that the driver | ||
183 | * using this framework has to assign | ||
184 | */ | ||
185 | |||
186 | /* always assign these */ | ||
187 | void (*set_bssid_filter)(struct net_device *dev, const u8 *bssid); | ||
188 | void (*set_channel)(struct net_device *dev, u8 channel); | ||
189 | |||
190 | /* assign if you need it, informational only */ | ||
191 | void (*link_change)(struct net_device *dev); | ||
192 | |||
193 | /* If the hardware can do scanning, assign _all_ three of these callbacks. | ||
194 | * When the scan finishes, call ieee80211softmac_scan_finished(). | ||
195 | */ | ||
196 | |||
197 | /* when called, start_scan is guaranteed to not be called again | ||
198 | * until you call ieee80211softmac_scan_finished. | ||
199 | * Return 0 if scanning could start, error otherwise. | ||
200 | * SOFTMAC AUTHORS: don't call this, use ieee80211softmac_start_scan */ | ||
201 | int (*start_scan)(struct net_device *dev); | ||
202 | /* this should block until after ieee80211softmac_scan_finished was called | ||
203 | * SOFTMAC AUTHORS: don't call this, use ieee80211softmac_wait_for_scan */ | ||
204 | void (*wait_for_scan)(struct net_device *dev); | ||
205 | /* stop_scan aborts a scan, but is asynchronous. | ||
206 | * if you want to wait for it too, use wait_for_scan | ||
207 | * SOFTMAC AUTHORS: don't call this, use ieee80211softmac_stop_scan */ | ||
208 | void (*stop_scan)(struct net_device *dev); | ||
209 | |||
210 | /* we'll need something about beacons here too, for AP or ad-hoc modes */ | ||
211 | |||
212 | /* Transmission rates to be used by the driver. | ||
213 | * The SoftMAC figures out the best possible rates. | ||
214 | * The driver just needs to read them. | ||
215 | */ | ||
216 | struct ieee80211softmac_txrates txrates; | ||
217 | |||
218 | /* If the driver needs to do stuff on TX rate changes, assign this | ||
219 | * callback. See IEEE80211SOFTMAC_TXRATECHG for change flags. */ | ||
220 | void (*txrates_change)(struct net_device *dev, | ||
221 | u32 changes); | ||
222 | |||
223 | /* If the driver needs to do stuff when BSS properties change, assign | ||
224 | * this callback. see IEEE80211SOFTMAC_BSSINFOCHG for change flags. */ | ||
225 | void (*bssinfo_change)(struct net_device *dev, | ||
226 | u32 changes); | ||
227 | |||
228 | /* private stuff follows */ | ||
229 | /* this lock protects this structure */ | ||
230 | spinlock_t lock; | ||
231 | |||
232 | struct workqueue_struct *wq; | ||
233 | |||
234 | u8 running; /* SoftMAC started? */ | ||
235 | u8 scanning; | ||
236 | |||
237 | struct ieee80211softmac_scaninfo *scaninfo; | ||
238 | struct ieee80211softmac_assoc_info associnfo; | ||
239 | struct ieee80211softmac_bss_info bssinfo; | ||
240 | |||
241 | struct list_head auth_queue; | ||
242 | struct list_head events; | ||
243 | |||
244 | struct ieee80211softmac_ratesinfo ratesinfo; | ||
245 | int txrate_badness; | ||
246 | |||
247 | /* WPA stuff */ | ||
248 | struct ieee80211softmac_wpa wpa; | ||
249 | |||
250 | /* we need to keep a list of network structs we copied */ | ||
251 | struct list_head network_list; | ||
252 | |||
253 | /* This must be the last item so that it points to the data | ||
254 | * allocated beyond this structure by alloc_ieee80211 */ | ||
255 | u8 priv[0]; | ||
256 | }; | ||
257 | |||
258 | extern void ieee80211softmac_scan_finished(struct ieee80211softmac_device *sm); | ||
259 | |||
260 | static inline void * ieee80211softmac_priv(struct net_device *dev) | ||
261 | { | ||
262 | return ((struct ieee80211softmac_device *)ieee80211_priv(dev))->priv; | ||
263 | } | ||
264 | |||
265 | extern struct net_device * alloc_ieee80211softmac(int sizeof_priv); | ||
266 | extern void free_ieee80211softmac(struct net_device *dev); | ||
267 | |||
268 | /* Call this function if you detect a lost TX fragment. | ||
269 | * (If the device indicates failure of ACK RX, for example.) | ||
270 | * It is wise to call this function if you are able to detect lost packets, | ||
271 | * because it contributes to the TX Rates auto adjustment. | ||
272 | */ | ||
273 | extern void ieee80211softmac_fragment_lost(struct net_device *dev, | ||
274 | u16 wireless_sequence_number); | ||
275 | /* Call this function before _start to tell the softmac what rates | ||
276 | * the hw supports. The rates parameter is copied, so you can | ||
277 | * free it right after calling this function. | ||
278 | * Note that the rates need to be sorted. */ | ||
279 | extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates); | ||
280 | |||
281 | /* Finds the highest rate which is: | ||
282 | * 1. Present in ri (optionally a basic rate) | ||
283 | * 2. Supported by the device | ||
284 | * 3. Less than or equal to the user-defined rate | ||
285 | */ | ||
286 | extern u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac, | ||
287 | struct ieee80211softmac_ratesinfo *ri, int basic_only); | ||
288 | |||
289 | /* Helper function which advises you the rate at which a frame should be | ||
290 | * transmitted at. */ | ||
291 | static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device *mac, | ||
292 | int is_multicast, | ||
293 | int is_mgt) | ||
294 | { | ||
295 | struct ieee80211softmac_txrates *txrates = &mac->txrates; | ||
296 | |||
297 | if (!mac->associnfo.associated) | ||
298 | return txrates->mgt_mcast_rate; | ||
299 | |||
300 | /* We are associated, sending unicast frame */ | ||
301 | if (!is_multicast) | ||
302 | return txrates->default_rate; | ||
303 | |||
304 | /* We are associated, sending multicast frame */ | ||
305 | if (is_mgt) | ||
306 | return txrates->mgt_mcast_rate; | ||
307 | else | ||
308 | return txrates->mcast_rate; | ||
309 | } | ||
310 | |||
311 | /* Helper function which advises you when it is safe to transmit with short | ||
312 | * preamble. | ||
313 | * You should only call this function when transmitting at CCK rates. */ | ||
314 | static inline int ieee80211softmac_short_preamble_ok(struct ieee80211softmac_device *mac, | ||
315 | int is_multicast, | ||
316 | int is_mgt) | ||
317 | { | ||
318 | return (is_multicast && is_mgt) ? 0 : mac->bssinfo.short_preamble; | ||
319 | } | ||
320 | |||
321 | /* Helper function which advises you whether protection (e.g. self-CTS) is | ||
322 | * needed. 1 = protection needed, 0 = no protection needed | ||
323 | * Only use this function when transmitting with OFDM modulation. */ | ||
324 | static inline int ieee80211softmac_protection_needed(struct ieee80211softmac_device *mac) | ||
325 | { | ||
326 | return mac->bssinfo.use_protection; | ||
327 | } | ||
328 | |||
329 | /* Start the SoftMAC. Call this after you initialized the device | ||
330 | * and it is ready to run. | ||
331 | */ | ||
332 | extern void ieee80211softmac_start(struct net_device *dev); | ||
333 | /* Stop the SoftMAC. Call this before you shutdown the device. */ | ||
334 | extern void ieee80211softmac_stop(struct net_device *dev); | ||
335 | |||
336 | /* | ||
337 | * Event system | ||
338 | */ | ||
339 | |||
340 | /* valid event types */ | ||
341 | #define IEEE80211SOFTMAC_EVENT_ANY -1 /*private use only*/ | ||
342 | #define IEEE80211SOFTMAC_EVENT_SCAN_FINISHED 0 | ||
343 | #define IEEE80211SOFTMAC_EVENT_ASSOCIATED 1 | ||
344 | #define IEEE80211SOFTMAC_EVENT_ASSOCIATE_FAILED 2 | ||
345 | #define IEEE80211SOFTMAC_EVENT_ASSOCIATE_TIMEOUT 3 | ||
346 | #define IEEE80211SOFTMAC_EVENT_AUTHENTICATED 4 | ||
347 | #define IEEE80211SOFTMAC_EVENT_AUTH_FAILED 5 | ||
348 | #define IEEE80211SOFTMAC_EVENT_AUTH_TIMEOUT 6 | ||
349 | #define IEEE80211SOFTMAC_EVENT_ASSOCIATE_NET_NOT_FOUND 7 | ||
350 | #define IEEE80211SOFTMAC_EVENT_DISASSOCIATED 8 | ||
351 | /* keep this updated! */ | ||
352 | #define IEEE80211SOFTMAC_EVENT_LAST 8 | ||
353 | /* | ||
354 | * If you want to be notified of certain events, you can call | ||
355 | * ieee80211softmac_notify[_atomic] with | ||
356 | * - event set to one of the constants below | ||
357 | * - fun set to a function pointer of the appropriate type | ||
358 | * - context set to the context data you want passed | ||
359 | * The return value is 0, or an error. | ||
360 | */ | ||
361 | typedef void (*notify_function_ptr)(struct net_device *dev, int event_type, void *context); | ||
362 | |||
363 | #define ieee80211softmac_notify(dev, event, fun, context) ieee80211softmac_notify_gfp(dev, event, fun, context, GFP_KERNEL); | ||
364 | #define ieee80211softmac_notify_atomic(dev, event, fun, context) ieee80211softmac_notify_gfp(dev, event, fun, context, GFP_ATOMIC); | ||
365 | |||
366 | extern int ieee80211softmac_notify_gfp(struct net_device *dev, | ||
367 | int event, notify_function_ptr fun, void *context, gfp_t gfp_mask); | ||
368 | |||
369 | /* To clear pending work (for ifconfig down, etc.) */ | ||
370 | extern void | ||
371 | ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm); | ||
372 | |||
373 | #endif /* IEEE80211SOFTMAC_H_ */ | ||
diff --git a/include/net/ieee80211softmac_wx.h b/include/net/ieee80211softmac_wx.h deleted file mode 100644 index 4ee3ad57283f..000000000000 --- a/include/net/ieee80211softmac_wx.h +++ /dev/null | |||
@@ -1,99 +0,0 @@ | |||
1 | /* | ||
2 | * This file contains the prototypes for the wireless extension | ||
3 | * handlers that the softmac API provides. Include this file to | ||
4 | * use the wx handlers, you can assign these directly. | ||
5 | * | ||
6 | * Copyright (c) 2005 Johannes Berg <johannes@sipsolutions.net> | ||
7 | * Joseph Jezak <josejx@gentoo.org> | ||
8 | * Larry Finger <Larry.Finger@lwfinger.net> | ||
9 | * Danny van Dyk <kugelfang@gentoo.org> | ||
10 | * Michael Buesch <mbuesch@freenet.de> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of version 2 of the GNU General Public License as | ||
14 | * published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
19 | * more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
24 | * | ||
25 | * The full GNU General Public License is included in this distribution in the | ||
26 | * file called COPYING. | ||
27 | */ | ||
28 | |||
29 | #ifndef _IEEE80211SOFTMAC_WX_H | ||
30 | #define _IEEE80211SOFTMAC_WX_H | ||
31 | |||
32 | #include <net/ieee80211softmac.h> | ||
33 | #include <net/iw_handler.h> | ||
34 | |||
35 | extern int | ||
36 | ieee80211softmac_wx_trigger_scan(struct net_device *net_dev, | ||
37 | struct iw_request_info *info, | ||
38 | union iwreq_data *data, | ||
39 | char *extra); | ||
40 | |||
41 | extern int | ||
42 | ieee80211softmac_wx_get_scan_results(struct net_device *net_dev, | ||
43 | struct iw_request_info *info, | ||
44 | union iwreq_data *data, | ||
45 | char *extra); | ||
46 | |||
47 | extern int | ||
48 | ieee80211softmac_wx_set_essid(struct net_device *net_dev, | ||
49 | struct iw_request_info *info, | ||
50 | union iwreq_data *data, | ||
51 | char *extra); | ||
52 | |||
53 | extern int | ||
54 | ieee80211softmac_wx_get_essid(struct net_device *net_dev, | ||
55 | struct iw_request_info *info, | ||
56 | union iwreq_data *data, | ||
57 | char *extra); | ||
58 | |||
59 | extern int | ||
60 | ieee80211softmac_wx_set_rate(struct net_device *net_dev, | ||
61 | struct iw_request_info *info, | ||
62 | union iwreq_data *data, | ||
63 | char *extra); | ||
64 | |||
65 | extern int | ||
66 | ieee80211softmac_wx_get_rate(struct net_device *net_dev, | ||
67 | struct iw_request_info *info, | ||
68 | union iwreq_data *data, | ||
69 | char *extra); | ||
70 | |||
71 | extern int | ||
72 | ieee80211softmac_wx_get_wap(struct net_device *net_dev, | ||
73 | struct iw_request_info *info, | ||
74 | union iwreq_data *data, | ||
75 | char *extra); | ||
76 | |||
77 | extern int | ||
78 | ieee80211softmac_wx_set_wap(struct net_device *net_dev, | ||
79 | struct iw_request_info *info, | ||
80 | union iwreq_data *data, | ||
81 | char *extra); | ||
82 | |||
83 | extern int | ||
84 | ieee80211softmac_wx_set_genie(struct net_device *dev, | ||
85 | struct iw_request_info *info, | ||
86 | union iwreq_data *wrqu, | ||
87 | char *extra); | ||
88 | |||
89 | extern int | ||
90 | ieee80211softmac_wx_get_genie(struct net_device *dev, | ||
91 | struct iw_request_info *info, | ||
92 | union iwreq_data *wrqu, | ||
93 | char *extra); | ||
94 | extern int | ||
95 | ieee80211softmac_wx_set_mlme(struct net_device *dev, | ||
96 | struct iw_request_info *info, | ||
97 | union iwreq_data *wrqu, | ||
98 | char *extra); | ||
99 | #endif /* _IEEE80211SOFTMAC_WX */ | ||
diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 38d5a1e9980d..18c773286b91 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h | |||
@@ -39,6 +39,17 @@ extern int inet_getname(struct socket *sock, | |||
39 | extern int inet_ioctl(struct socket *sock, | 39 | extern int inet_ioctl(struct socket *sock, |
40 | unsigned int cmd, unsigned long arg); | 40 | unsigned int cmd, unsigned long arg); |
41 | 41 | ||
42 | extern int inet_ctl_sock_create(struct sock **sk, | ||
43 | unsigned short family, | ||
44 | unsigned short type, | ||
45 | unsigned char protocol, | ||
46 | struct net *net); | ||
47 | |||
48 | static inline void inet_ctl_sock_destroy(struct sock *sk) | ||
49 | { | ||
50 | sk_release_kernel(sk); | ||
51 | } | ||
52 | |||
42 | #endif | 53 | #endif |
43 | 54 | ||
44 | 55 | ||
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index f00f0573627b..2ff545a56fb5 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h | |||
@@ -327,11 +327,6 @@ extern void inet_csk_listen_stop(struct sock *sk); | |||
327 | 327 | ||
328 | extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); | 328 | extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); |
329 | 329 | ||
330 | extern int inet_csk_ctl_sock_create(struct socket **sock, | ||
331 | unsigned short family, | ||
332 | unsigned short type, | ||
333 | unsigned char protocol); | ||
334 | |||
335 | extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, | 330 | extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, |
336 | char __user *optval, int __user *optlen); | 331 | char __user *optval, int __user *optlen); |
337 | extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, | 332 | extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, |
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index ba33db053854..7040a782c656 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h | |||
@@ -47,7 +47,7 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) | |||
47 | } while (0) | 47 | } while (0) |
48 | 48 | ||
49 | #define IP6_ECN_flow_xmit(sk, label) do { \ | 49 | #define IP6_ECN_flow_xmit(sk, label) do { \ |
50 | if (INET_ECN_is_capable(inet_sk(sk)->tos)) \ | 50 | if (INET_ECN_is_capable(inet6_sk(sk)->tclass)) \ |
51 | (label) |= htonl(INET_ECN_ECT_0 << 20); \ | 51 | (label) |= htonl(INET_ECN_ECT_0 << 20); \ |
52 | } while (0) | 52 | } while (0) |
53 | 53 | ||
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 7374251b9787..e081eefd6f47 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h | |||
@@ -25,9 +25,9 @@ struct inet_frag_queue { | |||
25 | int meat; | 25 | int meat; |
26 | __u8 last_in; /* first/last segment arrived? */ | 26 | __u8 last_in; /* first/last segment arrived? */ |
27 | 27 | ||
28 | #define COMPLETE 4 | 28 | #define INET_FRAG_COMPLETE 4 |
29 | #define FIRST_IN 2 | 29 | #define INET_FRAG_FIRST_IN 2 |
30 | #define LAST_IN 1 | 30 | #define INET_FRAG_LAST_IN 1 |
31 | }; | 31 | }; |
32 | 32 | ||
33 | #define INETFRAGS_HASHSZ 64 | 33 | #define INETFRAGS_HASHSZ 64 |
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 97dc35ad09be..735b926a3497 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h | |||
@@ -221,26 +221,7 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | /* Caller must disable local BH processing. */ | 223 | /* Caller must disable local BH processing. */ |
224 | static inline void __inet_inherit_port(struct sock *sk, struct sock *child) | 224 | extern void __inet_inherit_port(struct sock *sk, struct sock *child); |
225 | { | ||
226 | struct inet_hashinfo *table = sk->sk_prot->hashinfo; | ||
227 | const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); | ||
228 | struct inet_bind_hashbucket *head = &table->bhash[bhash]; | ||
229 | struct inet_bind_bucket *tb; | ||
230 | |||
231 | spin_lock(&head->lock); | ||
232 | tb = inet_csk(sk)->icsk_bind_hash; | ||
233 | sk_add_bind_node(child, &tb->owners); | ||
234 | inet_csk(child)->icsk_bind_hash = tb; | ||
235 | spin_unlock(&head->lock); | ||
236 | } | ||
237 | |||
238 | static inline void inet_inherit_port(struct sock *sk, struct sock *child) | ||
239 | { | ||
240 | local_bh_disable(); | ||
241 | __inet_inherit_port(sk, child); | ||
242 | local_bh_enable(); | ||
243 | } | ||
244 | 225 | ||
245 | extern void inet_put_port(struct sock *sk); | 226 | extern void inet_put_port(struct sock *sk); |
246 | 227 | ||
@@ -314,25 +295,25 @@ typedef __u64 __bitwise __addrpair; | |||
314 | ((__force __u64)(__be32)(__saddr))); | 295 | ((__force __u64)(__be32)(__saddr))); |
315 | #endif /* __BIG_ENDIAN */ | 296 | #endif /* __BIG_ENDIAN */ |
316 | #define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ | 297 | #define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ |
317 | (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ | 298 | (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ |
318 | ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ | 299 | ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ |
319 | ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \ | 300 | ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \ |
320 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | 301 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) |
321 | #define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ | 302 | #define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ |
322 | (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ | 303 | (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ |
323 | ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ | 304 | ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ |
324 | ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ | 305 | ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ |
325 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | 306 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) |
326 | #else /* 32-bit arch */ | 307 | #else /* 32-bit arch */ |
327 | #define INET_ADDR_COOKIE(__name, __saddr, __daddr) | 308 | #define INET_ADDR_COOKIE(__name, __saddr, __daddr) |
328 | #define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \ | 309 | #define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \ |
329 | (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ | 310 | (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ |
330 | (inet_sk(__sk)->daddr == (__saddr)) && \ | 311 | (inet_sk(__sk)->daddr == (__saddr)) && \ |
331 | (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ | 312 | (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ |
332 | ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \ | 313 | ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) && \ |
333 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | 314 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) |
334 | #define INET_TW_MATCH(__sk, __net, __hash,__cookie, __saddr, __daddr, __ports, __dif) \ | 315 | #define INET_TW_MATCH(__sk, __net, __hash,__cookie, __saddr, __daddr, __ports, __dif) \ |
335 | (((__sk)->sk_hash == (__hash)) && ((__sk)->sk_net == (__net)) && \ | 316 | (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ |
336 | (inet_twsk(__sk)->tw_daddr == (__saddr)) && \ | 317 | (inet_twsk(__sk)->tw_daddr == (__saddr)) && \ |
337 | (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ | 318 | (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ |
338 | ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ | 319 | ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ |
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 89cd011edb99..a42cd63d241a 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
@@ -43,8 +43,7 @@ struct ip_options { | |||
43 | unsigned char srr; | 43 | unsigned char srr; |
44 | unsigned char rr; | 44 | unsigned char rr; |
45 | unsigned char ts; | 45 | unsigned char ts; |
46 | unsigned char is_data:1, | 46 | unsigned char is_strictroute:1, |
47 | is_strictroute:1, | ||
48 | srr_is_hit:1, | 47 | srr_is_hit:1, |
49 | is_changed:1, | 48 | is_changed:1, |
50 | rr_needaddr:1, | 49 | rr_needaddr:1, |
@@ -137,7 +136,7 @@ struct inet_sock { | |||
137 | unsigned int flags; | 136 | unsigned int flags; |
138 | unsigned int fragsize; | 137 | unsigned int fragsize; |
139 | struct ip_options *opt; | 138 | struct ip_options *opt; |
140 | struct rtable *rt; | 139 | struct dst_entry *dst; |
141 | int length; /* Total length of all frames */ | 140 | int length; /* Total length of all frames */ |
142 | __be32 addr; | 141 | __be32 addr; |
143 | struct flowi fl; | 142 | struct flowi fl; |
@@ -195,7 +194,7 @@ static inline int inet_sk_ehashfn(const struct sock *sk) | |||
195 | 194 | ||
196 | static inline int inet_iif(const struct sk_buff *skb) | 195 | static inline int inet_iif(const struct sk_buff *skb) |
197 | { | 196 | { |
198 | return ((struct rtable *)skb->dst)->rt_iif; | 197 | return skb->rtable->rt_iif; |
199 | } | 198 | } |
200 | 199 | ||
201 | #endif /* _INET_SOCK_H */ | 200 | #endif /* _INET_SOCK_H */ |
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 296547bfb0b7..95c660c9719b 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h | |||
@@ -207,4 +207,22 @@ extern void inet_twsk_schedule(struct inet_timewait_sock *tw, | |||
207 | const int timeo, const int timewait_len); | 207 | const int timeo, const int timewait_len); |
208 | extern void inet_twsk_deschedule(struct inet_timewait_sock *tw, | 208 | extern void inet_twsk_deschedule(struct inet_timewait_sock *tw, |
209 | struct inet_timewait_death_row *twdr); | 209 | struct inet_timewait_death_row *twdr); |
210 | |||
211 | static inline | ||
212 | struct net *twsk_net(const struct inet_timewait_sock *twsk) | ||
213 | { | ||
214 | #ifdef CONFIG_NET_NS | ||
215 | return twsk->tw_net; | ||
216 | #else | ||
217 | return &init_net; | ||
218 | #endif | ||
219 | } | ||
220 | |||
221 | static inline | ||
222 | void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net) | ||
223 | { | ||
224 | #ifdef CONFIG_NET_NS | ||
225 | twsk->tw_net = net; | ||
226 | #endif | ||
227 | } | ||
210 | #endif /* _INET_TIMEWAIT_SOCK_ */ | 228 | #endif /* _INET_TIMEWAIT_SOCK_ */ |
diff --git a/include/net/ip.h b/include/net/ip.h index 9f50d4f1f157..6d7bcd5e62d4 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -347,10 +347,11 @@ extern int ip_forward(struct sk_buff *skb); | |||
347 | extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag); | 347 | extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag); |
348 | extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb); | 348 | extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb); |
349 | extern void ip_options_fragment(struct sk_buff *skb); | 349 | extern void ip_options_fragment(struct sk_buff *skb); |
350 | extern int ip_options_compile(struct ip_options *opt, struct sk_buff *skb); | 350 | extern int ip_options_compile(struct net *net, |
351 | extern int ip_options_get(struct ip_options **optp, | 351 | struct ip_options *opt, struct sk_buff *skb); |
352 | extern int ip_options_get(struct net *net, struct ip_options **optp, | ||
352 | unsigned char *data, int optlen); | 353 | unsigned char *data, int optlen); |
353 | extern int ip_options_get_from_user(struct ip_options **optp, | 354 | extern int ip_options_get_from_user(struct net *net, struct ip_options **optp, |
354 | unsigned char __user *data, int optlen); | 355 | unsigned char __user *data, int optlen); |
355 | extern void ip_options_undo(struct ip_options * opt); | 356 | extern void ip_options_undo(struct ip_options * opt); |
356 | extern void ip_forward_options(struct sk_buff *skb); | 357 | extern void ip_forward_options(struct sk_buff *skb); |
@@ -361,7 +362,8 @@ extern int ip_options_rcv_srr(struct sk_buff *skb); | |||
361 | */ | 362 | */ |
362 | 363 | ||
363 | extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); | 364 | extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); |
364 | extern int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc); | 365 | extern int ip_cmsg_send(struct net *net, |
366 | struct msghdr *msg, struct ipcm_cookie *ipc); | ||
365 | extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen); | 367 | extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen); |
366 | extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); | 368 | extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); |
367 | extern int compat_ip_setsockopt(struct sock *sk, int level, | 369 | extern int compat_ip_setsockopt(struct sock *sk, int level, |
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 953d6040ff50..7c5c0f79168a 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h | |||
@@ -174,17 +174,19 @@ struct fib6_table { | |||
174 | #define RT6_TABLE_LOCAL RT6_TABLE_MAIN | 174 | #define RT6_TABLE_LOCAL RT6_TABLE_MAIN |
175 | #endif | 175 | #endif |
176 | 176 | ||
177 | typedef struct rt6_info *(*pol_lookup_t)(struct fib6_table *, | 177 | typedef struct rt6_info *(*pol_lookup_t)(struct net *, |
178 | struct fib6_table *, | ||
178 | struct flowi *, int); | 179 | struct flowi *, int); |
179 | 180 | ||
180 | /* | 181 | /* |
181 | * exported functions | 182 | * exported functions |
182 | */ | 183 | */ |
183 | 184 | ||
184 | extern struct fib6_table * fib6_get_table(u32 id); | 185 | extern struct fib6_table *fib6_get_table(struct net *net, u32 id); |
185 | extern struct fib6_table * fib6_new_table(u32 id); | 186 | extern struct fib6_table *fib6_new_table(struct net *net, u32 id); |
186 | extern struct dst_entry * fib6_rule_lookup(struct flowi *fl, int flags, | 187 | extern struct dst_entry *fib6_rule_lookup(struct net *net, |
187 | pol_lookup_t lookup); | 188 | struct flowi *fl, int flags, |
189 | pol_lookup_t lookup); | ||
188 | 190 | ||
189 | extern struct fib6_node *fib6_lookup(struct fib6_node *root, | 191 | extern struct fib6_node *fib6_lookup(struct fib6_node *root, |
190 | struct in6_addr *daddr, | 192 | struct in6_addr *daddr, |
@@ -194,7 +196,8 @@ struct fib6_node *fib6_locate(struct fib6_node *root, | |||
194 | struct in6_addr *daddr, int dst_len, | 196 | struct in6_addr *daddr, int dst_len, |
195 | struct in6_addr *saddr, int src_len); | 197 | struct in6_addr *saddr, int src_len); |
196 | 198 | ||
197 | extern void fib6_clean_all(int (*func)(struct rt6_info *, void *arg), | 199 | extern void fib6_clean_all(struct net *net, |
200 | int (*func)(struct rt6_info *, void *arg), | ||
198 | int prune, void *arg); | 201 | int prune, void *arg); |
199 | 202 | ||
200 | extern int fib6_add(struct fib6_node *root, | 203 | extern int fib6_add(struct fib6_node *root, |
@@ -207,7 +210,8 @@ extern int fib6_del(struct rt6_info *rt, | |||
207 | extern void inet6_rt_notify(int event, struct rt6_info *rt, | 210 | extern void inet6_rt_notify(int event, struct rt6_info *rt, |
208 | struct nl_info *info); | 211 | struct nl_info *info); |
209 | 212 | ||
210 | extern void fib6_run_gc(unsigned long dummy); | 213 | extern void fib6_run_gc(unsigned long expires, |
214 | struct net *net); | ||
211 | 215 | ||
212 | extern void fib6_gc_cleanup(void); | 216 | extern void fib6_gc_cleanup(void); |
213 | 217 | ||
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f99e4f0f568f..9313491e3dad 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
@@ -30,60 +30,54 @@ struct route_info { | |||
30 | #include <linux/ip.h> | 30 | #include <linux/ip.h> |
31 | #include <linux/ipv6.h> | 31 | #include <linux/ipv6.h> |
32 | 32 | ||
33 | #define RT6_LOOKUP_F_IFACE 0x1 | 33 | #define RT6_LOOKUP_F_IFACE 0x00000001 |
34 | #define RT6_LOOKUP_F_REACHABLE 0x2 | 34 | #define RT6_LOOKUP_F_REACHABLE 0x00000002 |
35 | #define RT6_LOOKUP_F_HAS_SADDR 0x4 | 35 | #define RT6_LOOKUP_F_HAS_SADDR 0x00000004 |
36 | #define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008 | ||
37 | #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 | ||
38 | #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 | ||
36 | 39 | ||
37 | extern struct rt6_info ip6_null_entry; | ||
38 | 40 | ||
39 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 41 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
40 | extern struct rt6_info ip6_prohibit_entry; | 42 | extern struct rt6_info *ip6_prohibit_entry; |
41 | extern struct rt6_info ip6_blk_hole_entry; | 43 | extern struct rt6_info *ip6_blk_hole_entry; |
42 | #endif | 44 | #endif |
43 | 45 | ||
44 | extern void ip6_route_input(struct sk_buff *skb); | 46 | extern void ip6_route_input(struct sk_buff *skb); |
45 | 47 | ||
46 | extern struct dst_entry * ip6_route_output(struct sock *sk, | 48 | extern struct dst_entry * ip6_route_output(struct net *net, |
49 | struct sock *sk, | ||
47 | struct flowi *fl); | 50 | struct flowi *fl); |
48 | 51 | ||
49 | extern int ip6_route_init(void); | 52 | extern int ip6_route_init(void); |
50 | extern void ip6_route_cleanup(void); | 53 | extern void ip6_route_cleanup(void); |
51 | 54 | ||
52 | extern int ipv6_route_ioctl(unsigned int cmd, void __user *arg); | 55 | extern int ipv6_route_ioctl(struct net *net, |
56 | unsigned int cmd, | ||
57 | void __user *arg); | ||
53 | 58 | ||
54 | extern int ip6_route_add(struct fib6_config *cfg); | 59 | extern int ip6_route_add(struct fib6_config *cfg); |
55 | extern int ip6_ins_rt(struct rt6_info *); | 60 | extern int ip6_ins_rt(struct rt6_info *); |
56 | extern int ip6_del_rt(struct rt6_info *); | 61 | extern int ip6_del_rt(struct rt6_info *); |
57 | 62 | ||
58 | extern int ip6_rt_addr_add(struct in6_addr *addr, | 63 | extern struct rt6_info *rt6_lookup(struct net *net, |
59 | struct net_device *dev, | 64 | const struct in6_addr *daddr, |
60 | int anycast); | 65 | const struct in6_addr *saddr, |
61 | |||
62 | extern int ip6_rt_addr_del(struct in6_addr *addr, | ||
63 | struct net_device *dev); | ||
64 | |||
65 | extern void rt6_sndmsg(int type, struct in6_addr *dst, | ||
66 | struct in6_addr *src, | ||
67 | struct in6_addr *gw, | ||
68 | struct net_device *dev, | ||
69 | int dstlen, int srclen, | ||
70 | int metric, __u32 flags); | ||
71 | |||
72 | extern struct rt6_info *rt6_lookup(struct in6_addr *daddr, | ||
73 | struct in6_addr *saddr, | ||
74 | int oif, int flags); | 66 | int oif, int flags); |
75 | 67 | ||
76 | extern struct dst_entry *ndisc_dst_alloc(struct net_device *dev, | 68 | extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev, |
77 | struct neighbour *neigh, | 69 | struct neighbour *neigh, |
78 | struct in6_addr *addr, | 70 | const struct in6_addr *addr); |
79 | int (*output)(struct sk_buff *)); | 71 | extern int icmp6_dst_gc(int *more); |
80 | extern int ndisc_dst_gc(int *more); | 72 | |
81 | extern void fib6_force_start_gc(void); | 73 | extern void fib6_force_start_gc(struct net *net); |
82 | 74 | ||
83 | extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | 75 | extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, |
84 | const struct in6_addr *addr, | 76 | const struct in6_addr *addr, |
85 | int anycast); | 77 | int anycast); |
86 | 78 | ||
79 | extern int ip6_dst_hoplimit(struct dst_entry *dst); | ||
80 | |||
87 | /* | 81 | /* |
88 | * support functions for ND | 82 | * support functions for ND |
89 | * | 83 | * |
@@ -94,7 +88,7 @@ extern struct rt6_info * rt6_add_dflt_router(struct in6_addr *gwaddr, | |||
94 | struct net_device *dev, | 88 | struct net_device *dev, |
95 | unsigned int pref); | 89 | unsigned int pref); |
96 | 90 | ||
97 | extern void rt6_purge_dflt_routers(void); | 91 | extern void rt6_purge_dflt_routers(struct net *net); |
98 | 92 | ||
99 | extern int rt6_route_rcv(struct net_device *dev, | 93 | extern int rt6_route_rcv(struct net_device *dev, |
100 | u8 *opt, int len, | 94 | u8 *opt, int len, |
@@ -121,7 +115,7 @@ struct rt6_rtnl_dump_arg | |||
121 | }; | 115 | }; |
122 | 116 | ||
123 | extern int rt6_dump_route(struct rt6_info *rt, void *p_arg); | 117 | extern int rt6_dump_route(struct rt6_info *rt, void *p_arg); |
124 | extern void rt6_ifdown(struct net_device *dev); | 118 | extern void rt6_ifdown(struct net *net, struct net_device *dev); |
125 | extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); | 119 | extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); |
126 | 120 | ||
127 | extern rwlock_t rt6_lock; | 121 | extern rwlock_t rt6_lock; |
diff --git a/include/net/ipip.h b/include/net/ipip.h index 549e132bca9c..633ed4def8e3 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h | |||
@@ -24,6 +24,16 @@ struct ip_tunnel | |||
24 | int mlink; | 24 | int mlink; |
25 | 25 | ||
26 | struct ip_tunnel_parm parms; | 26 | struct ip_tunnel_parm parms; |
27 | |||
28 | struct ip_tunnel_prl_entry *prl; /* potential router list */ | ||
29 | unsigned int prl_count; /* # of entries in PRL */ | ||
30 | }; | ||
31 | |||
32 | struct ip_tunnel_prl_entry | ||
33 | { | ||
34 | struct ip_tunnel_prl_entry *next; | ||
35 | __be32 addr; | ||
36 | u16 flags; | ||
27 | }; | 37 | }; |
28 | 38 | ||
29 | #define IPTUNNEL_XMIT() do { \ | 39 | #define IPTUNNEL_XMIT() do { \ |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index c0c019f72ba9..49c48983019f 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -202,6 +202,7 @@ struct ip6_flowlabel | |||
202 | u32 owner; | 202 | u32 owner; |
203 | unsigned long lastuse; | 203 | unsigned long lastuse; |
204 | unsigned long expires; | 204 | unsigned long expires; |
205 | struct net *fl_net; | ||
205 | }; | 206 | }; |
206 | 207 | ||
207 | #define IPV6_FLOWINFO_MASK __constant_htonl(0x0FFFFFFF) | 208 | #define IPV6_FLOWINFO_MASK __constant_htonl(0x0FFFFFFF) |
@@ -249,15 +250,6 @@ int ip6_frag_mem(struct net *net); | |||
249 | 250 | ||
250 | #define IPV6_FRAG_TIMEOUT (60*HZ) /* 60 seconds */ | 251 | #define IPV6_FRAG_TIMEOUT (60*HZ) /* 60 seconds */ |
251 | 252 | ||
252 | /* | ||
253 | * Function prototype for build_xmit | ||
254 | */ | ||
255 | |||
256 | typedef int (*inet_getfrag_t) (const void *data, | ||
257 | struct in6_addr *addr, | ||
258 | char *, | ||
259 | unsigned int, unsigned int); | ||
260 | |||
261 | extern int __ipv6_addr_type(const struct in6_addr *addr); | 253 | extern int __ipv6_addr_type(const struct in6_addr *addr); |
262 | static inline int ipv6_addr_type(const struct in6_addr *addr) | 254 | static inline int ipv6_addr_type(const struct in6_addr *addr) |
263 | { | 255 | { |
@@ -288,12 +280,10 @@ static inline int | |||
288 | ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, | 280 | ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, |
289 | const struct in6_addr *a2) | 281 | const struct in6_addr *a2) |
290 | { | 282 | { |
291 | unsigned int i; | 283 | return (!!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | |
292 | 284 | ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | | |
293 | for (i = 0; i < 4; i++) | 285 | ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | |
294 | if ((a1->s6_addr32[i] ^ a2->s6_addr32[i]) & m->s6_addr32[i]) | 286 | ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]))); |
295 | return 1; | ||
296 | return 0; | ||
297 | } | 287 | } |
298 | 288 | ||
299 | static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) | 289 | static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) |
@@ -328,10 +318,10 @@ static inline void ipv6_addr_set(struct in6_addr *addr, | |||
328 | static inline int ipv6_addr_equal(const struct in6_addr *a1, | 318 | static inline int ipv6_addr_equal(const struct in6_addr *a1, |
329 | const struct in6_addr *a2) | 319 | const struct in6_addr *a2) |
330 | { | 320 | { |
331 | return (a1->s6_addr32[0] == a2->s6_addr32[0] && | 321 | return (((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | |
332 | a1->s6_addr32[1] == a2->s6_addr32[1] && | 322 | (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | |
333 | a1->s6_addr32[2] == a2->s6_addr32[2] && | 323 | (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | |
334 | a1->s6_addr32[3] == a2->s6_addr32[3]); | 324 | (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0); |
335 | } | 325 | } |
336 | 326 | ||
337 | static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, | 327 | static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, |
@@ -379,8 +369,18 @@ static inline int ipv6_addr_any(const struct in6_addr *a) | |||
379 | 369 | ||
380 | static inline int ipv6_addr_v4mapped(const struct in6_addr *a) | 370 | static inline int ipv6_addr_v4mapped(const struct in6_addr *a) |
381 | { | 371 | { |
382 | return ((a->s6_addr32[0] | a->s6_addr32[1]) == 0 && | 372 | return ((a->s6_addr32[0] | a->s6_addr32[1] | |
383 | a->s6_addr32[2] == htonl(0x0000ffff)); | 373 | (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0); |
374 | } | ||
375 | |||
376 | /* | ||
377 | * Check for a RFC 4843 ORCHID address | ||
378 | * (Overlay Routable Cryptographic Hash Identifiers) | ||
379 | */ | ||
380 | static inline int ipv6_addr_orchid(const struct in6_addr *a) | ||
381 | { | ||
382 | return ((a->s6_addr32[0] & htonl(0xfffffff0)) | ||
383 | == htonl(0x20010010)); | ||
384 | } | 384 | } |
385 | 385 | ||
386 | /* | 386 | /* |
@@ -451,8 +451,8 @@ extern int ip6_xmit(struct sock *sk, | |||
451 | extern int ip6_nd_hdr(struct sock *sk, | 451 | extern int ip6_nd_hdr(struct sock *sk, |
452 | struct sk_buff *skb, | 452 | struct sk_buff *skb, |
453 | struct net_device *dev, | 453 | struct net_device *dev, |
454 | struct in6_addr *saddr, | 454 | const struct in6_addr *saddr, |
455 | struct in6_addr *daddr, | 455 | const struct in6_addr *daddr, |
456 | int proto, int len); | 456 | int proto, int len); |
457 | 457 | ||
458 | extern int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); | 458 | extern int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); |
@@ -499,14 +499,6 @@ extern int ip6_local_out(struct sk_buff *skb); | |||
499 | * Extension header (options) processing | 499 | * Extension header (options) processing |
500 | */ | 500 | */ |
501 | 501 | ||
502 | extern u8 * ipv6_build_nfrag_opts(struct sk_buff *skb, | ||
503 | u8 *prev_hdr, | ||
504 | struct ipv6_txoptions *opt, | ||
505 | struct in6_addr *daddr, | ||
506 | u32 jumbolen); | ||
507 | extern u8 * ipv6_build_frag_opts(struct sk_buff *skb, | ||
508 | u8 *prev_hdr, | ||
509 | struct ipv6_txoptions *opt); | ||
510 | extern void ipv6_push_nfrag_opts(struct sk_buff *skb, | 502 | extern void ipv6_push_nfrag_opts(struct sk_buff *skb, |
511 | struct ipv6_txoptions *opt, | 503 | struct ipv6_txoptions *opt, |
512 | u8 *proto, | 504 | u8 *proto, |
@@ -545,10 +537,6 @@ extern int compat_ipv6_getsockopt(struct sock *sk, | |||
545 | char __user *optval, | 537 | char __user *optval, |
546 | int __user *optlen); | 538 | int __user *optlen); |
547 | 539 | ||
548 | extern int ipv6_packet_init(void); | ||
549 | |||
550 | extern void ipv6_packet_cleanup(void); | ||
551 | |||
552 | extern int ip6_datagram_connect(struct sock *sk, | 540 | extern int ip6_datagram_connect(struct sock *sk, |
553 | struct sockaddr *addr, int addr_len); | 541 | struct sockaddr *addr, int addr_len); |
554 | 542 | ||
@@ -585,14 +573,14 @@ extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, | |||
585 | int __user *optlen); | 573 | int __user *optlen); |
586 | 574 | ||
587 | #ifdef CONFIG_PROC_FS | 575 | #ifdef CONFIG_PROC_FS |
588 | extern int ac6_proc_init(void); | 576 | extern int ac6_proc_init(struct net *net); |
589 | extern void ac6_proc_exit(void); | 577 | extern void ac6_proc_exit(struct net *net); |
590 | extern int raw6_proc_init(void); | 578 | extern int raw6_proc_init(void); |
591 | extern void raw6_proc_exit(void); | 579 | extern void raw6_proc_exit(void); |
592 | extern int tcp6_proc_init(void); | 580 | extern int tcp6_proc_init(struct net *net); |
593 | extern void tcp6_proc_exit(void); | 581 | extern void tcp6_proc_exit(struct net *net); |
594 | extern int udp6_proc_init(void); | 582 | extern int udp6_proc_init(struct net *net); |
595 | extern void udp6_proc_exit(void); | 583 | extern void udp6_proc_exit(struct net *net); |
596 | extern int udplite6_proc_init(void); | 584 | extern int udplite6_proc_init(void); |
597 | extern void udplite6_proc_exit(void); | 585 | extern void udplite6_proc_exit(void); |
598 | extern int ipv6_misc_proc_init(void); | 586 | extern int ipv6_misc_proc_init(void); |
@@ -600,17 +588,11 @@ extern void ipv6_misc_proc_exit(void); | |||
600 | extern int snmp6_register_dev(struct inet6_dev *idev); | 588 | extern int snmp6_register_dev(struct inet6_dev *idev); |
601 | extern int snmp6_unregister_dev(struct inet6_dev *idev); | 589 | extern int snmp6_unregister_dev(struct inet6_dev *idev); |
602 | 590 | ||
603 | extern struct rt6_statistics rt6_stats; | ||
604 | #else | 591 | #else |
605 | static inline int snmp6_register_dev(struct inet6_dev *idev) | 592 | static inline int ac6_proc_init(struct net *net) { return 0; } |
606 | { | 593 | static inline void ac6_proc_exit(struct net *net) { } |
607 | return 0; | 594 | static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; } |
608 | } | 595 | static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } |
609 | |||
610 | static inline int snmp6_unregister_dev(struct inet6_dev *idev) | ||
611 | { | ||
612 | return 0; | ||
613 | } | ||
614 | #endif | 596 | #endif |
615 | 597 | ||
616 | #ifdef CONFIG_SYSCTL | 598 | #ifdef CONFIG_SYSCTL |
diff --git a/include/net/irda/irlan_eth.h b/include/net/irda/irlan_eth.h index 0062347600b9..de5c81691f33 100644 --- a/include/net/irda/irlan_eth.h +++ b/include/net/irda/irlan_eth.h | |||
@@ -29,5 +29,4 @@ struct net_device *alloc_irlandev(const char *name); | |||
29 | int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb); | 29 | int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb); |
30 | 30 | ||
31 | void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow); | 31 | void irlan_eth_flow_indication( void *instance, void *sap, LOCAL_FLOW flow); |
32 | void irlan_eth_send_gratuitous_arp(struct net_device *dev); | ||
33 | #endif | 32 | #endif |
diff --git a/include/net/llc_if.h b/include/net/llc_if.h index c608812a8e89..b595a004d31b 100644 --- a/include/net/llc_if.h +++ b/include/net/llc_if.h | |||
@@ -74,11 +74,6 @@ static inline int llc_mac_null(const u8 *mac) | |||
74 | return is_zero_ether_addr(mac); | 74 | return is_zero_ether_addr(mac); |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline int llc_addrany(const struct llc_addr *addr) | ||
78 | { | ||
79 | return llc_mac_null(addr->mac) && !addr->lsap; | ||
80 | } | ||
81 | |||
82 | static inline int llc_mac_multicast(const u8 *mac) | 77 | static inline int llc_mac_multicast(const u8 *mac) |
83 | { | 78 | { |
84 | return is_multicast_ether_addr(mac); | 79 | return is_multicast_ether_addr(mac); |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 9083bafb63ca..4a80d74975e8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -38,7 +38,11 @@ | |||
38 | * called in hardware interrupt context. The low-level driver must not call any | 38 | * called in hardware interrupt context. The low-level driver must not call any |
39 | * other functions in hardware interrupt context. If there is a need for such | 39 | * other functions in hardware interrupt context. If there is a need for such |
40 | * call, the low-level driver should first ACK the interrupt and perform the | 40 | * call, the low-level driver should first ACK the interrupt and perform the |
41 | * IEEE 802.11 code call after this, e.g. from a scheduled workqueue function. | 41 | * IEEE 802.11 code call after this, e.g. from a scheduled workqueue or even |
42 | * tasklet function. | ||
43 | * | ||
44 | * NOTE: If the driver opts to use the _irqsafe() functions, it may not also | ||
45 | * use the non-irqsafe functions! | ||
42 | */ | 46 | */ |
43 | 47 | ||
44 | /** | 48 | /** |
@@ -69,93 +73,12 @@ | |||
69 | * not do so then mac80211 may add this under certain circumstances. | 73 | * not do so then mac80211 may add this under certain circumstances. |
70 | */ | 74 | */ |
71 | 75 | ||
72 | #define IEEE80211_CHAN_W_SCAN 0x00000001 | ||
73 | #define IEEE80211_CHAN_W_ACTIVE_SCAN 0x00000002 | ||
74 | #define IEEE80211_CHAN_W_IBSS 0x00000004 | ||
75 | |||
76 | /* Channel information structure. Low-level driver is expected to fill in chan, | ||
77 | * freq, and val fields. Other fields will be filled in by 80211.o based on | ||
78 | * hostapd information and low-level driver does not need to use them. The | ||
79 | * limits for each channel will be provided in 'struct ieee80211_conf' when | ||
80 | * configuring the low-level driver with hw->config callback. If a device has | ||
81 | * a default regulatory domain, IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED | ||
82 | * can be set to let the driver configure all fields */ | ||
83 | struct ieee80211_channel { | ||
84 | short chan; /* channel number (IEEE 802.11) */ | ||
85 | short freq; /* frequency in MHz */ | ||
86 | int val; /* hw specific value for the channel */ | ||
87 | int flag; /* flag for hostapd use (IEEE80211_CHAN_*) */ | ||
88 | unsigned char power_level; | ||
89 | unsigned char antenna_max; | ||
90 | }; | ||
91 | |||
92 | #define IEEE80211_RATE_ERP 0x00000001 | ||
93 | #define IEEE80211_RATE_BASIC 0x00000002 | ||
94 | #define IEEE80211_RATE_PREAMBLE2 0x00000004 | ||
95 | #define IEEE80211_RATE_SUPPORTED 0x00000010 | ||
96 | #define IEEE80211_RATE_OFDM 0x00000020 | ||
97 | #define IEEE80211_RATE_CCK 0x00000040 | ||
98 | #define IEEE80211_RATE_MANDATORY 0x00000100 | ||
99 | |||
100 | #define IEEE80211_RATE_CCK_2 (IEEE80211_RATE_CCK | IEEE80211_RATE_PREAMBLE2) | ||
101 | #define IEEE80211_RATE_MODULATION(f) \ | ||
102 | (f & (IEEE80211_RATE_CCK | IEEE80211_RATE_OFDM)) | ||
103 | |||
104 | /* Low-level driver should set PREAMBLE2, OFDM and CCK flags. | ||
105 | * BASIC, SUPPORTED, ERP, and MANDATORY flags are set in 80211.o based on the | ||
106 | * configuration. */ | ||
107 | struct ieee80211_rate { | ||
108 | int rate; /* rate in 100 kbps */ | ||
109 | int val; /* hw specific value for the rate */ | ||
110 | int flags; /* IEEE80211_RATE_ flags */ | ||
111 | int val2; /* hw specific value for the rate when using short preamble | ||
112 | * (only when IEEE80211_RATE_PREAMBLE2 flag is set, i.e., for | ||
113 | * 2, 5.5, and 11 Mbps) */ | ||
114 | signed char min_rssi_ack; | ||
115 | unsigned char min_rssi_ack_delta; | ||
116 | |||
117 | /* following fields are set by 80211.o and need not be filled by the | ||
118 | * low-level driver */ | ||
119 | int rate_inv; /* inverse of the rate (LCM(all rates) / rate) for | ||
120 | * optimizing channel utilization estimates */ | ||
121 | }; | ||
122 | |||
123 | /** | 76 | /** |
124 | * enum ieee80211_phymode - PHY modes | 77 | * enum ieee80211_notification_type - Low level driver notification |
125 | * | 78 | * @IEEE80211_NOTIFY_RE_ASSOC: start the re-association sequence |
126 | * @MODE_IEEE80211A: 5GHz as defined by 802.11a/802.11h | ||
127 | * @MODE_IEEE80211B: 2.4 GHz as defined by 802.11b | ||
128 | * @MODE_IEEE80211G: 2.4 GHz as defined by 802.11g (with OFDM), | ||
129 | * backwards compatible with 11b mode | ||
130 | * @NUM_IEEE80211_MODES: internal | ||
131 | */ | 79 | */ |
132 | enum ieee80211_phymode { | 80 | enum ieee80211_notification_types { |
133 | MODE_IEEE80211A, | 81 | IEEE80211_NOTIFY_RE_ASSOC, |
134 | MODE_IEEE80211B, | ||
135 | MODE_IEEE80211G, | ||
136 | |||
137 | /* keep last */ | ||
138 | NUM_IEEE80211_MODES | ||
139 | }; | ||
140 | |||
141 | /** | ||
142 | * struct ieee80211_ht_info - describing STA's HT capabilities | ||
143 | * | ||
144 | * This structure describes most essential parameters needed | ||
145 | * to describe 802.11n HT capabilities for an STA. | ||
146 | * | ||
147 | * @ht_supported: is HT supported by STA, 0: no, 1: yes | ||
148 | * @cap: HT capabilities map as described in 802.11n spec | ||
149 | * @ampdu_factor: Maximum A-MPDU length factor | ||
150 | * @ampdu_density: Minimum A-MPDU spacing | ||
151 | * @supp_mcs_set: Supported MCS set as described in 802.11n spec | ||
152 | */ | ||
153 | struct ieee80211_ht_info { | ||
154 | u8 ht_supported; | ||
155 | u16 cap; /* use IEEE80211_HT_CAP_ */ | ||
156 | u8 ampdu_factor; | ||
157 | u8 ampdu_density; | ||
158 | u8 supp_mcs_set[16]; | ||
159 | }; | 82 | }; |
160 | 83 | ||
161 | /** | 84 | /** |
@@ -175,46 +98,22 @@ struct ieee80211_ht_bss_info { | |||
175 | }; | 98 | }; |
176 | 99 | ||
177 | /** | 100 | /** |
178 | * struct ieee80211_hw_mode - PHY mode definition | ||
179 | * | ||
180 | * This structure describes the capabilities supported by the device | ||
181 | * in a single PHY mode. | ||
182 | * | ||
183 | * @list: internal | ||
184 | * @channels: pointer to array of supported channels | ||
185 | * @rates: pointer to array of supported bitrates | ||
186 | * @mode: the PHY mode for this definition | ||
187 | * @num_channels: number of supported channels | ||
188 | * @num_rates: number of supported bitrates | ||
189 | * @ht_info: PHY's 802.11n HT abilities for this mode | ||
190 | */ | ||
191 | struct ieee80211_hw_mode { | ||
192 | struct list_head list; | ||
193 | struct ieee80211_channel *channels; | ||
194 | struct ieee80211_rate *rates; | ||
195 | enum ieee80211_phymode mode; | ||
196 | int num_channels; | ||
197 | int num_rates; | ||
198 | struct ieee80211_ht_info ht_info; | ||
199 | }; | ||
200 | |||
201 | /** | ||
202 | * struct ieee80211_tx_queue_params - transmit queue configuration | 101 | * struct ieee80211_tx_queue_params - transmit queue configuration |
203 | * | 102 | * |
204 | * The information provided in this structure is required for QoS | 103 | * The information provided in this structure is required for QoS |
205 | * transmit queue configuration. | 104 | * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29. |
206 | * | 105 | * |
207 | * @aifs: arbitration interface space [0..255, -1: use default] | 106 | * @aifs: arbitration interface space [0..255, -1: use default] |
208 | * @cw_min: minimum contention window [will be a value of the form | 107 | * @cw_min: minimum contention window [will be a value of the form |
209 | * 2^n-1 in the range 1..1023; 0: use default] | 108 | * 2^n-1 in the range 1..1023; 0: use default] |
210 | * @cw_max: maximum contention window [like @cw_min] | 109 | * @cw_max: maximum contention window [like @cw_min] |
211 | * @burst_time: maximum burst time in units of 0.1ms, 0 meaning disabled | 110 | * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled |
212 | */ | 111 | */ |
213 | struct ieee80211_tx_queue_params { | 112 | struct ieee80211_tx_queue_params { |
214 | int aifs; | 113 | s16 aifs; |
215 | int cw_min; | 114 | u16 cw_min; |
216 | int cw_max; | 115 | u16 cw_max; |
217 | int burst_time; | 116 | u16 txop; |
218 | }; | 117 | }; |
219 | 118 | ||
220 | /** | 119 | /** |
@@ -246,6 +145,7 @@ struct ieee80211_tx_queue_stats_data { | |||
246 | * @IEEE80211_TX_QUEUE_AFTER_BEACON: transmit queue for frames to be | 145 | * @IEEE80211_TX_QUEUE_AFTER_BEACON: transmit queue for frames to be |
247 | * sent after a beacon | 146 | * sent after a beacon |
248 | * @IEEE80211_TX_QUEUE_BEACON: transmit queue for beacon frames | 147 | * @IEEE80211_TX_QUEUE_BEACON: transmit queue for beacon frames |
148 | * @NUM_TX_DATA_QUEUES_AMPDU: adding more queues for A-MPDU | ||
249 | */ | 149 | */ |
250 | enum ieee80211_tx_queue { | 150 | enum ieee80211_tx_queue { |
251 | IEEE80211_TX_QUEUE_DATA0, | 151 | IEEE80211_TX_QUEUE_DATA0, |
@@ -261,11 +161,12 @@ enum ieee80211_tx_queue { | |||
261 | * this struct need to have fixed values. As soon as it is removed, we can | 161 | * this struct need to have fixed values. As soon as it is removed, we can |
262 | * fix these entries. */ | 162 | * fix these entries. */ |
263 | IEEE80211_TX_QUEUE_AFTER_BEACON = 6, | 163 | IEEE80211_TX_QUEUE_AFTER_BEACON = 6, |
264 | IEEE80211_TX_QUEUE_BEACON = 7 | 164 | IEEE80211_TX_QUEUE_BEACON = 7, |
165 | NUM_TX_DATA_QUEUES_AMPDU = 16 | ||
265 | }; | 166 | }; |
266 | 167 | ||
267 | struct ieee80211_tx_queue_stats { | 168 | struct ieee80211_tx_queue_stats { |
268 | struct ieee80211_tx_queue_stats_data data[NUM_TX_DATA_QUEUES]; | 169 | struct ieee80211_tx_queue_stats_data data[NUM_TX_DATA_QUEUES_AMPDU]; |
269 | }; | 170 | }; |
270 | 171 | ||
271 | struct ieee80211_low_level_stats { | 172 | struct ieee80211_low_level_stats { |
@@ -285,11 +186,13 @@ struct ieee80211_low_level_stats { | |||
285 | * also implies a change in the AID. | 186 | * also implies a change in the AID. |
286 | * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed | 187 | * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed |
287 | * @BSS_CHANGED_ERP_PREAMBLE: preamble changed | 188 | * @BSS_CHANGED_ERP_PREAMBLE: preamble changed |
189 | * @BSS_CHANGED_HT: 802.11n parameters changed | ||
288 | */ | 190 | */ |
289 | enum ieee80211_bss_change { | 191 | enum ieee80211_bss_change { |
290 | BSS_CHANGED_ASSOC = 1<<0, | 192 | BSS_CHANGED_ASSOC = 1<<0, |
291 | BSS_CHANGED_ERP_CTS_PROT = 1<<1, | 193 | BSS_CHANGED_ERP_CTS_PROT = 1<<1, |
292 | BSS_CHANGED_ERP_PREAMBLE = 1<<2, | 194 | BSS_CHANGED_ERP_PREAMBLE = 1<<2, |
195 | BSS_CHANGED_HT = 1<<4, | ||
293 | }; | 196 | }; |
294 | 197 | ||
295 | /** | 198 | /** |
@@ -302,6 +205,12 @@ enum ieee80211_bss_change { | |||
302 | * @aid: association ID number, valid only when @assoc is true | 205 | * @aid: association ID number, valid only when @assoc is true |
303 | * @use_cts_prot: use CTS protection | 206 | * @use_cts_prot: use CTS protection |
304 | * @use_short_preamble: use 802.11b short preamble | 207 | * @use_short_preamble: use 802.11b short preamble |
208 | * @timestamp: beacon timestamp | ||
209 | * @beacon_int: beacon interval | ||
210 | * @assoc_capability: capabbilities taken from assoc resp | ||
211 | * @assoc_ht: association in HT mode | ||
212 | * @ht_conf: ht capabilities | ||
213 | * @ht_bss_conf: ht extended capabilities | ||
305 | */ | 214 | */ |
306 | struct ieee80211_bss_conf { | 215 | struct ieee80211_bss_conf { |
307 | /* association related data */ | 216 | /* association related data */ |
@@ -310,6 +219,69 @@ struct ieee80211_bss_conf { | |||
310 | /* erp related data */ | 219 | /* erp related data */ |
311 | bool use_cts_prot; | 220 | bool use_cts_prot; |
312 | bool use_short_preamble; | 221 | bool use_short_preamble; |
222 | u16 beacon_int; | ||
223 | u16 assoc_capability; | ||
224 | u64 timestamp; | ||
225 | /* ht related data */ | ||
226 | bool assoc_ht; | ||
227 | struct ieee80211_ht_info *ht_conf; | ||
228 | struct ieee80211_ht_bss_info *ht_bss_conf; | ||
229 | }; | ||
230 | |||
231 | /** | ||
232 | * enum mac80211_tx_control_flags - flags to describe Tx configuration for | ||
233 | * the Tx frame | ||
234 | * | ||
235 | * These flags are used with the @flags member of &ieee80211_tx_control | ||
236 | * | ||
237 | * @IEEE80211_TXCTL_REQ_TX_STATUS: request TX status callback for this frame. | ||
238 | * @IEEE80211_TXCTL_DO_NOT_ENCRYPT: send this frame without encryption; | ||
239 | * e.g., for EAPOL frame | ||
240 | * @IEEE80211_TXCTL_USE_RTS_CTS: use RTS-CTS before sending frame | ||
241 | * @IEEE80211_TXCTL_USE_CTS_PROTECT: use CTS protection for the frame (e.g., | ||
242 | * for combined 802.11g / 802.11b networks) | ||
243 | * @IEEE80211_TXCTL_NO_ACK: tell the low level not to wait for an ack | ||
244 | * @IEEE80211_TXCTL_RATE_CTRL_PROBE | ||
245 | * @EEE80211_TXCTL_CLEAR_PS_FILT: clear powersave filter | ||
246 | * for destination station | ||
247 | * @IEEE80211_TXCTL_REQUEUE: | ||
248 | * @IEEE80211_TXCTL_FIRST_FRAGMENT: this is a first fragment of the frame | ||
249 | * @IEEE80211_TXCTL_LONG_RETRY_LIMIT: this frame should be send using the | ||
250 | * through set_retry_limit configured long | ||
251 | * retry value | ||
252 | * @IEEE80211_TXCTL_EAPOL_FRAME: internal to mac80211 | ||
253 | * @IEEE80211_TXCTL_SEND_AFTER_DTIM: send this frame after DTIM beacon | ||
254 | * @IEEE80211_TXCTL_AMPDU: this frame should be sent as part of an A-MPDU | ||
255 | * @IEEE80211_TXCTL_OFDM_HT: this frame can be sent in HT OFDM rates. number | ||
256 | * of streams when this flag is on can be extracted | ||
257 | * from antenna_sel_tx, so if 1 antenna is marked | ||
258 | * use SISO, 2 antennas marked use MIMO, n antennas | ||
259 | * marked use MIMO_n. | ||
260 | * @IEEE80211_TXCTL_GREEN_FIELD: use green field protection for this frame | ||
261 | * @IEEE80211_TXCTL_40_MHZ_WIDTH: send this frame using 40 Mhz channel width | ||
262 | * @IEEE80211_TXCTL_DUP_DATA: duplicate data frame on both 20 Mhz channels | ||
263 | * @IEEE80211_TXCTL_SHORT_GI: send this frame using short guard interval | ||
264 | */ | ||
265 | enum mac80211_tx_control_flags { | ||
266 | IEEE80211_TXCTL_REQ_TX_STATUS = (1<<0), | ||
267 | IEEE80211_TXCTL_DO_NOT_ENCRYPT = (1<<1), | ||
268 | IEEE80211_TXCTL_USE_RTS_CTS = (1<<2), | ||
269 | IEEE80211_TXCTL_USE_CTS_PROTECT = (1<<3), | ||
270 | IEEE80211_TXCTL_NO_ACK = (1<<4), | ||
271 | IEEE80211_TXCTL_RATE_CTRL_PROBE = (1<<5), | ||
272 | IEEE80211_TXCTL_CLEAR_PS_FILT = (1<<6), | ||
273 | IEEE80211_TXCTL_REQUEUE = (1<<7), | ||
274 | IEEE80211_TXCTL_FIRST_FRAGMENT = (1<<8), | ||
275 | IEEE80211_TXCTL_SHORT_PREAMBLE = (1<<9), | ||
276 | IEEE80211_TXCTL_LONG_RETRY_LIMIT = (1<<10), | ||
277 | IEEE80211_TXCTL_EAPOL_FRAME = (1<<11), | ||
278 | IEEE80211_TXCTL_SEND_AFTER_DTIM = (1<<12), | ||
279 | IEEE80211_TXCTL_AMPDU = (1<<13), | ||
280 | IEEE80211_TXCTL_OFDM_HT = (1<<14), | ||
281 | IEEE80211_TXCTL_GREEN_FIELD = (1<<15), | ||
282 | IEEE80211_TXCTL_40_MHZ_WIDTH = (1<<16), | ||
283 | IEEE80211_TXCTL_DUP_DATA = (1<<17), | ||
284 | IEEE80211_TXCTL_SHORT_GI = (1<<18), | ||
313 | }; | 285 | }; |
314 | 286 | ||
315 | /* Transmit control fields. This data structure is passed to low-level driver | 287 | /* Transmit control fields. This data structure is passed to low-level driver |
@@ -318,57 +290,27 @@ struct ieee80211_bss_conf { | |||
318 | 290 | ||
319 | struct ieee80211_tx_control { | 291 | struct ieee80211_tx_control { |
320 | struct ieee80211_vif *vif; | 292 | struct ieee80211_vif *vif; |
321 | int tx_rate; /* Transmit rate, given as the hw specific value for the | 293 | struct ieee80211_rate *tx_rate; |
322 | * rate (from struct ieee80211_rate) */ | 294 | |
323 | int rts_cts_rate; /* Transmit rate for RTS/CTS frame, given as the hw | 295 | /* Transmit rate for RTS/CTS frame */ |
324 | * specific value for the rate (from | 296 | struct ieee80211_rate *rts_cts_rate; |
325 | * struct ieee80211_rate) */ | 297 | |
326 | 298 | /* retry rate for the last retries */ | |
327 | #define IEEE80211_TXCTL_REQ_TX_STATUS (1<<0)/* request TX status callback for | 299 | struct ieee80211_rate *alt_retry_rate; |
328 | * this frame */ | 300 | |
329 | #define IEEE80211_TXCTL_DO_NOT_ENCRYPT (1<<1) /* send this frame without | 301 | u32 flags; /* tx control flags defined above */ |
330 | * encryption; e.g., for EAPOL | ||
331 | * frames */ | ||
332 | #define IEEE80211_TXCTL_USE_RTS_CTS (1<<2) /* use RTS-CTS before sending | ||
333 | * frame */ | ||
334 | #define IEEE80211_TXCTL_USE_CTS_PROTECT (1<<3) /* use CTS protection for the | ||
335 | * frame (e.g., for combined | ||
336 | * 802.11g / 802.11b networks) */ | ||
337 | #define IEEE80211_TXCTL_NO_ACK (1<<4) /* tell the low level not to | ||
338 | * wait for an ack */ | ||
339 | #define IEEE80211_TXCTL_RATE_CTRL_PROBE (1<<5) | ||
340 | #define IEEE80211_TXCTL_CLEAR_DST_MASK (1<<6) | ||
341 | #define IEEE80211_TXCTL_REQUEUE (1<<7) | ||
342 | #define IEEE80211_TXCTL_FIRST_FRAGMENT (1<<8) /* this is a first fragment of | ||
343 | * the frame */ | ||
344 | #define IEEE80211_TXCTL_LONG_RETRY_LIMIT (1<<10) /* this frame should be send | ||
345 | * using the through | ||
346 | * set_retry_limit configured | ||
347 | * long retry value */ | ||
348 | #define IEEE80211_TXCTL_EAPOL_FRAME (1<<11) /* internal to mac80211 */ | ||
349 | #define IEEE80211_TXCTL_SEND_AFTER_DTIM (1<<12) /* send this frame after DTIM | ||
350 | * beacon */ | ||
351 | u32 flags; /* tx control flags defined | ||
352 | * above */ | ||
353 | u8 key_idx; /* keyidx from hw->set_key(), undefined if | 302 | u8 key_idx; /* keyidx from hw->set_key(), undefined if |
354 | * IEEE80211_TXCTL_DO_NOT_ENCRYPT is set */ | 303 | * IEEE80211_TXCTL_DO_NOT_ENCRYPT is set */ |
355 | u8 retry_limit; /* 1 = only first attempt, 2 = one retry, .. | 304 | u8 retry_limit; /* 1 = only first attempt, 2 = one retry, .. |
356 | * This could be used when set_retry_limit | 305 | * This could be used when set_retry_limit |
357 | * is not implemented by the driver */ | 306 | * is not implemented by the driver */ |
358 | u8 power_level; /* per-packet transmit power level, in dBm */ | 307 | u8 antenna_sel_tx; /* 0 = default/diversity, otherwise bit |
359 | u8 antenna_sel_tx; /* 0 = default/diversity, 1 = Ant0, 2 = Ant1 */ | 308 | * position represents antenna number used */ |
360 | u8 icv_len; /* length of the ICV/MIC field in octets */ | 309 | u8 icv_len; /* length of the ICV/MIC field in octets */ |
361 | u8 iv_len; /* length of the IV field in octets */ | 310 | u8 iv_len; /* length of the IV field in octets */ |
362 | u8 queue; /* hardware queue to use for this frame; | 311 | u8 queue; /* hardware queue to use for this frame; |
363 | * 0 = highest, hw->queues-1 = lowest */ | 312 | * 0 = highest, hw->queues-1 = lowest */ |
364 | struct ieee80211_rate *rate; /* internal 80211.o rate */ | 313 | u16 aid; /* Station AID */ |
365 | struct ieee80211_rate *rts_rate; /* internal 80211.o rate | ||
366 | * for RTS/CTS */ | ||
367 | int alt_retry_rate; /* retry rate for the last retries, given as the | ||
368 | * hw specific value for the rate (from | ||
369 | * struct ieee80211_rate). To be used to limit | ||
370 | * packet dropping when probing higher rates, if hw | ||
371 | * supports multiple retry rates. -1 = not used */ | ||
372 | int type; /* internal */ | 314 | int type; /* internal */ |
373 | }; | 315 | }; |
374 | 316 | ||
@@ -391,7 +333,8 @@ struct ieee80211_tx_control { | |||
391 | * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on | 333 | * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on |
392 | * the frame. | 334 | * the frame. |
393 | * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field) | 335 | * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field) |
394 | * is valid. | 336 | * is valid. This is useful in monitor mode and necessary for beacon frames |
337 | * to enable IBSS merging. | ||
395 | */ | 338 | */ |
396 | enum mac80211_rx_flags { | 339 | enum mac80211_rx_flags { |
397 | RX_FLAG_MMIC_ERROR = 1<<0, | 340 | RX_FLAG_MMIC_ERROR = 1<<0, |
@@ -410,27 +353,26 @@ enum mac80211_rx_flags { | |||
410 | * The low-level driver should provide this information (the subset | 353 | * The low-level driver should provide this information (the subset |
411 | * supported by hardware) to the 802.11 code with each received | 354 | * supported by hardware) to the 802.11 code with each received |
412 | * frame. | 355 | * frame. |
413 | * @mactime: MAC timestamp as defined by 802.11 | 356 | * @mactime: value in microseconds of the 64-bit Time Synchronization Function |
357 | * (TSF) timer when the first data symbol (MPDU) arrived at the hardware. | ||
358 | * @band: the active band when this frame was received | ||
414 | * @freq: frequency the radio was tuned to when receiving this frame, in MHz | 359 | * @freq: frequency the radio was tuned to when receiving this frame, in MHz |
415 | * @channel: channel the radio was tuned to | ||
416 | * @phymode: active PHY mode | ||
417 | * @ssi: signal strength when receiving this frame | 360 | * @ssi: signal strength when receiving this frame |
418 | * @signal: used as 'qual' in statistics reporting | 361 | * @signal: used as 'qual' in statistics reporting |
419 | * @noise: PHY noise when receiving this frame | 362 | * @noise: PHY noise when receiving this frame |
420 | * @antenna: antenna used | 363 | * @antenna: antenna used |
421 | * @rate: data rate | 364 | * @rate_idx: index of data rate into band's supported rates |
422 | * @flag: %RX_FLAG_* | 365 | * @flag: %RX_FLAG_* |
423 | */ | 366 | */ |
424 | struct ieee80211_rx_status { | 367 | struct ieee80211_rx_status { |
425 | u64 mactime; | 368 | u64 mactime; |
369 | enum ieee80211_band band; | ||
426 | int freq; | 370 | int freq; |
427 | int channel; | ||
428 | enum ieee80211_phymode phymode; | ||
429 | int ssi; | 371 | int ssi; |
430 | int signal; | 372 | int signal; |
431 | int noise; | 373 | int noise; |
432 | int antenna; | 374 | int antenna; |
433 | int rate; | 375 | int rate_idx; |
434 | int flag; | 376 | int flag; |
435 | }; | 377 | }; |
436 | 378 | ||
@@ -441,12 +383,14 @@ struct ieee80211_rx_status { | |||
441 | * | 383 | * |
442 | * @IEEE80211_TX_STATUS_TX_FILTERED: The frame was not transmitted | 384 | * @IEEE80211_TX_STATUS_TX_FILTERED: The frame was not transmitted |
443 | * because the destination STA was in powersave mode. | 385 | * because the destination STA was in powersave mode. |
444 | * | ||
445 | * @IEEE80211_TX_STATUS_ACK: Frame was acknowledged | 386 | * @IEEE80211_TX_STATUS_ACK: Frame was acknowledged |
387 | * @IEEE80211_TX_STATUS_AMPDU: The frame was aggregated, so status | ||
388 | * is for the whole aggregation. | ||
446 | */ | 389 | */ |
447 | enum ieee80211_tx_status_flags { | 390 | enum ieee80211_tx_status_flags { |
448 | IEEE80211_TX_STATUS_TX_FILTERED = 1<<0, | 391 | IEEE80211_TX_STATUS_TX_FILTERED = 1<<0, |
449 | IEEE80211_TX_STATUS_ACK = 1<<1, | 392 | IEEE80211_TX_STATUS_ACK = 1<<1, |
393 | IEEE80211_TX_STATUS_AMPDU = 1<<2, | ||
450 | }; | 394 | }; |
451 | 395 | ||
452 | /** | 396 | /** |
@@ -457,24 +401,25 @@ enum ieee80211_tx_status_flags { | |||
457 | * | 401 | * |
458 | * @control: a copy of the &struct ieee80211_tx_control passed to the driver | 402 | * @control: a copy of the &struct ieee80211_tx_control passed to the driver |
459 | * in the tx() callback. | 403 | * in the tx() callback. |
460 | * | ||
461 | * @flags: transmit status flags, defined above | 404 | * @flags: transmit status flags, defined above |
462 | * | 405 | * @retry_count: number of retries |
463 | * @ack_signal: signal strength of the ACK frame | ||
464 | * | ||
465 | * @excessive_retries: set to 1 if the frame was retried many times | 406 | * @excessive_retries: set to 1 if the frame was retried many times |
466 | * but not acknowledged | 407 | * but not acknowledged |
467 | * | 408 | * @ampdu_ack_len: number of aggregated frames. |
468 | * @retry_count: number of retries | 409 | * relevant only if IEEE80211_TX_STATUS_AMPDU was set. |
469 | * | 410 | * @ampdu_ack_map: block ack bit map for the aggregation. |
411 | * relevant only if IEEE80211_TX_STATUS_AMPDU was set. | ||
412 | * @ack_signal: signal strength of the ACK frame | ||
470 | * @queue_length: ?? REMOVE | 413 | * @queue_length: ?? REMOVE |
471 | * @queue_number: ?? REMOVE | 414 | * @queue_number: ?? REMOVE |
472 | */ | 415 | */ |
473 | struct ieee80211_tx_status { | 416 | struct ieee80211_tx_status { |
474 | struct ieee80211_tx_control control; | 417 | struct ieee80211_tx_control control; |
475 | u8 flags; | 418 | u8 flags; |
476 | bool excessive_retries; | ||
477 | u8 retry_count; | 419 | u8 retry_count; |
420 | bool excessive_retries; | ||
421 | u8 ampdu_ack_len; | ||
422 | u64 ampdu_ack_map; | ||
478 | int ack_signal; | 423 | int ack_signal; |
479 | int queue_length; | 424 | int queue_length; |
480 | int queue_number; | 425 | int queue_number; |
@@ -502,41 +447,29 @@ enum ieee80211_conf_flags { | |||
502 | * | 447 | * |
503 | * @radio_enabled: when zero, driver is required to switch off the radio. | 448 | * @radio_enabled: when zero, driver is required to switch off the radio. |
504 | * TODO make a flag | 449 | * TODO make a flag |
505 | * @channel: IEEE 802.11 channel number | ||
506 | * @freq: frequency in MHz | ||
507 | * @channel_val: hardware specific channel value for the channel | ||
508 | * @phymode: PHY mode to activate (REMOVE) | ||
509 | * @chan: channel to switch to, pointer to the channel information | ||
510 | * @mode: pointer to mode definition | ||
511 | * @regulatory_domain: ?? | ||
512 | * @beacon_int: beacon interval (TODO make interface config) | 450 | * @beacon_int: beacon interval (TODO make interface config) |
513 | * @flags: configuration flags defined above | 451 | * @flags: configuration flags defined above |
514 | * @power_level: transmit power limit for current regulatory domain in dBm | 452 | * @power_level: requested transmit power (in dBm) |
515 | * @antenna_max: maximum antenna gain | 453 | * @max_antenna_gain: maximum antenna gain (in dBi) |
516 | * @antenna_sel_tx: transmit antenna selection, 0: default/diversity, | 454 | * @antenna_sel_tx: transmit antenna selection, 0: default/diversity, |
517 | * 1/2: antenna 0/1 | 455 | * 1/2: antenna 0/1 |
518 | * @antenna_sel_rx: receive antenna selection, like @antenna_sel_tx | 456 | * @antenna_sel_rx: receive antenna selection, like @antenna_sel_tx |
519 | * @ht_conf: describes current self configuration of 802.11n HT capabilies | 457 | * @ht_conf: describes current self configuration of 802.11n HT capabilies |
520 | * @ht_bss_conf: describes current BSS configuration of 802.11n HT parameters | 458 | * @ht_bss_conf: describes current BSS configuration of 802.11n HT parameters |
459 | * @channel: the channel to tune to | ||
521 | */ | 460 | */ |
522 | struct ieee80211_conf { | 461 | struct ieee80211_conf { |
523 | int channel; /* IEEE 802.11 channel number */ | ||
524 | int freq; /* MHz */ | ||
525 | int channel_val; /* hw specific value for the channel */ | ||
526 | |||
527 | enum ieee80211_phymode phymode; | ||
528 | struct ieee80211_channel *chan; | ||
529 | struct ieee80211_hw_mode *mode; | ||
530 | unsigned int regulatory_domain; | ||
531 | int radio_enabled; | 462 | int radio_enabled; |
532 | 463 | ||
533 | int beacon_int; | 464 | int beacon_int; |
534 | u32 flags; | 465 | u32 flags; |
535 | u8 power_level; | 466 | int power_level; |
536 | u8 antenna_max; | 467 | int max_antenna_gain; |
537 | u8 antenna_sel_tx; | 468 | u8 antenna_sel_tx; |
538 | u8 antenna_sel_rx; | 469 | u8 antenna_sel_rx; |
539 | 470 | ||
471 | struct ieee80211_channel *channel; | ||
472 | |||
540 | struct ieee80211_ht_info ht_conf; | 473 | struct ieee80211_ht_info ht_conf; |
541 | struct ieee80211_ht_bss_info ht_bss_conf; | 474 | struct ieee80211_ht_bss_info ht_bss_conf; |
542 | }; | 475 | }; |
@@ -555,12 +488,14 @@ struct ieee80211_conf { | |||
555 | * @IEEE80211_IF_TYPE_WDS: interface in WDS mode. | 488 | * @IEEE80211_IF_TYPE_WDS: interface in WDS mode. |
556 | * @IEEE80211_IF_TYPE_VLAN: VLAN interface bound to an AP, drivers | 489 | * @IEEE80211_IF_TYPE_VLAN: VLAN interface bound to an AP, drivers |
557 | * will never see this type. | 490 | * will never see this type. |
491 | * @IEEE80211_IF_TYPE_MESH_POINT: 802.11s mesh point | ||
558 | */ | 492 | */ |
559 | enum ieee80211_if_types { | 493 | enum ieee80211_if_types { |
560 | IEEE80211_IF_TYPE_INVALID, | 494 | IEEE80211_IF_TYPE_INVALID, |
561 | IEEE80211_IF_TYPE_AP, | 495 | IEEE80211_IF_TYPE_AP, |
562 | IEEE80211_IF_TYPE_STA, | 496 | IEEE80211_IF_TYPE_STA, |
563 | IEEE80211_IF_TYPE_IBSS, | 497 | IEEE80211_IF_TYPE_IBSS, |
498 | IEEE80211_IF_TYPE_MESH_POINT, | ||
564 | IEEE80211_IF_TYPE_MNTR, | 499 | IEEE80211_IF_TYPE_MNTR, |
565 | IEEE80211_IF_TYPE_WDS, | 500 | IEEE80211_IF_TYPE_WDS, |
566 | IEEE80211_IF_TYPE_VLAN, | 501 | IEEE80211_IF_TYPE_VLAN, |
@@ -582,6 +517,14 @@ struct ieee80211_vif { | |||
582 | u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); | 517 | u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); |
583 | }; | 518 | }; |
584 | 519 | ||
520 | static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) | ||
521 | { | ||
522 | #ifdef CONFIG_MAC80211_MESH | ||
523 | return vif->type == IEEE80211_IF_TYPE_MESH_POINT; | ||
524 | #endif | ||
525 | return false; | ||
526 | } | ||
527 | |||
585 | /** | 528 | /** |
586 | * struct ieee80211_if_init_conf - initial configuration of an interface | 529 | * struct ieee80211_if_init_conf - initial configuration of an interface |
587 | * | 530 | * |
@@ -725,6 +668,21 @@ enum sta_notify_cmd { | |||
725 | }; | 668 | }; |
726 | 669 | ||
727 | /** | 670 | /** |
671 | * enum ieee80211_tkip_key_type - get tkip key | ||
672 | * | ||
673 | * Used by drivers which need to get a tkip key for skb. Some drivers need a | ||
674 | * phase 1 key, others need a phase 2 key. A single function allows the driver | ||
675 | * to get the key, this enum indicates what type of key is required. | ||
676 | * | ||
677 | * @IEEE80211_TKIP_P1_KEY: the driver needs a phase 1 key | ||
678 | * @IEEE80211_TKIP_P2_KEY: the driver needs a phase 2 key | ||
679 | */ | ||
680 | enum ieee80211_tkip_key_type { | ||
681 | IEEE80211_TKIP_P1_KEY, | ||
682 | IEEE80211_TKIP_P2_KEY, | ||
683 | }; | ||
684 | |||
685 | /** | ||
728 | * enum ieee80211_hw_flags - hardware flags | 686 | * enum ieee80211_hw_flags - hardware flags |
729 | * | 687 | * |
730 | * These flags are used to indicate hardware capabilities to | 688 | * These flags are used to indicate hardware capabilities to |
@@ -757,15 +715,19 @@ enum sta_notify_cmd { | |||
757 | * %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is also not set because | 715 | * %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is also not set because |
758 | * otherwise the stack will not know when the DTIM beacon was sent. | 716 | * otherwise the stack will not know when the DTIM beacon was sent. |
759 | * | 717 | * |
760 | * @IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED: | 718 | * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE: |
761 | * Channels are already configured to the default regulatory domain | 719 | * Hardware is not capable of short slot operation on the 2.4 GHz band. |
762 | * specified in the device's EEPROM | 720 | * |
721 | * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE: | ||
722 | * Hardware is not capable of receiving frames with short preamble on | ||
723 | * the 2.4 GHz band. | ||
763 | */ | 724 | */ |
764 | enum ieee80211_hw_flags { | 725 | enum ieee80211_hw_flags { |
765 | IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE = 1<<0, | 726 | IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE = 1<<0, |
766 | IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, | 727 | IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, |
767 | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2, | 728 | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2, |
768 | IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED = 1<<3, | 729 | IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3, |
730 | IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4, | ||
769 | }; | 731 | }; |
770 | 732 | ||
771 | /** | 733 | /** |
@@ -777,7 +739,8 @@ enum ieee80211_hw_flags { | |||
777 | * @wiphy: This points to the &struct wiphy allocated for this | 739 | * @wiphy: This points to the &struct wiphy allocated for this |
778 | * 802.11 PHY. You must fill in the @perm_addr and @dev | 740 | * 802.11 PHY. You must fill in the @perm_addr and @dev |
779 | * members of this structure using SET_IEEE80211_DEV() | 741 | * members of this structure using SET_IEEE80211_DEV() |
780 | * and SET_IEEE80211_PERM_ADDR(). | 742 | * and SET_IEEE80211_PERM_ADDR(). Additionally, all supported |
743 | * bands (with channels, bitrates) are registered here. | ||
781 | * | 744 | * |
782 | * @conf: &struct ieee80211_conf, device configuration, don't use. | 745 | * @conf: &struct ieee80211_conf, device configuration, don't use. |
783 | * | 746 | * |
@@ -888,6 +851,16 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) | |||
888 | * parameter is guaranteed to be valid until another call to set_key() | 851 | * parameter is guaranteed to be valid until another call to set_key() |
889 | * removes it, but it can only be used as a cookie to differentiate | 852 | * removes it, but it can only be used as a cookie to differentiate |
890 | * keys. | 853 | * keys. |
854 | * | ||
855 | * In TKIP some HW need to be provided a phase 1 key, for RX decryption | ||
856 | * acceleration (i.e. iwlwifi). Those drivers should provide update_tkip_key | ||
857 | * handler. | ||
858 | * The update_tkip_key() call updates the driver with the new phase 1 key. | ||
859 | * This happens everytime the iv16 wraps around (every 65536 packets). The | ||
860 | * set_key() call will happen only once for each key (unless the AP did | ||
861 | * rekeying), it will not include a valid phase 1 key. The valid phase 1 key is | ||
862 | * provided by udpate_tkip_key only. The trigger that makes mac80211 call this | ||
863 | * handler is software decryption with wrap around of iv16. | ||
891 | */ | 864 | */ |
892 | 865 | ||
893 | /** | 866 | /** |
@@ -913,8 +886,18 @@ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) | |||
913 | * parameter to see whether multicast frames should be accepted | 886 | * parameter to see whether multicast frames should be accepted |
914 | * or dropped. | 887 | * or dropped. |
915 | * | 888 | * |
916 | * All unsupported flags in @total_flags must be cleared, i.e. you | 889 | * All unsupported flags in @total_flags must be cleared. |
917 | * should clear all bits except those you honoured. | 890 | * Hardware does not support a flag if it is incapable of _passing_ |
891 | * the frame to the stack. Otherwise the driver must ignore | ||
892 | * the flag, but not clear it. | ||
893 | * You must _only_ clear the flag (announce no support for the | ||
894 | * flag to mac80211) if you are not able to pass the packet type | ||
895 | * to the stack (so the hardware always filters it). | ||
896 | * So for example, you should clear @FIF_CONTROL, if your hardware | ||
897 | * always filters control frames. If your hardware always passes | ||
898 | * control frames to the kernel and is incapable of filtering them, | ||
899 | * you do _not_ clear the @FIF_CONTROL flag. | ||
900 | * This rule applies to all other FIF flags as well. | ||
918 | */ | 901 | */ |
919 | 902 | ||
920 | /** | 903 | /** |
@@ -967,10 +950,14 @@ enum ieee80211_filter_flags { | |||
967 | * &struct ieee80211_ops to indicate which action is needed. | 950 | * &struct ieee80211_ops to indicate which action is needed. |
968 | * @IEEE80211_AMPDU_RX_START: start Rx aggregation | 951 | * @IEEE80211_AMPDU_RX_START: start Rx aggregation |
969 | * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation | 952 | * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation |
953 | * @IEEE80211_AMPDU_TX_START: start Tx aggregation | ||
954 | * @IEEE80211_AMPDU_TX_STOP: stop Tx aggregation | ||
970 | */ | 955 | */ |
971 | enum ieee80211_ampdu_mlme_action { | 956 | enum ieee80211_ampdu_mlme_action { |
972 | IEEE80211_AMPDU_RX_START, | 957 | IEEE80211_AMPDU_RX_START, |
973 | IEEE80211_AMPDU_RX_STOP, | 958 | IEEE80211_AMPDU_RX_STOP, |
959 | IEEE80211_AMPDU_TX_START, | ||
960 | IEEE80211_AMPDU_TX_STOP, | ||
974 | }; | 961 | }; |
975 | 962 | ||
976 | /** | 963 | /** |
@@ -1033,8 +1020,7 @@ enum ieee80211_ampdu_mlme_action { | |||
1033 | * level driver (e.g. assoc/disassoc status, erp parameters). | 1020 | * level driver (e.g. assoc/disassoc status, erp parameters). |
1034 | * This function should not be used if no BSS has been set, unless | 1021 | * This function should not be used if no BSS has been set, unless |
1035 | * for association indication. The @changed parameter indicates which | 1022 | * for association indication. The @changed parameter indicates which |
1036 | * of the bss parameters has changed when a call is made. This callback | 1023 | * of the bss parameters has changed when a call is made. |
1037 | * has to be atomic. | ||
1038 | * | 1024 | * |
1039 | * @configure_filter: Configure the device's RX filter. | 1025 | * @configure_filter: Configure the device's RX filter. |
1040 | * See the section "Frame filtering" for more information. | 1026 | * See the section "Frame filtering" for more information. |
@@ -1050,8 +1036,14 @@ enum ieee80211_ampdu_mlme_action { | |||
1050 | * and remove_interface calls, i.e. while the interface with the | 1036 | * and remove_interface calls, i.e. while the interface with the |
1051 | * given local_address is enabled. | 1037 | * given local_address is enabled. |
1052 | * | 1038 | * |
1039 | * @update_tkip_key: See the section "Hardware crypto acceleration" | ||
1040 | * This callback will be called in the context of Rx. Called for drivers | ||
1041 | * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY. | ||
1042 | * | ||
1053 | * @hw_scan: Ask the hardware to service the scan request, no need to start | 1043 | * @hw_scan: Ask the hardware to service the scan request, no need to start |
1054 | * the scan state machine in stack. | 1044 | * the scan state machine in stack. The scan must honour the channel |
1045 | * configuration done by the regulatory agent in the wiphy's registered | ||
1046 | * bands. | ||
1055 | * | 1047 | * |
1056 | * @get_stats: return low-level statistics | 1048 | * @get_stats: return low-level statistics |
1057 | * | 1049 | * |
@@ -1111,7 +1103,8 @@ enum ieee80211_ampdu_mlme_action { | |||
1111 | * The RA/TID combination determines the destination and TID we want | 1103 | * The RA/TID combination determines the destination and TID we want |
1112 | * the ampdu action to be performed for. The action is defined through | 1104 | * the ampdu action to be performed for. The action is defined through |
1113 | * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) | 1105 | * ieee80211_ampdu_mlme_action. Starting sequence number (@ssn) |
1114 | * is the first frame we expect to perform the action on. | 1106 | * is the first frame we expect to perform the action on. notice |
1107 | * that TX/RX_STOP can pass NULL for this parameter. | ||
1115 | */ | 1108 | */ |
1116 | struct ieee80211_ops { | 1109 | struct ieee80211_ops { |
1117 | int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb, | 1110 | int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb, |
@@ -1138,6 +1131,9 @@ struct ieee80211_ops { | |||
1138 | int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, | 1131 | int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, |
1139 | const u8 *local_address, const u8 *address, | 1132 | const u8 *local_address, const u8 *address, |
1140 | struct ieee80211_key_conf *key); | 1133 | struct ieee80211_key_conf *key); |
1134 | void (*update_tkip_key)(struct ieee80211_hw *hw, | ||
1135 | struct ieee80211_key_conf *conf, const u8 *address, | ||
1136 | u32 iv32, u16 *phase1key); | ||
1141 | int (*hw_scan)(struct ieee80211_hw *hw, u8 *ssid, size_t len); | 1137 | int (*hw_scan)(struct ieee80211_hw *hw, u8 *ssid, size_t len); |
1142 | int (*get_stats)(struct ieee80211_hw *hw, | 1138 | int (*get_stats)(struct ieee80211_hw *hw, |
1143 | struct ieee80211_low_level_stats *stats); | 1139 | struct ieee80211_low_level_stats *stats); |
@@ -1159,10 +1155,9 @@ struct ieee80211_ops { | |||
1159 | struct sk_buff *skb, | 1155 | struct sk_buff *skb, |
1160 | struct ieee80211_tx_control *control); | 1156 | struct ieee80211_tx_control *control); |
1161 | int (*tx_last_beacon)(struct ieee80211_hw *hw); | 1157 | int (*tx_last_beacon)(struct ieee80211_hw *hw); |
1162 | int (*conf_ht)(struct ieee80211_hw *hw, struct ieee80211_conf *conf); | ||
1163 | int (*ampdu_action)(struct ieee80211_hw *hw, | 1158 | int (*ampdu_action)(struct ieee80211_hw *hw, |
1164 | enum ieee80211_ampdu_mlme_action action, | 1159 | enum ieee80211_ampdu_mlme_action action, |
1165 | const u8 *ra, u16 tid, u16 ssn); | 1160 | const u8 *addr, u16 tid, u16 *ssn); |
1166 | }; | 1161 | }; |
1167 | 1162 | ||
1168 | /** | 1163 | /** |
@@ -1183,8 +1178,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
1183 | /** | 1178 | /** |
1184 | * ieee80211_register_hw - Register hardware device | 1179 | * ieee80211_register_hw - Register hardware device |
1185 | * | 1180 | * |
1186 | * You must call this function before any other functions | 1181 | * You must call this function before any other functions in |
1187 | * except ieee80211_register_hwmode. | 1182 | * mac80211. Note that before a hardware can be registered, you |
1183 | * need to fill the contained wiphy's information. | ||
1188 | * | 1184 | * |
1189 | * @hw: the device to register as returned by ieee80211_alloc_hw() | 1185 | * @hw: the device to register as returned by ieee80211_alloc_hw() |
1190 | */ | 1186 | */ |
@@ -1272,10 +1268,6 @@ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw) | |||
1272 | #endif | 1268 | #endif |
1273 | } | 1269 | } |
1274 | 1270 | ||
1275 | /* Register a new hardware PHYMODE capability to the stack. */ | ||
1276 | int ieee80211_register_hwmode(struct ieee80211_hw *hw, | ||
1277 | struct ieee80211_hw_mode *mode); | ||
1278 | |||
1279 | /** | 1271 | /** |
1280 | * ieee80211_unregister_hw - Unregister a hardware device | 1272 | * ieee80211_unregister_hw - Unregister a hardware device |
1281 | * | 1273 | * |
@@ -1308,7 +1300,10 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1308 | * buffer in @skb must start with an IEEE 802.11 header or a radiotap | 1300 | * buffer in @skb must start with an IEEE 802.11 header or a radiotap |
1309 | * header if %RX_FLAG_RADIOTAP is set in the @status flags. | 1301 | * header if %RX_FLAG_RADIOTAP is set in the @status flags. |
1310 | * | 1302 | * |
1311 | * This function may not be called in IRQ context. | 1303 | * This function may not be called in IRQ context. Calls to this function |
1304 | * for a single hardware must be synchronized against each other. Calls | ||
1305 | * to this function and ieee80211_rx_irqsafe() may not be mixed for a | ||
1306 | * single hardware. | ||
1312 | * | 1307 | * |
1313 | * @hw: the hardware this frame came in on | 1308 | * @hw: the hardware this frame came in on |
1314 | * @skb: the buffer to receive, owned by mac80211 after this call | 1309 | * @skb: the buffer to receive, owned by mac80211 after this call |
@@ -1325,7 +1320,10 @@ static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1325 | * ieee80211_rx_irqsafe - receive frame | 1320 | * ieee80211_rx_irqsafe - receive frame |
1326 | * | 1321 | * |
1327 | * Like ieee80211_rx() but can be called in IRQ context | 1322 | * Like ieee80211_rx() but can be called in IRQ context |
1328 | * (internally defers to a workqueue.) | 1323 | * (internally defers to a tasklet.) |
1324 | * | ||
1325 | * Calls to this function and ieee80211_rx() may not be mixed for a | ||
1326 | * single hardware. | ||
1329 | * | 1327 | * |
1330 | * @hw: the hardware this frame came in on | 1328 | * @hw: the hardware this frame came in on |
1331 | * @skb: the buffer to receive, owned by mac80211 after this call | 1329 | * @skb: the buffer to receive, owned by mac80211 after this call |
@@ -1344,6 +1342,11 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, | |||
1344 | * transmitted. It is permissible to not call this function for | 1342 | * transmitted. It is permissible to not call this function for |
1345 | * multicast frames but this can affect statistics. | 1343 | * multicast frames but this can affect statistics. |
1346 | * | 1344 | * |
1345 | * This function may not be called in IRQ context. Calls to this function | ||
1346 | * for a single hardware must be synchronized against each other. Calls | ||
1347 | * to this function and ieee80211_tx_status_irqsafe() may not be mixed | ||
1348 | * for a single hardware. | ||
1349 | * | ||
1347 | * @hw: the hardware the frame was transmitted by | 1350 | * @hw: the hardware the frame was transmitted by |
1348 | * @skb: the frame that was transmitted, owned by mac80211 after this call | 1351 | * @skb: the frame that was transmitted, owned by mac80211 after this call |
1349 | * @status: status information for this frame; the status pointer need not | 1352 | * @status: status information for this frame; the status pointer need not |
@@ -1353,6 +1356,22 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, | |||
1353 | void ieee80211_tx_status(struct ieee80211_hw *hw, | 1356 | void ieee80211_tx_status(struct ieee80211_hw *hw, |
1354 | struct sk_buff *skb, | 1357 | struct sk_buff *skb, |
1355 | struct ieee80211_tx_status *status); | 1358 | struct ieee80211_tx_status *status); |
1359 | |||
1360 | /** | ||
1361 | * ieee80211_tx_status_irqsafe - irq-safe transmit status callback | ||
1362 | * | ||
1363 | * Like ieee80211_tx_status() but can be called in IRQ context | ||
1364 | * (internally defers to a tasklet.) | ||
1365 | * | ||
1366 | * Calls to this function and ieee80211_tx_status() may not be mixed for a | ||
1367 | * single hardware. | ||
1368 | * | ||
1369 | * @hw: the hardware the frame was transmitted by | ||
1370 | * @skb: the frame that was transmitted, owned by mac80211 after this call | ||
1371 | * @status: status information for this frame; the status pointer need not | ||
1372 | * be valid after this function returns and is not freed by mac80211, | ||
1373 | * it is recommended that it points to a stack area | ||
1374 | */ | ||
1356 | void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, | 1375 | void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, |
1357 | struct sk_buff *skb, | 1376 | struct sk_buff *skb, |
1358 | struct ieee80211_tx_status *status); | 1377 | struct ieee80211_tx_status *status); |
@@ -1449,7 +1468,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | |||
1449 | * @hw: pointer obtained from ieee80211_alloc_hw(). | 1468 | * @hw: pointer obtained from ieee80211_alloc_hw(). |
1450 | * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. | 1469 | * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf. |
1451 | * @frame_len: the length of the frame. | 1470 | * @frame_len: the length of the frame. |
1452 | * @rate: the rate (in 100kbps) at which the frame is going to be transmitted. | 1471 | * @rate: the rate at which the frame is going to be transmitted. |
1453 | * | 1472 | * |
1454 | * Calculate the duration field of some generic frame, given its | 1473 | * Calculate the duration field of some generic frame, given its |
1455 | * length and transmission rate (in 100kbps). | 1474 | * length and transmission rate (in 100kbps). |
@@ -1457,7 +1476,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, | |||
1457 | __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, | 1476 | __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, |
1458 | struct ieee80211_vif *vif, | 1477 | struct ieee80211_vif *vif, |
1459 | size_t frame_len, | 1478 | size_t frame_len, |
1460 | int rate); | 1479 | struct ieee80211_rate *rate); |
1461 | 1480 | ||
1462 | /** | 1481 | /** |
1463 | * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames | 1482 | * ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames |
@@ -1507,6 +1526,21 @@ int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); | |||
1507 | int ieee80211_get_hdrlen(u16 fc); | 1526 | int ieee80211_get_hdrlen(u16 fc); |
1508 | 1527 | ||
1509 | /** | 1528 | /** |
1529 | * ieee80211_get_tkip_key - get a TKIP rc4 for skb | ||
1530 | * | ||
1531 | * This function computes a TKIP rc4 key for an skb. It computes | ||
1532 | * a phase 1 key if needed (iv16 wraps around). This function is to | ||
1533 | * be used by drivers which can do HW encryption but need to compute | ||
1534 | * to phase 1/2 key in SW. | ||
1535 | * | ||
1536 | * @keyconf: the parameter passed with the set key | ||
1537 | * @skb: the skb for which the key is needed | ||
1538 | * @rc4key: a buffer to which the key will be written | ||
1539 | */ | ||
1540 | void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, | ||
1541 | struct sk_buff *skb, | ||
1542 | enum ieee80211_tkip_key_type type, u8 *key); | ||
1543 | /** | ||
1510 | * ieee80211_wake_queue - wake specific queue | 1544 | * ieee80211_wake_queue - wake specific queue |
1511 | * @hw: pointer as obtained from ieee80211_alloc_hw(). | 1545 | * @hw: pointer as obtained from ieee80211_alloc_hw(). |
1512 | * @queue: queue number (counted from zero). | 1546 | * @queue: queue number (counted from zero). |
@@ -1574,4 +1608,92 @@ void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, | |||
1574 | struct ieee80211_vif *vif), | 1608 | struct ieee80211_vif *vif), |
1575 | void *data); | 1609 | void *data); |
1576 | 1610 | ||
1611 | /** | ||
1612 | * ieee80211_start_tx_ba_session - Start a tx Block Ack session. | ||
1613 | * @hw: pointer as obtained from ieee80211_alloc_hw(). | ||
1614 | * @ra: receiver address of the BA session recipient | ||
1615 | * @tid: the TID to BA on. | ||
1616 | * @return: success if addBA request was sent, failure otherwise | ||
1617 | * | ||
1618 | * Although mac80211/low level driver/user space application can estimate | ||
1619 | * the need to start aggregation on a certain RA/TID, the session level | ||
1620 | * will be managed by the mac80211. | ||
1621 | */ | ||
1622 | int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid); | ||
1623 | |||
1624 | /** | ||
1625 | * ieee80211_start_tx_ba_cb - low level driver ready to aggregate. | ||
1626 | * @hw: pointer as obtained from ieee80211_alloc_hw(). | ||
1627 | * @ra: receiver address of the BA session recipient. | ||
1628 | * @tid: the TID to BA on. | ||
1629 | * | ||
1630 | * This function must be called by low level driver once it has | ||
1631 | * finished with preparations for the BA session. | ||
1632 | */ | ||
1633 | void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid); | ||
1634 | |||
1635 | /** | ||
1636 | * ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate. | ||
1637 | * @hw: pointer as obtained from ieee80211_alloc_hw(). | ||
1638 | * @ra: receiver address of the BA session recipient. | ||
1639 | * @tid: the TID to BA on. | ||
1640 | * | ||
1641 | * This function must be called by low level driver once it has | ||
1642 | * finished with preparations for the BA session. | ||
1643 | * This version of the function is irq safe. | ||
1644 | */ | ||
1645 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra, | ||
1646 | u16 tid); | ||
1647 | |||
1648 | /** | ||
1649 | * ieee80211_stop_tx_ba_session - Stop a Block Ack session. | ||
1650 | * @hw: pointer as obtained from ieee80211_alloc_hw(). | ||
1651 | * @ra: receiver address of the BA session recipient | ||
1652 | * @tid: the TID to stop BA. | ||
1653 | * @initiator: if indicates initiator DELBA frame will be sent. | ||
1654 | * @return: error if no sta with matching da found, success otherwise | ||
1655 | * | ||
1656 | * Although mac80211/low level driver/user space application can estimate | ||
1657 | * the need to stop aggregation on a certain RA/TID, the session level | ||
1658 | * will be managed by the mac80211. | ||
1659 | */ | ||
1660 | int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw, | ||
1661 | u8 *ra, u16 tid, | ||
1662 | enum ieee80211_back_parties initiator); | ||
1663 | |||
1664 | /** | ||
1665 | * ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate. | ||
1666 | * @hw: pointer as obtained from ieee80211_alloc_hw(). | ||
1667 | * @ra: receiver address of the BA session recipient. | ||
1668 | * @tid: the desired TID to BA on. | ||
1669 | * | ||
1670 | * This function must be called by low level driver once it has | ||
1671 | * finished with preparations for the BA session tear down. | ||
1672 | */ | ||
1673 | void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid); | ||
1674 | |||
1675 | /** | ||
1676 | * ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate. | ||
1677 | * @hw: pointer as obtained from ieee80211_alloc_hw(). | ||
1678 | * @ra: receiver address of the BA session recipient. | ||
1679 | * @tid: the desired TID to BA on. | ||
1680 | * | ||
1681 | * This function must be called by low level driver once it has | ||
1682 | * finished with preparations for the BA session tear down. | ||
1683 | * This version of the function is irq safe. | ||
1684 | */ | ||
1685 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra, | ||
1686 | u16 tid); | ||
1687 | |||
1688 | /** | ||
1689 | * ieee80211_notify_mac - low level driver notification | ||
1690 | * @hw: pointer as obtained from ieee80211_alloc_hw(). | ||
1691 | * @notification_types: enum ieee80211_notification_types | ||
1692 | * | ||
1693 | * This function must be called by low level driver to inform mac80211 of | ||
1694 | * low level driver status change or force mac80211 to re-assoc for low | ||
1695 | * level driver internal error that require re-assoc. | ||
1696 | */ | ||
1697 | void ieee80211_notify_mac(struct ieee80211_hw *hw, | ||
1698 | enum ieee80211_notification_types notif_type); | ||
1577 | #endif /* MAC80211_H */ | 1699 | #endif /* MAC80211_H */ |
diff --git a/include/net/mip6.h b/include/net/mip6.h index 63272610a24a..a83ad1982a90 100644 --- a/include/net/mip6.h +++ b/include/net/mip6.h | |||
@@ -28,9 +28,6 @@ | |||
28 | #include <linux/skbuff.h> | 28 | #include <linux/skbuff.h> |
29 | #include <net/sock.h> | 29 | #include <net/sock.h> |
30 | 30 | ||
31 | #define MIP6_OPT_PAD_1 0 | ||
32 | #define MIP6_OPT_PAD_N 1 | ||
33 | |||
34 | /* | 31 | /* |
35 | * Mobility Header | 32 | * Mobility Header |
36 | */ | 33 | */ |
diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 59b70624b056..9c451ff2f4f4 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h | |||
@@ -12,6 +12,15 @@ | |||
12 | #define NDISC_REDIRECT 137 | 12 | #define NDISC_REDIRECT 137 |
13 | 13 | ||
14 | /* | 14 | /* |
15 | * Router type: cross-layer information from link-layer to | ||
16 | * IPv6 layer reported by certain link types (e.g., RFC4214). | ||
17 | */ | ||
18 | #define NDISC_NODETYPE_UNSPEC 0 /* unspecified (default) */ | ||
19 | #define NDISC_NODETYPE_HOST 1 /* host or unauthorized router */ | ||
20 | #define NDISC_NODETYPE_NODEFAULT 2 /* non-default router */ | ||
21 | #define NDISC_NODETYPE_DEFAULT 3 /* default router */ | ||
22 | |||
23 | /* | ||
15 | * ndisc options | 24 | * ndisc options |
16 | */ | 25 | */ |
17 | 26 | ||
@@ -77,7 +86,7 @@ struct nd_opt_hdr { | |||
77 | } __attribute__((__packed__)); | 86 | } __attribute__((__packed__)); |
78 | 87 | ||
79 | 88 | ||
80 | extern int ndisc_init(struct net_proto_family *ops); | 89 | extern int ndisc_init(void); |
81 | 90 | ||
82 | extern void ndisc_cleanup(void); | 91 | extern void ndisc_cleanup(void); |
83 | 92 | ||
@@ -85,20 +94,17 @@ extern int ndisc_rcv(struct sk_buff *skb); | |||
85 | 94 | ||
86 | extern void ndisc_send_ns(struct net_device *dev, | 95 | extern void ndisc_send_ns(struct net_device *dev, |
87 | struct neighbour *neigh, | 96 | struct neighbour *neigh, |
88 | struct in6_addr *solicit, | 97 | const struct in6_addr *solicit, |
89 | struct in6_addr *daddr, | 98 | const struct in6_addr *daddr, |
90 | struct in6_addr *saddr); | 99 | const struct in6_addr *saddr); |
91 | 100 | ||
92 | extern void ndisc_send_rs(struct net_device *dev, | 101 | extern void ndisc_send_rs(struct net_device *dev, |
93 | struct in6_addr *saddr, | 102 | const struct in6_addr *saddr, |
94 | struct in6_addr *daddr); | 103 | const struct in6_addr *daddr); |
95 | |||
96 | extern void ndisc_forwarding_on(void); | ||
97 | extern void ndisc_forwarding_off(void); | ||
98 | 104 | ||
99 | extern void ndisc_send_redirect(struct sk_buff *skb, | 105 | extern void ndisc_send_redirect(struct sk_buff *skb, |
100 | struct neighbour *neigh, | 106 | struct neighbour *neigh, |
101 | struct in6_addr *target); | 107 | const struct in6_addr *target); |
102 | 108 | ||
103 | extern int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir); | 109 | extern int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir); |
104 | 110 | ||
@@ -107,7 +113,7 @@ extern int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *d | |||
107 | /* | 113 | /* |
108 | * IGMP | 114 | * IGMP |
109 | */ | 115 | */ |
110 | extern int igmp6_init(struct net_proto_family *ops); | 116 | extern int igmp6_init(void); |
111 | 117 | ||
112 | extern void igmp6_cleanup(void); | 118 | extern void igmp6_cleanup(void); |
113 | 119 | ||
@@ -115,7 +121,6 @@ extern int igmp6_event_query(struct sk_buff *skb); | |||
115 | 121 | ||
116 | extern int igmp6_event_report(struct sk_buff *skb); | 122 | extern int igmp6_event_report(struct sk_buff *skb); |
117 | 123 | ||
118 | extern void igmp6_cleanup(void); | ||
119 | 124 | ||
120 | #ifdef CONFIG_SYSCTL | 125 | #ifdef CONFIG_SYSCTL |
121 | extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, | 126 | extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, |
@@ -129,7 +134,7 @@ extern int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, | |||
129 | extern void inet6_ifinfo_notify(int event, | 134 | extern void inet6_ifinfo_notify(int event, |
130 | struct inet6_dev *idev); | 135 | struct inet6_dev *idev); |
131 | 136 | ||
132 | static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, struct in6_addr *addr) | 137 | static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr) |
133 | { | 138 | { |
134 | 139 | ||
135 | if (dev) | 140 | if (dev) |
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 64a5f0120b52..dc420fecafb9 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h | |||
@@ -38,7 +38,9 @@ struct neighbour; | |||
38 | 38 | ||
39 | struct neigh_parms | 39 | struct neigh_parms |
40 | { | 40 | { |
41 | #ifdef CONFIG_NET_NS | ||
41 | struct net *net; | 42 | struct net *net; |
43 | #endif | ||
42 | struct net_device *dev; | 44 | struct net_device *dev; |
43 | struct neigh_parms *next; | 45 | struct neigh_parms *next; |
44 | int (*neigh_setup)(struct neighbour *); | 46 | int (*neigh_setup)(struct neighbour *); |
@@ -131,7 +133,9 @@ struct neigh_ops | |||
131 | struct pneigh_entry | 133 | struct pneigh_entry |
132 | { | 134 | { |
133 | struct pneigh_entry *next; | 135 | struct pneigh_entry *next; |
136 | #ifdef CONFIG_NET_NS | ||
134 | struct net *net; | 137 | struct net *net; |
138 | #endif | ||
135 | struct net_device *dev; | 139 | struct net_device *dev; |
136 | u8 flags; | 140 | u8 flags; |
137 | u8 key[0]; | 141 | u8 key[0]; |
@@ -213,6 +217,17 @@ extern struct neighbour *neigh_event_ns(struct neigh_table *tbl, | |||
213 | 217 | ||
214 | extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl); | 218 | extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl); |
215 | extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); | 219 | extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); |
220 | |||
221 | static inline | ||
222 | struct net *neigh_parms_net(const struct neigh_parms *parms) | ||
223 | { | ||
224 | #ifdef CONFIG_NET_NS | ||
225 | return parms->net; | ||
226 | #else | ||
227 | return &init_net; | ||
228 | #endif | ||
229 | } | ||
230 | |||
216 | extern unsigned long neigh_rand_reach_time(unsigned long base); | 231 | extern unsigned long neigh_rand_reach_time(unsigned long base); |
217 | 232 | ||
218 | extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, | 233 | extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, |
@@ -224,6 +239,16 @@ extern struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, | |||
224 | struct net_device *dev); | 239 | struct net_device *dev); |
225 | extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev); | 240 | extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev); |
226 | 241 | ||
242 | static inline | ||
243 | struct net *pneigh_net(const struct pneigh_entry *pneigh) | ||
244 | { | ||
245 | #ifdef CONFIG_NET_NS | ||
246 | return pneigh->net; | ||
247 | #else | ||
248 | return &init_net; | ||
249 | #endif | ||
250 | } | ||
251 | |||
227 | extern void neigh_app_ns(struct neighbour *n); | 252 | extern void neigh_app_ns(struct neighbour *n); |
228 | extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); | 253 | extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie); |
229 | extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); | 254 | extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)); |
@@ -288,12 +313,6 @@ static inline void neigh_confirm(struct neighbour *neigh) | |||
288 | neigh->confirmed = jiffies; | 313 | neigh->confirmed = jiffies; |
289 | } | 314 | } |
290 | 315 | ||
291 | static inline int neigh_is_connected(struct neighbour *neigh) | ||
292 | { | ||
293 | return neigh->nud_state&NUD_CONNECTED; | ||
294 | } | ||
295 | |||
296 | |||
297 | static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) | 316 | static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) |
298 | { | 317 | { |
299 | neigh->used = jiffies; | 318 | neigh->used = jiffies; |
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 923f2b8b9096..aa540e6be502 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -8,24 +8,29 @@ | |||
8 | #include <linux/workqueue.h> | 8 | #include <linux/workqueue.h> |
9 | #include <linux/list.h> | 9 | #include <linux/list.h> |
10 | 10 | ||
11 | #include <net/netns/core.h> | ||
11 | #include <net/netns/unix.h> | 12 | #include <net/netns/unix.h> |
12 | #include <net/netns/packet.h> | 13 | #include <net/netns/packet.h> |
13 | #include <net/netns/ipv4.h> | 14 | #include <net/netns/ipv4.h> |
14 | #include <net/netns/ipv6.h> | 15 | #include <net/netns/ipv6.h> |
16 | #include <net/netns/dccp.h> | ||
15 | #include <net/netns/x_tables.h> | 17 | #include <net/netns/x_tables.h> |
16 | 18 | ||
17 | struct proc_dir_entry; | 19 | struct proc_dir_entry; |
18 | struct net_device; | 20 | struct net_device; |
19 | struct sock; | 21 | struct sock; |
20 | struct ctl_table_header; | 22 | struct ctl_table_header; |
23 | struct net_generic; | ||
21 | 24 | ||
22 | struct net { | 25 | struct net { |
23 | atomic_t count; /* To decided when the network | 26 | atomic_t count; /* To decided when the network |
24 | * namespace should be freed. | 27 | * namespace should be freed. |
25 | */ | 28 | */ |
29 | #ifdef NETNS_REFCNT_DEBUG | ||
26 | atomic_t use_count; /* To track references we | 30 | atomic_t use_count; /* To track references we |
27 | * destroy on demand | 31 | * destroy on demand |
28 | */ | 32 | */ |
33 | #endif | ||
29 | struct list_head list; /* list of network namespaces */ | 34 | struct list_head list; /* list of network namespaces */ |
30 | struct work_struct work; /* work struct for freeing */ | 35 | struct work_struct work; /* work struct for freeing */ |
31 | 36 | ||
@@ -46,40 +51,46 @@ struct net { | |||
46 | 51 | ||
47 | struct sock *rtnl; /* rtnetlink socket */ | 52 | struct sock *rtnl; /* rtnetlink socket */ |
48 | 53 | ||
49 | /* core sysctls */ | 54 | struct netns_core core; |
50 | struct ctl_table_header *sysctl_core_hdr; | ||
51 | int sysctl_somaxconn; | ||
52 | |||
53 | struct netns_packet packet; | 55 | struct netns_packet packet; |
54 | struct netns_unix unx; | 56 | struct netns_unix unx; |
55 | struct netns_ipv4 ipv4; | 57 | struct netns_ipv4 ipv4; |
56 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 58 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
57 | struct netns_ipv6 ipv6; | 59 | struct netns_ipv6 ipv6; |
58 | #endif | 60 | #endif |
61 | #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) | ||
62 | struct netns_dccp dccp; | ||
63 | #endif | ||
59 | #ifdef CONFIG_NETFILTER | 64 | #ifdef CONFIG_NETFILTER |
60 | struct netns_xt xt; | 65 | struct netns_xt xt; |
61 | #endif | 66 | #endif |
67 | struct net_generic *gen; | ||
62 | }; | 68 | }; |
63 | 69 | ||
64 | #ifdef CONFIG_NET | 70 | |
71 | #include <linux/seq_file_net.h> | ||
72 | |||
65 | /* Init's network namespace */ | 73 | /* Init's network namespace */ |
66 | extern struct net init_net; | 74 | extern struct net init_net; |
67 | #define INIT_NET_NS(net_ns) .net_ns = &init_net, | ||
68 | #else | ||
69 | #define INIT_NET_NS(net_ns) | ||
70 | #endif | ||
71 | |||
72 | extern struct list_head net_namespace_list; | ||
73 | 75 | ||
74 | #ifdef CONFIG_NET | 76 | #ifdef CONFIG_NET |
77 | #define INIT_NET_NS(net_ns) .net_ns = &init_net, | ||
78 | |||
75 | extern struct net *copy_net_ns(unsigned long flags, struct net *net_ns); | 79 | extern struct net *copy_net_ns(unsigned long flags, struct net *net_ns); |
76 | #else | 80 | |
81 | #else /* CONFIG_NET */ | ||
82 | |||
83 | #define INIT_NET_NS(net_ns) | ||
84 | |||
77 | static inline struct net *copy_net_ns(unsigned long flags, struct net *net_ns) | 85 | static inline struct net *copy_net_ns(unsigned long flags, struct net *net_ns) |
78 | { | 86 | { |
79 | /* There is nothing to copy so this is a noop */ | 87 | /* There is nothing to copy so this is a noop */ |
80 | return net_ns; | 88 | return net_ns; |
81 | } | 89 | } |
82 | #endif | 90 | #endif /* CONFIG_NET */ |
91 | |||
92 | |||
93 | extern struct list_head net_namespace_list; | ||
83 | 94 | ||
84 | #ifdef CONFIG_NET_NS | 95 | #ifdef CONFIG_NET_NS |
85 | extern void __put_net(struct net *net); | 96 | extern void __put_net(struct net *net); |
@@ -108,41 +119,59 @@ static inline void put_net(struct net *net) | |||
108 | __put_net(net); | 119 | __put_net(net); |
109 | } | 120 | } |
110 | 121 | ||
111 | static inline struct net *hold_net(struct net *net) | 122 | static inline |
123 | int net_eq(const struct net *net1, const struct net *net2) | ||
124 | { | ||
125 | return net1 == net2; | ||
126 | } | ||
127 | #else | ||
128 | static inline struct net *get_net(struct net *net) | ||
112 | { | 129 | { |
113 | atomic_inc(&net->use_count); | ||
114 | return net; | 130 | return net; |
115 | } | 131 | } |
116 | 132 | ||
117 | static inline void release_net(struct net *net) | 133 | static inline void put_net(struct net *net) |
118 | { | 134 | { |
119 | atomic_dec(&net->use_count); | ||
120 | } | 135 | } |
121 | #else | 136 | |
122 | static inline struct net *get_net(struct net *net) | 137 | static inline struct net *maybe_get_net(struct net *net) |
123 | { | 138 | { |
124 | return net; | 139 | return net; |
125 | } | 140 | } |
126 | 141 | ||
127 | static inline void put_net(struct net *net) | 142 | static inline |
143 | int net_eq(const struct net *net1, const struct net *net2) | ||
128 | { | 144 | { |
145 | return 1; | ||
129 | } | 146 | } |
147 | #endif | ||
148 | |||
130 | 149 | ||
150 | #ifdef NETNS_REFCNT_DEBUG | ||
131 | static inline struct net *hold_net(struct net *net) | 151 | static inline struct net *hold_net(struct net *net) |
132 | { | 152 | { |
153 | if (net) | ||
154 | atomic_inc(&net->use_count); | ||
133 | return net; | 155 | return net; |
134 | } | 156 | } |
135 | 157 | ||
136 | static inline void release_net(struct net *net) | 158 | static inline void release_net(struct net *net) |
137 | { | 159 | { |
160 | if (net) | ||
161 | atomic_dec(&net->use_count); | ||
138 | } | 162 | } |
139 | 163 | #else | |
140 | static inline struct net *maybe_get_net(struct net *net) | 164 | static inline struct net *hold_net(struct net *net) |
141 | { | 165 | { |
142 | return net; | 166 | return net; |
143 | } | 167 | } |
168 | |||
169 | static inline void release_net(struct net *net) | ||
170 | { | ||
171 | } | ||
144 | #endif | 172 | #endif |
145 | 173 | ||
174 | |||
146 | #define for_each_net(VAR) \ | 175 | #define for_each_net(VAR) \ |
147 | list_for_each_entry(VAR, &net_namespace_list, list) | 176 | list_for_each_entry(VAR, &net_namespace_list, list) |
148 | 177 | ||
@@ -166,6 +195,8 @@ extern int register_pernet_subsys(struct pernet_operations *); | |||
166 | extern void unregister_pernet_subsys(struct pernet_operations *); | 195 | extern void unregister_pernet_subsys(struct pernet_operations *); |
167 | extern int register_pernet_device(struct pernet_operations *); | 196 | extern int register_pernet_device(struct pernet_operations *); |
168 | extern void unregister_pernet_device(struct pernet_operations *); | 197 | extern void unregister_pernet_device(struct pernet_operations *); |
198 | extern int register_pernet_gen_device(int *id, struct pernet_operations *); | ||
199 | extern void unregister_pernet_gen_device(int id, struct pernet_operations *); | ||
169 | 200 | ||
170 | struct ctl_path; | 201 | struct ctl_path; |
171 | struct ctl_table; | 202 | struct ctl_table; |
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 90b3e7f5df5f..2dbd6c015b94 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/atomic.h> | 20 | #include <asm/atomic.h> |
21 | 21 | ||
22 | #include <linux/netfilter/nf_conntrack_tcp.h> | 22 | #include <linux/netfilter/nf_conntrack_tcp.h> |
23 | #include <linux/netfilter/nf_conntrack_dccp.h> | ||
23 | #include <linux/netfilter/nf_conntrack_sctp.h> | 24 | #include <linux/netfilter/nf_conntrack_sctp.h> |
24 | #include <linux/netfilter/nf_conntrack_proto_gre.h> | 25 | #include <linux/netfilter/nf_conntrack_proto_gre.h> |
25 | #include <net/netfilter/ipv4/nf_conntrack_icmp.h> | 26 | #include <net/netfilter/ipv4/nf_conntrack_icmp.h> |
@@ -30,6 +31,7 @@ | |||
30 | /* per conntrack: protocol private data */ | 31 | /* per conntrack: protocol private data */ |
31 | union nf_conntrack_proto { | 32 | union nf_conntrack_proto { |
32 | /* insert conntrack proto private data here */ | 33 | /* insert conntrack proto private data here */ |
34 | struct nf_ct_dccp dccp; | ||
33 | struct ip_ct_sctp sctp; | 35 | struct ip_ct_sctp sctp; |
34 | struct ip_ct_tcp tcp; | 36 | struct ip_ct_tcp tcp; |
35 | struct ip_ct_icmp icmp; | 37 | struct ip_ct_icmp icmp; |
@@ -46,6 +48,7 @@ union nf_conntrack_expect_proto { | |||
46 | #include <linux/netfilter/nf_conntrack_pptp.h> | 48 | #include <linux/netfilter/nf_conntrack_pptp.h> |
47 | #include <linux/netfilter/nf_conntrack_h323.h> | 49 | #include <linux/netfilter/nf_conntrack_h323.h> |
48 | #include <linux/netfilter/nf_conntrack_sane.h> | 50 | #include <linux/netfilter/nf_conntrack_sane.h> |
51 | #include <linux/netfilter/nf_conntrack_sip.h> | ||
49 | 52 | ||
50 | /* per conntrack: application helper private data */ | 53 | /* per conntrack: application helper private data */ |
51 | union nf_conntrack_help { | 54 | union nf_conntrack_help { |
@@ -54,6 +57,7 @@ union nf_conntrack_help { | |||
54 | struct nf_ct_pptp_master ct_pptp_info; | 57 | struct nf_ct_pptp_master ct_pptp_info; |
55 | struct nf_ct_h323_master ct_h323_info; | 58 | struct nf_ct_h323_master ct_h323_info; |
56 | struct nf_ct_sane_master ct_sane_info; | 59 | struct nf_ct_sane_master ct_sane_info; |
60 | struct nf_ct_sip_master ct_sip_info; | ||
57 | }; | 61 | }; |
58 | 62 | ||
59 | #include <linux/types.h> | 63 | #include <linux/types.h> |
@@ -61,20 +65,16 @@ union nf_conntrack_help { | |||
61 | #include <linux/timer.h> | 65 | #include <linux/timer.h> |
62 | 66 | ||
63 | #ifdef CONFIG_NETFILTER_DEBUG | 67 | #ifdef CONFIG_NETFILTER_DEBUG |
64 | #define NF_CT_ASSERT(x) \ | 68 | #define NF_CT_ASSERT(x) WARN_ON(!(x)) |
65 | do { \ | ||
66 | if (!(x)) \ | ||
67 | /* Wooah! I'm tripping my conntrack in a frenzy of \ | ||
68 | netplay... */ \ | ||
69 | printk("NF_CT_ASSERT: %s:%i(%s)\n", \ | ||
70 | __FILE__, __LINE__, __FUNCTION__); \ | ||
71 | } while(0) | ||
72 | #else | 69 | #else |
73 | #define NF_CT_ASSERT(x) | 70 | #define NF_CT_ASSERT(x) |
74 | #endif | 71 | #endif |
75 | 72 | ||
76 | struct nf_conntrack_helper; | 73 | struct nf_conntrack_helper; |
77 | 74 | ||
75 | /* Must be kept in sync with the classes defined by helpers */ | ||
76 | #define NF_CT_MAX_EXPECT_CLASSES 3 | ||
77 | |||
78 | /* nf_conn feature for connections that have a helper */ | 78 | /* nf_conn feature for connections that have a helper */ |
79 | struct nf_conn_help { | 79 | struct nf_conn_help { |
80 | /* Helper. if any */ | 80 | /* Helper. if any */ |
@@ -85,7 +85,7 @@ struct nf_conn_help { | |||
85 | struct hlist_head expectations; | 85 | struct hlist_head expectations; |
86 | 86 | ||
87 | /* Current number of expected connections */ | 87 | /* Current number of expected connections */ |
88 | unsigned int expecting; | 88 | u8 expecting[NF_CT_MAX_EXPECT_CLASSES]; |
89 | }; | 89 | }; |
90 | 90 | ||
91 | 91 | ||
@@ -140,6 +140,16 @@ nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash) | |||
140 | tuplehash[hash->tuple.dst.dir]); | 140 | tuplehash[hash->tuple.dst.dir]); |
141 | } | 141 | } |
142 | 142 | ||
143 | static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct) | ||
144 | { | ||
145 | return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | ||
146 | } | ||
147 | |||
148 | static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct) | ||
149 | { | ||
150 | return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; | ||
151 | } | ||
152 | |||
143 | /* get master conntrack via master expectation */ | 153 | /* get master conntrack via master expectation */ |
144 | #define master_ct(conntr) (conntr->master) | 154 | #define master_ct(conntr) (conntr->master) |
145 | 155 | ||
@@ -184,12 +194,11 @@ extern void nf_conntrack_hash_insert(struct nf_conn *ct); | |||
184 | 194 | ||
185 | extern void nf_conntrack_flush(void); | 195 | extern void nf_conntrack_flush(void); |
186 | 196 | ||
187 | extern int nf_ct_get_tuplepr(const struct sk_buff *skb, | 197 | extern bool nf_ct_get_tuplepr(const struct sk_buff *skb, |
188 | unsigned int nhoff, | 198 | unsigned int nhoff, u_int16_t l3num, |
189 | u_int16_t l3num, | 199 | struct nf_conntrack_tuple *tuple); |
190 | struct nf_conntrack_tuple *tuple); | 200 | extern bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, |
191 | extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, | 201 | const struct nf_conntrack_tuple *orig); |
192 | const struct nf_conntrack_tuple *orig); | ||
193 | 202 | ||
194 | extern void __nf_ct_refresh_acct(struct nf_conn *ct, | 203 | extern void __nf_ct_refresh_acct(struct nf_conn *ct, |
195 | enum ip_conntrack_info ctinfo, | 204 | enum ip_conntrack_info ctinfo, |
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 9ee26469c759..a81771210934 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h | |||
@@ -30,7 +30,7 @@ extern void nf_conntrack_cleanup(void); | |||
30 | extern int nf_conntrack_proto_init(void); | 30 | extern int nf_conntrack_proto_init(void); |
31 | extern void nf_conntrack_proto_fini(void); | 31 | extern void nf_conntrack_proto_fini(void); |
32 | 32 | ||
33 | extern int | 33 | extern bool |
34 | nf_ct_get_tuple(const struct sk_buff *skb, | 34 | nf_ct_get_tuple(const struct sk_buff *skb, |
35 | unsigned int nhoff, | 35 | unsigned int nhoff, |
36 | unsigned int dataoff, | 36 | unsigned int dataoff, |
@@ -40,7 +40,7 @@ nf_ct_get_tuple(const struct sk_buff *skb, | |||
40 | const struct nf_conntrack_l3proto *l3proto, | 40 | const struct nf_conntrack_l3proto *l3proto, |
41 | const struct nf_conntrack_l4proto *l4proto); | 41 | const struct nf_conntrack_l4proto *l4proto); |
42 | 42 | ||
43 | extern int | 43 | extern bool |
44 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, | 44 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, |
45 | const struct nf_conntrack_tuple *orig, | 45 | const struct nf_conntrack_tuple *orig, |
46 | const struct nf_conntrack_l3proto *l3proto, | 46 | const struct nf_conntrack_l3proto *l3proto, |
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index cb608a1b44e5..dfdf4b459475 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h | |||
@@ -41,6 +41,9 @@ struct nf_conntrack_expect | |||
41 | /* Flags */ | 41 | /* Flags */ |
42 | unsigned int flags; | 42 | unsigned int flags; |
43 | 43 | ||
44 | /* Expectation class */ | ||
45 | unsigned int class; | ||
46 | |||
44 | #ifdef CONFIG_NF_NAT_NEEDED | 47 | #ifdef CONFIG_NF_NAT_NEEDED |
45 | __be32 saved_ip; | 48 | __be32 saved_ip; |
46 | /* This is the original per-proto part, used to map the | 49 | /* This is the original per-proto part, used to map the |
@@ -53,7 +56,16 @@ struct nf_conntrack_expect | |||
53 | struct rcu_head rcu; | 56 | struct rcu_head rcu; |
54 | }; | 57 | }; |
55 | 58 | ||
56 | #define NF_CT_EXPECT_PERMANENT 0x1 | 59 | struct nf_conntrack_expect_policy |
60 | { | ||
61 | unsigned int max_expected; | ||
62 | unsigned int timeout; | ||
63 | }; | ||
64 | |||
65 | #define NF_CT_EXPECT_CLASS_DEFAULT 0 | ||
66 | |||
67 | #define NF_CT_EXPECT_PERMANENT 0x1 | ||
68 | #define NF_CT_EXPECT_INACTIVE 0x2 | ||
57 | 69 | ||
58 | int nf_conntrack_expect_init(void); | 70 | int nf_conntrack_expect_init(void); |
59 | void nf_conntrack_expect_fini(void); | 71 | void nf_conntrack_expect_fini(void); |
@@ -74,10 +86,10 @@ void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); | |||
74 | /* Allocate space for an expectation: this is mandatory before calling | 86 | /* Allocate space for an expectation: this is mandatory before calling |
75 | nf_ct_expect_related. You will have to call put afterwards. */ | 87 | nf_ct_expect_related. You will have to call put afterwards. */ |
76 | struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me); | 88 | struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me); |
77 | void nf_ct_expect_init(struct nf_conntrack_expect *, int, | 89 | void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, int, |
78 | union nf_inet_addr *, | 90 | const union nf_inet_addr *, |
79 | union nf_inet_addr *, | 91 | const union nf_inet_addr *, |
80 | u_int8_t, __be16 *, __be16 *); | 92 | u_int8_t, const __be16 *, const __be16 *); |
81 | void nf_ct_expect_put(struct nf_conntrack_expect *exp); | 93 | void nf_ct_expect_put(struct nf_conntrack_expect *exp); |
82 | int nf_ct_expect_related(struct nf_conntrack_expect *expect); | 94 | int nf_ct_expect_related(struct nf_conntrack_expect *expect); |
83 | 95 | ||
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 4ca125e9b3ce..f8060ab5a083 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h | |||
@@ -20,9 +20,7 @@ struct nf_conntrack_helper | |||
20 | 20 | ||
21 | const char *name; /* name of the module */ | 21 | const char *name; /* name of the module */ |
22 | struct module *me; /* pointer to self */ | 22 | struct module *me; /* pointer to self */ |
23 | unsigned int max_expected; /* Maximum number of concurrent | 23 | const struct nf_conntrack_expect_policy *expect_policy; |
24 | * expected connections */ | ||
25 | unsigned int timeout; /* timeout for expecteds */ | ||
26 | 24 | ||
27 | /* Tuple of things we will help (compared against server response) */ | 25 | /* Tuple of things we will help (compared against server response) */ |
28 | struct nf_conntrack_tuple tuple; | 26 | struct nf_conntrack_tuple tuple; |
@@ -37,6 +35,7 @@ struct nf_conntrack_helper | |||
37 | void (*destroy)(struct nf_conn *ct); | 35 | void (*destroy)(struct nf_conn *ct); |
38 | 36 | ||
39 | int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct); | 37 | int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct); |
38 | unsigned int expect_class_max; | ||
40 | }; | 39 | }; |
41 | 40 | ||
42 | extern struct nf_conntrack_helper * | 41 | extern struct nf_conntrack_helper * |
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index b886e3ae6cad..0378676c3dd8 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h | |||
@@ -28,31 +28,20 @@ struct nf_conntrack_l3proto | |||
28 | * Try to fill in the third arg: nhoff is offset of l3 proto | 28 | * Try to fill in the third arg: nhoff is offset of l3 proto |
29 | * hdr. Return true if possible. | 29 | * hdr. Return true if possible. |
30 | */ | 30 | */ |
31 | int (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff, | 31 | bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff, |
32 | struct nf_conntrack_tuple *tuple); | 32 | struct nf_conntrack_tuple *tuple); |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * Invert the per-proto part of the tuple: ie. turn xmit into reply. | 35 | * Invert the per-proto part of the tuple: ie. turn xmit into reply. |
36 | * Some packets can't be inverted: return 0 in that case. | 36 | * Some packets can't be inverted: return 0 in that case. |
37 | */ | 37 | */ |
38 | int (*invert_tuple)(struct nf_conntrack_tuple *inverse, | 38 | bool (*invert_tuple)(struct nf_conntrack_tuple *inverse, |
39 | const struct nf_conntrack_tuple *orig); | 39 | const struct nf_conntrack_tuple *orig); |
40 | 40 | ||
41 | /* Print out the per-protocol part of the tuple. */ | 41 | /* Print out the per-protocol part of the tuple. */ |
42 | int (*print_tuple)(struct seq_file *s, | 42 | int (*print_tuple)(struct seq_file *s, |
43 | const struct nf_conntrack_tuple *); | 43 | const struct nf_conntrack_tuple *); |
44 | 44 | ||
45 | /* Returns verdict for packet, or -1 for invalid. */ | ||
46 | int (*packet)(struct nf_conn *ct, | ||
47 | const struct sk_buff *skb, | ||
48 | enum ip_conntrack_info ctinfo); | ||
49 | |||
50 | /* | ||
51 | * Called when a new connection for this protocol found; | ||
52 | * returns TRUE if it's OK. If so, packet() called next. | ||
53 | */ | ||
54 | int (*new)(struct nf_conn *ct, const struct sk_buff *skb); | ||
55 | |||
56 | /* | 45 | /* |
57 | * Called before tracking. | 46 | * Called before tracking. |
58 | * *dataoff: offset of protocol header (TCP, UDP,...) in skb | 47 | * *dataoff: offset of protocol header (TCP, UDP,...) in skb |
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index efc16eccddb1..723df9d1cc35 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h | |||
@@ -25,15 +25,14 @@ struct nf_conntrack_l4proto | |||
25 | 25 | ||
26 | /* Try to fill in the third arg: dataoff is offset past network protocol | 26 | /* Try to fill in the third arg: dataoff is offset past network protocol |
27 | hdr. Return true if possible. */ | 27 | hdr. Return true if possible. */ |
28 | int (*pkt_to_tuple)(const struct sk_buff *skb, | 28 | bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, |
29 | unsigned int dataoff, | 29 | struct nf_conntrack_tuple *tuple); |
30 | struct nf_conntrack_tuple *tuple); | ||
31 | 30 | ||
32 | /* Invert the per-proto part of the tuple: ie. turn xmit into reply. | 31 | /* Invert the per-proto part of the tuple: ie. turn xmit into reply. |
33 | * Some packets can't be inverted: return 0 in that case. | 32 | * Some packets can't be inverted: return 0 in that case. |
34 | */ | 33 | */ |
35 | int (*invert_tuple)(struct nf_conntrack_tuple *inverse, | 34 | bool (*invert_tuple)(struct nf_conntrack_tuple *inverse, |
36 | const struct nf_conntrack_tuple *orig); | 35 | const struct nf_conntrack_tuple *orig); |
37 | 36 | ||
38 | /* Returns verdict for packet, or -1 for invalid. */ | 37 | /* Returns verdict for packet, or -1 for invalid. */ |
39 | int (*packet)(struct nf_conn *ct, | 38 | int (*packet)(struct nf_conn *ct, |
@@ -45,8 +44,8 @@ struct nf_conntrack_l4proto | |||
45 | 44 | ||
46 | /* Called when a new connection for this protocol found; | 45 | /* Called when a new connection for this protocol found; |
47 | * returns TRUE if it's OK. If so, packet() called next. */ | 46 | * returns TRUE if it's OK. If so, packet() called next. */ |
48 | int (*new)(struct nf_conn *ct, const struct sk_buff *skb, | 47 | bool (*new)(struct nf_conn *ct, const struct sk_buff *skb, |
49 | unsigned int dataoff); | 48 | unsigned int dataoff); |
50 | 49 | ||
51 | /* Called when a conntrack entry is destroyed */ | 50 | /* Called when a conntrack entry is destroyed */ |
52 | void (*destroy)(struct nf_conn *ct); | 51 | void (*destroy)(struct nf_conn *ct); |
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h index e69ab2e87597..1bb7087833d3 100644 --- a/include/net/netfilter/nf_conntrack_tuple.h +++ b/include/net/netfilter/nf_conntrack_tuple.h | |||
@@ -41,6 +41,9 @@ union nf_conntrack_man_proto | |||
41 | } icmp; | 41 | } icmp; |
42 | struct { | 42 | struct { |
43 | __be16 port; | 43 | __be16 port; |
44 | } dccp; | ||
45 | struct { | ||
46 | __be16 port; | ||
44 | } sctp; | 47 | } sctp; |
45 | struct { | 48 | struct { |
46 | __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ | 49 | __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ |
@@ -79,6 +82,9 @@ struct nf_conntrack_tuple | |||
79 | } icmp; | 82 | } icmp; |
80 | struct { | 83 | struct { |
81 | __be16 port; | 84 | __be16 port; |
85 | } dccp; | ||
86 | struct { | ||
87 | __be16 port; | ||
82 | } sctp; | 88 | } sctp; |
83 | struct { | 89 | struct { |
84 | __be16 key; | 90 | __be16 key; |
@@ -113,11 +119,37 @@ struct nf_conntrack_tuple_mask | |||
113 | 119 | ||
114 | #ifdef __KERNEL__ | 120 | #ifdef __KERNEL__ |
115 | 121 | ||
116 | #define NF_CT_DUMP_TUPLE(tp) \ | 122 | static inline void nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t) |
117 | pr_debug("tuple %p: %u %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n", \ | 123 | { |
118 | (tp), (tp)->src.l3num, (tp)->dst.protonum, \ | 124 | #ifdef DEBUG |
119 | NIP6(*(struct in6_addr *)(tp)->src.u3.all), ntohs((tp)->src.u.all), \ | 125 | printk("tuple %p: %u " NIPQUAD_FMT ":%hu -> " NIPQUAD_FMT ":%hu\n", |
120 | NIP6(*(struct in6_addr *)(tp)->dst.u3.all), ntohs((tp)->dst.u.all)) | 126 | t, t->dst.protonum, |
127 | NIPQUAD(t->src.u3.ip), ntohs(t->src.u.all), | ||
128 | NIPQUAD(t->dst.u3.ip), ntohs(t->dst.u.all)); | ||
129 | #endif | ||
130 | } | ||
131 | |||
132 | static inline void nf_ct_dump_tuple_ipv6(const struct nf_conntrack_tuple *t) | ||
133 | { | ||
134 | #ifdef DEBUG | ||
135 | printk("tuple %p: %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n", | ||
136 | t, t->dst.protonum, | ||
137 | NIP6(*(struct in6_addr *)t->src.u3.all), ntohs(t->src.u.all), | ||
138 | NIP6(*(struct in6_addr *)t->dst.u3.all), ntohs(t->dst.u.all)); | ||
139 | #endif | ||
140 | } | ||
141 | |||
142 | static inline void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t) | ||
143 | { | ||
144 | switch (t->src.l3num) { | ||
145 | case AF_INET: | ||
146 | nf_ct_dump_tuple_ip(t); | ||
147 | break; | ||
148 | case AF_INET6: | ||
149 | nf_ct_dump_tuple_ipv6(t); | ||
150 | break; | ||
151 | } | ||
152 | } | ||
121 | 153 | ||
122 | /* If we're the first tuple, it's the original dir. */ | 154 | /* If we're the first tuple, it's the original dir. */ |
123 | #define NF_CT_DIRECTION(h) \ | 155 | #define NF_CT_DIRECTION(h) \ |
@@ -132,70 +164,64 @@ struct nf_conntrack_tuple_hash | |||
132 | 164 | ||
133 | #endif /* __KERNEL__ */ | 165 | #endif /* __KERNEL__ */ |
134 | 166 | ||
135 | static inline int __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1, | 167 | static inline bool __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1, |
136 | const struct nf_conntrack_tuple *t2) | 168 | const struct nf_conntrack_tuple *t2) |
137 | { | 169 | { |
138 | return (t1->src.u3.all[0] == t2->src.u3.all[0] && | 170 | return (nf_inet_addr_cmp(&t1->src.u3, &t2->src.u3) && |
139 | t1->src.u3.all[1] == t2->src.u3.all[1] && | ||
140 | t1->src.u3.all[2] == t2->src.u3.all[2] && | ||
141 | t1->src.u3.all[3] == t2->src.u3.all[3] && | ||
142 | t1->src.u.all == t2->src.u.all && | 171 | t1->src.u.all == t2->src.u.all && |
143 | t1->src.l3num == t2->src.l3num); | 172 | t1->src.l3num == t2->src.l3num); |
144 | } | 173 | } |
145 | 174 | ||
146 | static inline int __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1, | 175 | static inline bool __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1, |
147 | const struct nf_conntrack_tuple *t2) | 176 | const struct nf_conntrack_tuple *t2) |
148 | { | 177 | { |
149 | return (t1->dst.u3.all[0] == t2->dst.u3.all[0] && | 178 | return (nf_inet_addr_cmp(&t1->dst.u3, &t2->dst.u3) && |
150 | t1->dst.u3.all[1] == t2->dst.u3.all[1] && | ||
151 | t1->dst.u3.all[2] == t2->dst.u3.all[2] && | ||
152 | t1->dst.u3.all[3] == t2->dst.u3.all[3] && | ||
153 | t1->dst.u.all == t2->dst.u.all && | 179 | t1->dst.u.all == t2->dst.u.all && |
154 | t1->dst.protonum == t2->dst.protonum); | 180 | t1->dst.protonum == t2->dst.protonum); |
155 | } | 181 | } |
156 | 182 | ||
157 | static inline int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1, | 183 | static inline bool nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1, |
158 | const struct nf_conntrack_tuple *t2) | 184 | const struct nf_conntrack_tuple *t2) |
159 | { | 185 | { |
160 | return __nf_ct_tuple_src_equal(t1, t2) && | 186 | return __nf_ct_tuple_src_equal(t1, t2) && |
161 | __nf_ct_tuple_dst_equal(t1, t2); | 187 | __nf_ct_tuple_dst_equal(t1, t2); |
162 | } | 188 | } |
163 | 189 | ||
164 | static inline int nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1, | 190 | static inline bool |
165 | const struct nf_conntrack_tuple_mask *m2) | 191 | nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1, |
192 | const struct nf_conntrack_tuple_mask *m2) | ||
166 | { | 193 | { |
167 | return (m1->src.u3.all[0] == m2->src.u3.all[0] && | 194 | return (nf_inet_addr_cmp(&m1->src.u3, &m2->src.u3) && |
168 | m1->src.u3.all[1] == m2->src.u3.all[1] && | ||
169 | m1->src.u3.all[2] == m2->src.u3.all[2] && | ||
170 | m1->src.u3.all[3] == m2->src.u3.all[3] && | ||
171 | m1->src.u.all == m2->src.u.all); | 195 | m1->src.u.all == m2->src.u.all); |
172 | } | 196 | } |
173 | 197 | ||
174 | static inline int nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1, | 198 | static inline bool |
175 | const struct nf_conntrack_tuple *t2, | 199 | nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1, |
176 | const struct nf_conntrack_tuple_mask *mask) | 200 | const struct nf_conntrack_tuple *t2, |
201 | const struct nf_conntrack_tuple_mask *mask) | ||
177 | { | 202 | { |
178 | int count; | 203 | int count; |
179 | 204 | ||
180 | for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) { | 205 | for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) { |
181 | if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) & | 206 | if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) & |
182 | mask->src.u3.all[count]) | 207 | mask->src.u3.all[count]) |
183 | return 0; | 208 | return false; |
184 | } | 209 | } |
185 | 210 | ||
186 | if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all) | 211 | if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all) |
187 | return 0; | 212 | return false; |
188 | 213 | ||
189 | if (t1->src.l3num != t2->src.l3num || | 214 | if (t1->src.l3num != t2->src.l3num || |
190 | t1->dst.protonum != t2->dst.protonum) | 215 | t1->dst.protonum != t2->dst.protonum) |
191 | return 0; | 216 | return false; |
192 | 217 | ||
193 | return 1; | 218 | return true; |
194 | } | 219 | } |
195 | 220 | ||
196 | static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t, | 221 | static inline bool |
197 | const struct nf_conntrack_tuple *tuple, | 222 | nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t, |
198 | const struct nf_conntrack_tuple_mask *mask) | 223 | const struct nf_conntrack_tuple *tuple, |
224 | const struct nf_conntrack_tuple_mask *mask) | ||
199 | { | 225 | { |
200 | return nf_ct_tuple_src_mask_cmp(t, tuple, mask) && | 226 | return nf_ct_tuple_src_mask_cmp(t, tuple, mask) && |
201 | __nf_ct_tuple_dst_equal(t, tuple); | 227 | __nf_ct_tuple_dst_equal(t, tuple); |
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index 58dd22687949..237a961f40e1 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h | |||
@@ -24,6 +24,9 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb, | |||
24 | extern int nf_nat_seq_adjust(struct sk_buff *skb, | 24 | extern int nf_nat_seq_adjust(struct sk_buff *skb, |
25 | struct nf_conn *ct, | 25 | struct nf_conn *ct, |
26 | enum ip_conntrack_info ctinfo); | 26 | enum ip_conntrack_info ctinfo); |
27 | extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb, | ||
28 | struct nf_conn *ct, | ||
29 | enum ip_conntrack_info ctinfo); | ||
27 | 30 | ||
28 | /* Setup NAT on this expected conntrack so it follows master, but goes | 31 | /* Setup NAT on this expected conntrack so it follows master, but goes |
29 | * to port ct->master->saved_proto. */ | 32 | * to port ct->master->saved_proto. */ |
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h index 4aa0edbb5b96..f3662c4394ef 100644 --- a/include/net/netfilter/nf_nat_protocol.h +++ b/include/net/netfilter/nf_nat_protocol.h | |||
@@ -8,9 +8,6 @@ struct nf_nat_range; | |||
8 | 8 | ||
9 | struct nf_nat_protocol | 9 | struct nf_nat_protocol |
10 | { | 10 | { |
11 | /* Protocol name */ | ||
12 | const char *name; | ||
13 | |||
14 | /* Protocol number. */ | 11 | /* Protocol number. */ |
15 | unsigned int protonum; | 12 | unsigned int protonum; |
16 | 13 | ||
@@ -18,25 +15,25 @@ struct nf_nat_protocol | |||
18 | 15 | ||
19 | /* Translate a packet to the target according to manip type. | 16 | /* Translate a packet to the target according to manip type. |
20 | Return true if succeeded. */ | 17 | Return true if succeeded. */ |
21 | int (*manip_pkt)(struct sk_buff *skb, | 18 | bool (*manip_pkt)(struct sk_buff *skb, |
22 | unsigned int iphdroff, | 19 | unsigned int iphdroff, |
23 | const struct nf_conntrack_tuple *tuple, | 20 | const struct nf_conntrack_tuple *tuple, |
24 | enum nf_nat_manip_type maniptype); | 21 | enum nf_nat_manip_type maniptype); |
25 | 22 | ||
26 | /* Is the manipable part of the tuple between min and max incl? */ | 23 | /* Is the manipable part of the tuple between min and max incl? */ |
27 | int (*in_range)(const struct nf_conntrack_tuple *tuple, | 24 | bool (*in_range)(const struct nf_conntrack_tuple *tuple, |
28 | enum nf_nat_manip_type maniptype, | 25 | enum nf_nat_manip_type maniptype, |
29 | const union nf_conntrack_man_proto *min, | 26 | const union nf_conntrack_man_proto *min, |
30 | const union nf_conntrack_man_proto *max); | 27 | const union nf_conntrack_man_proto *max); |
31 | 28 | ||
32 | /* Alter the per-proto part of the tuple (depending on | 29 | /* Alter the per-proto part of the tuple (depending on |
33 | maniptype), to give a unique tuple in the given range if | 30 | maniptype), to give a unique tuple in the given range if |
34 | possible; return false if not. Per-protocol part of tuple | 31 | possible; return false if not. Per-protocol part of tuple |
35 | is initialized to the incoming packet. */ | 32 | is initialized to the incoming packet. */ |
36 | int (*unique_tuple)(struct nf_conntrack_tuple *tuple, | 33 | bool (*unique_tuple)(struct nf_conntrack_tuple *tuple, |
37 | const struct nf_nat_range *range, | 34 | const struct nf_nat_range *range, |
38 | enum nf_nat_manip_type maniptype, | 35 | enum nf_nat_manip_type maniptype, |
39 | const struct nf_conn *ct); | 36 | const struct nf_conn *ct); |
40 | 37 | ||
41 | int (*range_to_nlattr)(struct sk_buff *skb, | 38 | int (*range_to_nlattr)(struct sk_buff *skb, |
42 | const struct nf_nat_range *range); | 39 | const struct nf_nat_range *range); |
@@ -62,9 +59,20 @@ extern int init_protocols(void) __init; | |||
62 | extern void cleanup_protocols(void); | 59 | extern void cleanup_protocols(void); |
63 | extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum); | 60 | extern const struct nf_nat_protocol *find_nat_proto(u_int16_t protonum); |
64 | 61 | ||
65 | extern int nf_nat_port_range_to_nlattr(struct sk_buff *skb, | 62 | extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple, |
66 | const struct nf_nat_range *range); | 63 | enum nf_nat_manip_type maniptype, |
67 | extern int nf_nat_port_nlattr_to_range(struct nlattr *tb[], | 64 | const union nf_conntrack_man_proto *min, |
68 | struct nf_nat_range *range); | 65 | const union nf_conntrack_man_proto *max); |
66 | |||
67 | extern bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, | ||
68 | const struct nf_nat_range *range, | ||
69 | enum nf_nat_manip_type maniptype, | ||
70 | const struct nf_conn *ct, | ||
71 | u_int16_t *rover); | ||
72 | |||
73 | extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb, | ||
74 | const struct nf_nat_range *range); | ||
75 | extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[], | ||
76 | struct nf_nat_range *range); | ||
69 | 77 | ||
70 | #endif /*_NF_NAT_PROTO_H*/ | 78 | #endif /*_NF_NAT_PROTO_H*/ |
diff --git a/include/net/netfilter/nf_nat_rule.h b/include/net/netfilter/nf_nat_rule.h index 75d1825031d7..e4a18ae361c6 100644 --- a/include/net/netfilter/nf_nat_rule.h +++ b/include/net/netfilter/nf_nat_rule.h | |||
@@ -14,7 +14,4 @@ extern int nf_nat_rule_find(struct sk_buff *skb, | |||
14 | 14 | ||
15 | extern unsigned int | 15 | extern unsigned int |
16 | alloc_null_binding(struct nf_conn *ct, unsigned int hooknum); | 16 | alloc_null_binding(struct nf_conn *ct, unsigned int hooknum); |
17 | |||
18 | extern unsigned int | ||
19 | alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum); | ||
20 | #endif /* _NF_NAT_RULE_H */ | 17 | #endif /* _NF_NAT_RULE_H */ |
diff --git a/include/net/netlabel.h b/include/net/netlabel.h index 0ca67d73c7ad..5e53a85b5ca1 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h | |||
@@ -162,7 +162,7 @@ struct netlbl_lsm_secattr_catmap { | |||
162 | 162 | ||
163 | /** | 163 | /** |
164 | * struct netlbl_lsm_secattr - NetLabel LSM security attributes | 164 | * struct netlbl_lsm_secattr - NetLabel LSM security attributes |
165 | * @flags: indicate which attributes are contained in this structure | 165 | * @flags: indicate structure attributes, see NETLBL_SECATTR_* |
166 | * @type: indicate the NLTYPE of the attributes | 166 | * @type: indicate the NLTYPE of the attributes |
167 | * @domain: the NetLabel LSM domain | 167 | * @domain: the NetLabel LSM domain |
168 | * @cache: NetLabel LSM specific cache | 168 | * @cache: NetLabel LSM specific cache |
@@ -180,17 +180,22 @@ struct netlbl_lsm_secattr_catmap { | |||
180 | * NetLabel itself when returning security attributes to the LSM. | 180 | * NetLabel itself when returning security attributes to the LSM. |
181 | * | 181 | * |
182 | */ | 182 | */ |
183 | struct netlbl_lsm_secattr { | ||
184 | u32 flags; | ||
185 | /* bitmap values for 'flags' */ | ||
183 | #define NETLBL_SECATTR_NONE 0x00000000 | 186 | #define NETLBL_SECATTR_NONE 0x00000000 |
184 | #define NETLBL_SECATTR_DOMAIN 0x00000001 | 187 | #define NETLBL_SECATTR_DOMAIN 0x00000001 |
188 | #define NETLBL_SECATTR_DOMAIN_CPY (NETLBL_SECATTR_DOMAIN | \ | ||
189 | NETLBL_SECATTR_FREE_DOMAIN) | ||
185 | #define NETLBL_SECATTR_CACHE 0x00000002 | 190 | #define NETLBL_SECATTR_CACHE 0x00000002 |
186 | #define NETLBL_SECATTR_MLS_LVL 0x00000004 | 191 | #define NETLBL_SECATTR_MLS_LVL 0x00000004 |
187 | #define NETLBL_SECATTR_MLS_CAT 0x00000008 | 192 | #define NETLBL_SECATTR_MLS_CAT 0x00000008 |
188 | #define NETLBL_SECATTR_SECID 0x00000010 | 193 | #define NETLBL_SECATTR_SECID 0x00000010 |
194 | /* bitmap meta-values for 'flags' */ | ||
195 | #define NETLBL_SECATTR_FREE_DOMAIN 0x01000000 | ||
189 | #define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \ | 196 | #define NETLBL_SECATTR_CACHEABLE (NETLBL_SECATTR_MLS_LVL | \ |
190 | NETLBL_SECATTR_MLS_CAT | \ | 197 | NETLBL_SECATTR_MLS_CAT | \ |
191 | NETLBL_SECATTR_SECID) | 198 | NETLBL_SECATTR_SECID) |
192 | struct netlbl_lsm_secattr { | ||
193 | u32 flags; | ||
194 | u32 type; | 199 | u32 type; |
195 | char *domain; | 200 | char *domain; |
196 | struct netlbl_lsm_cache *cache; | 201 | struct netlbl_lsm_cache *cache; |
@@ -303,7 +308,8 @@ static inline void netlbl_secattr_init(struct netlbl_lsm_secattr *secattr) | |||
303 | */ | 308 | */ |
304 | static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr) | 309 | static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr) |
305 | { | 310 | { |
306 | kfree(secattr->domain); | 311 | if (secattr->flags & NETLBL_SECATTR_FREE_DOMAIN) |
312 | kfree(secattr->domain); | ||
307 | if (secattr->flags & NETLBL_SECATTR_CACHE) | 313 | if (secattr->flags & NETLBL_SECATTR_CACHE) |
308 | netlbl_secattr_cache_free(secattr->cache); | 314 | netlbl_secattr_cache_free(secattr->cache); |
309 | if (secattr->flags & NETLBL_SECATTR_MLS_CAT) | 315 | if (secattr->flags & NETLBL_SECATTR_MLS_CAT) |
diff --git a/include/net/netns/core.h b/include/net/netns/core.h new file mode 100644 index 000000000000..24d4be76bbd1 --- /dev/null +++ b/include/net/netns/core.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef __NETNS_CORE_H__ | ||
2 | #define __NETNS_CORE_H__ | ||
3 | |||
4 | struct ctl_table_header; | ||
5 | struct prot_inuse; | ||
6 | |||
7 | struct netns_core { | ||
8 | /* core sysctls */ | ||
9 | struct ctl_table_header *sysctl_hdr; | ||
10 | |||
11 | int sysctl_somaxconn; | ||
12 | |||
13 | struct prot_inuse *inuse; | ||
14 | }; | ||
15 | |||
16 | #endif | ||
diff --git a/include/net/netns/dccp.h b/include/net/netns/dccp.h new file mode 100644 index 000000000000..98d2a7ce1f71 --- /dev/null +++ b/include/net/netns/dccp.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef __NETNS_DCCP_H__ | ||
2 | #define __NETNS_DCCP_H__ | ||
3 | |||
4 | struct sock; | ||
5 | |||
6 | struct netns_dccp { | ||
7 | struct sock *v4_ctl_sk; | ||
8 | struct sock *v6_ctl_sk; | ||
9 | }; | ||
10 | |||
11 | #endif | ||
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h new file mode 100644 index 000000000000..0c04fd2a700b --- /dev/null +++ b/include/net/netns/generic.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * generic net pointers | ||
3 | */ | ||
4 | |||
5 | #ifndef __NET_GENERIC_H__ | ||
6 | #define __NET_GENERIC_H__ | ||
7 | |||
8 | #include <linux/rcupdate.h> | ||
9 | |||
10 | /* | ||
11 | * Generic net pointers are to be used by modules to put some private | ||
12 | * stuff on the struct net without explicit struct net modification | ||
13 | * | ||
14 | * The rules are simple: | ||
15 | * 1. register the ops with register_pernet_gen_device to get the id | ||
16 | * of your private pointer; | ||
17 | * 2. call net_assign_generic() to put the private data on the struct | ||
18 | * net (most preferably this should be done in the ->init callback | ||
19 | * of the ops registered); | ||
20 | * 3. do not change this pointer while the net is alive; | ||
21 | * 4. do not try to have any private reference on the net_generic object. | ||
22 | * | ||
23 | * After accomplishing all of the above, the private pointer can be | ||
24 | * accessed with the net_generic() call. | ||
25 | */ | ||
26 | |||
27 | struct net_generic { | ||
28 | unsigned int len; | ||
29 | struct rcu_head rcu; | ||
30 | |||
31 | void *ptr[0]; | ||
32 | }; | ||
33 | |||
34 | static inline void *net_generic(struct net *net, int id) | ||
35 | { | ||
36 | struct net_generic *ng; | ||
37 | void *ptr; | ||
38 | |||
39 | rcu_read_lock(); | ||
40 | ng = rcu_dereference(net->gen); | ||
41 | BUG_ON(id == 0 || id > ng->len); | ||
42 | ptr = ng->ptr[id - 1]; | ||
43 | rcu_read_unlock(); | ||
44 | |||
45 | return ptr; | ||
46 | } | ||
47 | |||
48 | extern int net_assign_generic(struct net *net, int id, void *data); | ||
49 | #endif | ||
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index a9b4f6086294..34ee348a2cf2 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h | |||
@@ -17,6 +17,7 @@ struct netns_ipv4 { | |||
17 | #ifdef CONFIG_SYSCTL | 17 | #ifdef CONFIG_SYSCTL |
18 | struct ctl_table_header *forw_hdr; | 18 | struct ctl_table_header *forw_hdr; |
19 | struct ctl_table_header *frags_hdr; | 19 | struct ctl_table_header *frags_hdr; |
20 | struct ctl_table_header *ipv4_hdr; | ||
20 | #endif | 21 | #endif |
21 | struct ipv4_devconf *devconf_all; | 22 | struct ipv4_devconf *devconf_all; |
22 | struct ipv4_devconf *devconf_dflt; | 23 | struct ipv4_devconf *devconf_dflt; |
@@ -26,6 +27,9 @@ struct netns_ipv4 { | |||
26 | struct hlist_head *fib_table_hash; | 27 | struct hlist_head *fib_table_hash; |
27 | struct sock *fibnl; | 28 | struct sock *fibnl; |
28 | 29 | ||
30 | struct sock **icmp_sk; | ||
31 | struct sock *tcp_sock; | ||
32 | |||
29 | struct netns_frags frags; | 33 | struct netns_frags frags; |
30 | #ifdef CONFIG_NETFILTER | 34 | #ifdef CONFIG_NETFILTER |
31 | struct xt_table *iptable_filter; | 35 | struct xt_table *iptable_filter; |
@@ -33,5 +37,12 @@ struct netns_ipv4 { | |||
33 | struct xt_table *iptable_raw; | 37 | struct xt_table *iptable_raw; |
34 | struct xt_table *arptable_filter; | 38 | struct xt_table *arptable_filter; |
35 | #endif | 39 | #endif |
40 | |||
41 | int sysctl_icmp_echo_ignore_all; | ||
42 | int sysctl_icmp_echo_ignore_broadcasts; | ||
43 | int sysctl_icmp_ignore_bogus_error_responses; | ||
44 | int sysctl_icmp_ratelimit; | ||
45 | int sysctl_icmp_ratemask; | ||
46 | int sysctl_icmp_errors_use_inbound_ifaddr; | ||
36 | }; | 47 | }; |
37 | #endif | 48 | #endif |
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 1dd7de4e4195..ac053be6c256 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h | |||
@@ -36,5 +36,23 @@ struct netns_ipv6 { | |||
36 | struct xt_table *ip6table_mangle; | 36 | struct xt_table *ip6table_mangle; |
37 | struct xt_table *ip6table_raw; | 37 | struct xt_table *ip6table_raw; |
38 | #endif | 38 | #endif |
39 | struct rt6_info *ip6_null_entry; | ||
40 | struct rt6_statistics *rt6_stats; | ||
41 | struct timer_list *ip6_fib_timer; | ||
42 | struct hlist_head *fib_table_hash; | ||
43 | struct fib6_table *fib6_main_tbl; | ||
44 | struct dst_ops *ip6_dst_ops; | ||
45 | unsigned int ip6_rt_gc_expire; | ||
46 | unsigned long ip6_rt_last_gc; | ||
47 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | ||
48 | struct rt6_info *ip6_prohibit_entry; | ||
49 | struct rt6_info *ip6_blk_hole_entry; | ||
50 | struct fib6_table *fib6_local_tbl; | ||
51 | struct fib_rules_ops *fib6_rules_ops; | ||
52 | #endif | ||
53 | struct sock **icmp_sk; | ||
54 | struct sock *ndisc_sk; | ||
55 | struct sock *tcp_sk; | ||
56 | struct sock *igmp_sk; | ||
39 | }; | 57 | }; |
40 | #endif | 58 | #endif |
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index d349c66ef828..aa9e282db485 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
@@ -353,7 +353,7 @@ tcf_match_indev(struct sk_buff *skb, char *indev) | |||
353 | if (indev[0]) { | 353 | if (indev[0]) { |
354 | if (!skb->iif) | 354 | if (!skb->iif) |
355 | return 0; | 355 | return 0; |
356 | dev = __dev_get_by_index(&init_net, skb->iif); | 356 | dev = __dev_get_by_index(dev_net(skb->dev), skb->iif); |
357 | if (!dev || strcmp(indev, dev->name)) | 357 | if (!dev || strcmp(indev, dev->name)) |
358 | return 0; | 358 | return 0; |
359 | } | 359 | } |
diff --git a/include/net/protocol.h b/include/net/protocol.h index ad8c584233a6..8d024d7cb741 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h | |||
@@ -39,7 +39,8 @@ struct net_protocol { | |||
39 | int (*gso_send_check)(struct sk_buff *skb); | 39 | int (*gso_send_check)(struct sk_buff *skb); |
40 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, | 40 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
41 | int features); | 41 | int features); |
42 | int no_policy; | 42 | unsigned int no_policy:1, |
43 | netns_ok:1; | ||
43 | }; | 44 | }; |
44 | 45 | ||
45 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) | 46 | #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) |
diff --git a/include/net/raw.h b/include/net/raw.h index 1828f81fe374..6c14a656357a 100644 --- a/include/net/raw.h +++ b/include/net/raw.h | |||
@@ -53,7 +53,7 @@ int raw_seq_open(struct inode *ino, struct file *file, | |||
53 | 53 | ||
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | void raw_hash_sk(struct sock *sk, struct raw_hashinfo *h); | 56 | void raw_hash_sk(struct sock *sk); |
57 | void raw_unhash_sk(struct sock *sk, struct raw_hashinfo *h); | 57 | void raw_unhash_sk(struct sock *sk); |
58 | 58 | ||
59 | #endif /* _RAW_H */ | 59 | #endif /* _RAW_H */ |
diff --git a/include/net/request_sock.h b/include/net/request_sock.h index cff4608179c1..b220b5f624de 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h | |||
@@ -31,8 +31,7 @@ struct request_sock_ops { | |||
31 | int obj_size; | 31 | int obj_size; |
32 | struct kmem_cache *slab; | 32 | struct kmem_cache *slab; |
33 | int (*rtx_syn_ack)(struct sock *sk, | 33 | int (*rtx_syn_ack)(struct sock *sk, |
34 | struct request_sock *req, | 34 | struct request_sock *req); |
35 | struct dst_entry *dst); | ||
36 | void (*send_ack)(struct sk_buff *skb, | 35 | void (*send_ack)(struct sk_buff *skb, |
37 | struct request_sock *req); | 36 | struct request_sock *req); |
38 | void (*send_reset)(struct sock *sk, | 37 | void (*send_reset)(struct sock *sk, |
@@ -46,7 +45,7 @@ struct request_sock { | |||
46 | struct request_sock *dl_next; /* Must be first member! */ | 45 | struct request_sock *dl_next; /* Must be first member! */ |
47 | u16 mss; | 46 | u16 mss; |
48 | u8 retrans; | 47 | u8 retrans; |
49 | u8 __pad; | 48 | u8 cookie_ts; /* syncookie: encode tcpopts in timestamp */ |
50 | /* The following two fields can be easily recomputed I think -AK */ | 49 | /* The following two fields can be easily recomputed I think -AK */ |
51 | u32 window_clamp; /* window clamp at creation time */ | 50 | u32 window_clamp; /* window clamp at creation time */ |
52 | u32 rcv_wnd; /* rcv_wnd offered first time */ | 51 | u32 rcv_wnd; /* rcv_wnd offered first time */ |
@@ -116,8 +115,8 @@ struct request_sock_queue { | |||
116 | struct request_sock *rskq_accept_head; | 115 | struct request_sock *rskq_accept_head; |
117 | struct request_sock *rskq_accept_tail; | 116 | struct request_sock *rskq_accept_tail; |
118 | rwlock_t syn_wait_lock; | 117 | rwlock_t syn_wait_lock; |
119 | u8 rskq_defer_accept; | 118 | u16 rskq_defer_accept; |
120 | /* 3 bytes hole, try to pack */ | 119 | /* 2 bytes hole, try to pack */ |
121 | struct listen_sock *listen_opt; | 120 | struct listen_sock *listen_opt; |
122 | }; | 121 | }; |
123 | 122 | ||
diff --git a/include/net/route.h b/include/net/route.h index eadad5901429..c6338802e8f1 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/ip.h> | 34 | #include <linux/ip.h> |
35 | #include <linux/cache.h> | 35 | #include <linux/cache.h> |
36 | #include <linux/security.h> | 36 | #include <linux/security.h> |
37 | #include <net/sock.h> | ||
38 | 37 | ||
39 | #ifndef __KERNEL__ | 38 | #ifndef __KERNEL__ |
40 | #warning This file is not supposed to be used outside of kernel. | 39 | #warning This file is not supposed to be used outside of kernel. |
@@ -161,7 +160,7 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst, | |||
161 | .dport = dport } } }; | 160 | .dport = dport } } }; |
162 | 161 | ||
163 | int err; | 162 | int err; |
164 | struct net *net = sk->sk_net; | 163 | struct net *net = sock_net(sk); |
165 | if (!dst || !src) { | 164 | if (!dst || !src) { |
166 | err = __ip_route_output_key(net, rp, &fl); | 165 | err = __ip_route_output_key(net, rp, &fl); |
167 | if (err) | 166 | if (err) |
@@ -189,7 +188,7 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol, | |||
189 | ip_rt_put(*rp); | 188 | ip_rt_put(*rp); |
190 | *rp = NULL; | 189 | *rp = NULL; |
191 | security_sk_classify_flow(sk, &fl); | 190 | security_sk_classify_flow(sk, &fl); |
192 | return ip_route_output_flow(sk->sk_net, rp, &fl, sk, 0); | 191 | return ip_route_output_flow(sock_net(sk), rp, &fl, sk, 0); |
193 | } | 192 | } |
194 | return 0; | 193 | return 0; |
195 | } | 194 | } |
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 793863e09c69..3c1895e54b7f 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h | |||
@@ -74,6 +74,7 @@ struct rtnl_link_ops { | |||
74 | 74 | ||
75 | extern int __rtnl_link_register(struct rtnl_link_ops *ops); | 75 | extern int __rtnl_link_register(struct rtnl_link_ops *ops); |
76 | extern void __rtnl_link_unregister(struct rtnl_link_ops *ops); | 76 | extern void __rtnl_link_unregister(struct rtnl_link_ops *ops); |
77 | extern void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops); | ||
77 | 78 | ||
78 | extern int rtnl_link_register(struct rtnl_link_ops *ops); | 79 | extern int rtnl_link_register(struct rtnl_link_ops *ops); |
79 | extern void rtnl_link_unregister(struct rtnl_link_ops *ops); | 80 | extern void rtnl_link_unregister(struct rtnl_link_ops *ops); |
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 10ae2da6f93b..88988ab03d75 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h | |||
@@ -104,6 +104,7 @@ typedef enum { | |||
104 | SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */ | 104 | SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */ |
105 | SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */ | 105 | SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */ |
106 | SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ | 106 | SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ |
107 | SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ | ||
107 | SCTP_CMD_LAST | 108 | SCTP_CMD_LAST |
108 | } sctp_verb_t; | 109 | } sctp_verb_t; |
109 | 110 | ||
@@ -205,12 +206,11 @@ typedef struct { | |||
205 | int sctp_init_cmd_seq(sctp_cmd_seq_t *seq); | 206 | int sctp_init_cmd_seq(sctp_cmd_seq_t *seq); |
206 | 207 | ||
207 | /* Add a command to an sctp_cmd_seq_t. | 208 | /* Add a command to an sctp_cmd_seq_t. |
208 | * Return 0 if the command sequence is full. | ||
209 | * | 209 | * |
210 | * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above | 210 | * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above |
211 | * to wrap data which goes in the obj argument. | 211 | * to wrap data which goes in the obj argument. |
212 | */ | 212 | */ |
213 | int sctp_add_cmd(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj); | 213 | void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj); |
214 | 214 | ||
215 | /* Return the next command structure in an sctp_cmd_seq. | 215 | /* Return the next command structure in an sctp_cmd_seq. |
216 | * Return NULL at the end of the sequence. | 216 | * Return NULL at the end of the sequence. |
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index ea806732b084..90b1e8d23b16 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -368,11 +368,6 @@ void sctp_sysctl_unregister(void); | |||
368 | #else | 368 | #else |
369 | static inline void sctp_sysctl_register(void) { return; } | 369 | static inline void sctp_sysctl_register(void) { return; } |
370 | static inline void sctp_sysctl_unregister(void) { return; } | 370 | static inline void sctp_sysctl_unregister(void) { return; } |
371 | static inline int sctp_sysctl_jiffies_ms(ctl_table *table, int __user *name, int nlen, | ||
372 | void __user *oldval, size_t __user *oldlenp, | ||
373 | void __user *newval, size_t newlen) { | ||
374 | return -ENOSYS; | ||
375 | } | ||
376 | #endif | 371 | #endif |
377 | 372 | ||
378 | /* Size of Supported Address Parameter for 'x' address types. */ | 373 | /* Size of Supported Address Parameter for 'x' address types. */ |
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index ef9e7ed2c82e..24811732bdb2 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -385,14 +385,6 @@ static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t) | |||
385 | return (((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT)); | 385 | return (((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT)); |
386 | } | 386 | } |
387 | 387 | ||
388 | |||
389 | /* Run sctp_add_cmd() generating a BUG() if there is a failure. */ | ||
390 | static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj) | ||
391 | { | ||
392 | if (unlikely(!sctp_add_cmd(seq, verb, obj))) | ||
393 | BUG(); | ||
394 | } | ||
395 | |||
396 | /* Check VTAG of the packet matches the sender's own tag. */ | 388 | /* Check VTAG of the packet matches the sender's own tag. */ |
397 | static inline int | 389 | static inline int |
398 | sctp_vtag_verify(const struct sctp_chunk *chunk, | 390 | sctp_vtag_verify(const struct sctp_chunk *chunk, |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 9c827a749b6f..0ce0443c5b79 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -637,8 +637,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, | |||
637 | struct sctp_sndrcvinfo *, | 637 | struct sctp_sndrcvinfo *, |
638 | struct msghdr *, int len); | 638 | struct msghdr *, int len); |
639 | void sctp_datamsg_put(struct sctp_datamsg *); | 639 | void sctp_datamsg_put(struct sctp_datamsg *); |
640 | void sctp_datamsg_free(struct sctp_datamsg *); | ||
641 | void sctp_datamsg_track(struct sctp_chunk *); | ||
642 | void sctp_chunk_fail(struct sctp_chunk *, int error); | 640 | void sctp_chunk_fail(struct sctp_chunk *, int error); |
643 | int sctp_chunk_abandoned(struct sctp_chunk *); | 641 | int sctp_chunk_abandoned(struct sctp_chunk *); |
644 | 642 | ||
@@ -1661,6 +1659,9 @@ struct sctp_association { | |||
1661 | /* Transport to which SHUTDOWN chunk was last sent. */ | 1659 | /* Transport to which SHUTDOWN chunk was last sent. */ |
1662 | struct sctp_transport *shutdown_last_sent_to; | 1660 | struct sctp_transport *shutdown_last_sent_to; |
1663 | 1661 | ||
1662 | /* How many times have we resent a SHUTDOWN */ | ||
1663 | int shutdown_retries; | ||
1664 | |||
1664 | /* Transport to which INIT chunk was last sent. */ | 1665 | /* Transport to which INIT chunk was last sent. */ |
1665 | struct sctp_transport *init_last_sent_to; | 1666 | struct sctp_transport *init_last_sent_to; |
1666 | 1667 | ||
@@ -1695,6 +1696,11 @@ struct sctp_association { | |||
1695 | */ | 1696 | */ |
1696 | __u16 unack_data; | 1697 | __u16 unack_data; |
1697 | 1698 | ||
1699 | /* The total number of data chunks that we've had to retransmit | ||
1700 | * as the result of a T3 timer expiration | ||
1701 | */ | ||
1702 | __u32 rtx_data_chunks; | ||
1703 | |||
1698 | /* This is the association's receive buffer space. This value is used | 1704 | /* This is the association's receive buffer space. This value is used |
1699 | * to set a_rwnd field in an INIT or a SACK chunk. | 1705 | * to set a_rwnd field in an INIT or a SACK chunk. |
1700 | */ | 1706 | */ |
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 9bcfc12275e8..7ea12e8e6676 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h | |||
@@ -67,7 +67,7 @@ struct sctp_ulpevent { | |||
67 | }; | 67 | }; |
68 | 68 | ||
69 | /* Retrieve the skb this event sits inside of. */ | 69 | /* Retrieve the skb this event sits inside of. */ |
70 | static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev) | 70 | static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev) |
71 | { | 71 | { |
72 | return container_of((void *)ev, struct sk_buff, cb); | 72 | return container_of((void *)ev, struct sk_buff, cb); |
73 | } | 73 | } |
diff --git a/include/net/sock.h b/include/net/sock.h index fd9876087651..dc42b44c2aa1 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -47,7 +47,6 @@ | |||
47 | #include <linux/module.h> | 47 | #include <linux/module.h> |
48 | #include <linux/lockdep.h> | 48 | #include <linux/lockdep.h> |
49 | #include <linux/netdevice.h> | 49 | #include <linux/netdevice.h> |
50 | #include <linux/pcounter.h> | ||
51 | #include <linux/skbuff.h> /* struct sk_buff */ | 50 | #include <linux/skbuff.h> /* struct sk_buff */ |
52 | #include <linux/mm.h> | 51 | #include <linux/mm.h> |
53 | #include <linux/security.h> | 52 | #include <linux/security.h> |
@@ -70,7 +69,11 @@ | |||
70 | #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ | 69 | #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ |
71 | printk(KERN_DEBUG msg); } while (0) | 70 | printk(KERN_DEBUG msg); } while (0) |
72 | #else | 71 | #else |
73 | #define SOCK_DEBUG(sk, msg...) do { } while (0) | 72 | /* Validate arguments and do nothing */ |
73 | static void inline int __attribute__ ((format (printf, 2, 3))) | ||
74 | SOCK_DEBUG(struct sock *sk, const char *msg, ...) | ||
75 | { | ||
76 | } | ||
74 | #endif | 77 | #endif |
75 | 78 | ||
76 | /* This is the per-socket lock. The spinlock provides a synchronization | 79 | /* This is the per-socket lock. The spinlock provides a synchronization |
@@ -122,7 +125,9 @@ struct sock_common { | |||
122 | atomic_t skc_refcnt; | 125 | atomic_t skc_refcnt; |
123 | unsigned int skc_hash; | 126 | unsigned int skc_hash; |
124 | struct proto *skc_prot; | 127 | struct proto *skc_prot; |
128 | #ifdef CONFIG_NET_NS | ||
125 | struct net *skc_net; | 129 | struct net *skc_net; |
130 | #endif | ||
126 | }; | 131 | }; |
127 | 132 | ||
128 | /** | 133 | /** |
@@ -151,6 +156,7 @@ struct sock_common { | |||
151 | * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets | 156 | * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets |
152 | * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) | 157 | * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) |
153 | * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) | 158 | * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) |
159 | * @sk_gso_max_size: Maximum GSO segment size to build | ||
154 | * @sk_lingertime: %SO_LINGER l_linger setting | 160 | * @sk_lingertime: %SO_LINGER l_linger setting |
155 | * @sk_backlog: always used with the per-socket spinlock held | 161 | * @sk_backlog: always used with the per-socket spinlock held |
156 | * @sk_callback_lock: used with the callbacks in the end of this struct | 162 | * @sk_callback_lock: used with the callbacks in the end of this struct |
@@ -237,6 +243,7 @@ struct sock { | |||
237 | gfp_t sk_allocation; | 243 | gfp_t sk_allocation; |
238 | int sk_route_caps; | 244 | int sk_route_caps; |
239 | int sk_gso_type; | 245 | int sk_gso_type; |
246 | unsigned int sk_gso_max_size; | ||
240 | int sk_rcvlowat; | 247 | int sk_rcvlowat; |
241 | unsigned long sk_flags; | 248 | unsigned long sk_flags; |
242 | unsigned long sk_lingertime; | 249 | unsigned long sk_lingertime; |
@@ -498,6 +505,7 @@ extern int sk_wait_data(struct sock *sk, long *timeo); | |||
498 | struct request_sock_ops; | 505 | struct request_sock_ops; |
499 | struct timewait_sock_ops; | 506 | struct timewait_sock_ops; |
500 | struct inet_hashinfo; | 507 | struct inet_hashinfo; |
508 | struct raw_hashinfo; | ||
501 | 509 | ||
502 | /* Networking protocol blocks we attach to sockets. | 510 | /* Networking protocol blocks we attach to sockets. |
503 | * socket layer -> transport layer interface | 511 | * socket layer -> transport layer interface |
@@ -553,7 +561,7 @@ struct proto { | |||
553 | 561 | ||
554 | /* Keeping track of sockets in use */ | 562 | /* Keeping track of sockets in use */ |
555 | #ifdef CONFIG_PROC_FS | 563 | #ifdef CONFIG_PROC_FS |
556 | struct pcounter inuse; | 564 | unsigned int inuse_idx; |
557 | #endif | 565 | #endif |
558 | 566 | ||
559 | /* Memory pressure */ | 567 | /* Memory pressure */ |
@@ -580,7 +588,11 @@ struct proto { | |||
580 | struct request_sock_ops *rsk_prot; | 588 | struct request_sock_ops *rsk_prot; |
581 | struct timewait_sock_ops *twsk_prot; | 589 | struct timewait_sock_ops *twsk_prot; |
582 | 590 | ||
583 | struct inet_hashinfo *hashinfo; | 591 | union { |
592 | struct inet_hashinfo *hashinfo; | ||
593 | struct hlist_head *udp_hash; | ||
594 | struct raw_hashinfo *raw_hash; | ||
595 | } h; | ||
584 | 596 | ||
585 | struct module *owner; | 597 | struct module *owner; |
586 | 598 | ||
@@ -622,36 +634,12 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) | |||
622 | 634 | ||
623 | 635 | ||
624 | #ifdef CONFIG_PROC_FS | 636 | #ifdef CONFIG_PROC_FS |
625 | # define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME) | ||
626 | # define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse) | ||
627 | /* Called with local bh disabled */ | 637 | /* Called with local bh disabled */ |
628 | static inline void sock_prot_inuse_add(struct proto *prot, int inc) | 638 | extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); |
629 | { | 639 | extern int sock_prot_inuse_get(struct net *net, struct proto *proto); |
630 | pcounter_add(&prot->inuse, inc); | ||
631 | } | ||
632 | static inline int sock_prot_inuse_init(struct proto *proto) | ||
633 | { | ||
634 | return pcounter_alloc(&proto->inuse); | ||
635 | } | ||
636 | static inline int sock_prot_inuse_get(struct proto *proto) | ||
637 | { | ||
638 | return pcounter_getval(&proto->inuse); | ||
639 | } | ||
640 | static inline void sock_prot_inuse_free(struct proto *proto) | ||
641 | { | ||
642 | pcounter_free(&proto->inuse); | ||
643 | } | ||
644 | #else | 640 | #else |
645 | # define DEFINE_PROTO_INUSE(NAME) | 641 | static void inline sock_prot_inuse_add(struct net *net, struct proto *prot, |
646 | # define REF_PROTO_INUSE(NAME) | 642 | int inc) |
647 | static void inline sock_prot_inuse_add(struct proto *prot, int inc) | ||
648 | { | ||
649 | } | ||
650 | static int inline sock_prot_inuse_init(struct proto *proto) | ||
651 | { | ||
652 | return 0; | ||
653 | } | ||
654 | static void inline sock_prot_inuse_free(struct proto *proto) | ||
655 | { | 643 | { |
656 | } | 644 | } |
657 | #endif | 645 | #endif |
@@ -850,6 +838,7 @@ extern struct sock *sk_alloc(struct net *net, int family, | |||
850 | gfp_t priority, | 838 | gfp_t priority, |
851 | struct proto *prot); | 839 | struct proto *prot); |
852 | extern void sk_free(struct sock *sk); | 840 | extern void sk_free(struct sock *sk); |
841 | extern void sk_release_kernel(struct sock *sk); | ||
853 | extern struct sock *sk_clone(const struct sock *sk, | 842 | extern struct sock *sk_clone(const struct sock *sk, |
854 | const gfp_t priority); | 843 | const gfp_t priority); |
855 | 844 | ||
@@ -939,41 +928,6 @@ extern void sk_common_release(struct sock *sk); | |||
939 | extern void sock_init_data(struct socket *sock, struct sock *sk); | 928 | extern void sock_init_data(struct socket *sock, struct sock *sk); |
940 | 929 | ||
941 | /** | 930 | /** |
942 | * sk_filter - run a packet through a socket filter | ||
943 | * @sk: sock associated with &sk_buff | ||
944 | * @skb: buffer to filter | ||
945 | * @needlock: set to 1 if the sock is not locked by caller. | ||
946 | * | ||
947 | * Run the filter code and then cut skb->data to correct size returned by | ||
948 | * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller | ||
949 | * than pkt_len we keep whole skb->data. This is the socket level | ||
950 | * wrapper to sk_run_filter. It returns 0 if the packet should | ||
951 | * be accepted or -EPERM if the packet should be tossed. | ||
952 | * | ||
953 | */ | ||
954 | |||
955 | static inline int sk_filter(struct sock *sk, struct sk_buff *skb) | ||
956 | { | ||
957 | int err; | ||
958 | struct sk_filter *filter; | ||
959 | |||
960 | err = security_sock_rcv_skb(sk, skb); | ||
961 | if (err) | ||
962 | return err; | ||
963 | |||
964 | rcu_read_lock_bh(); | ||
965 | filter = rcu_dereference(sk->sk_filter); | ||
966 | if (filter) { | ||
967 | unsigned int pkt_len = sk_run_filter(skb, filter->insns, | ||
968 | filter->len); | ||
969 | err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; | ||
970 | } | ||
971 | rcu_read_unlock_bh(); | ||
972 | |||
973 | return err; | ||
974 | } | ||
975 | |||
976 | /** | ||
977 | * sk_filter_release: Release a socket filter | 931 | * sk_filter_release: Release a socket filter |
978 | * @sk: socket | 932 | * @sk: socket |
979 | * @fp: filter to remove | 933 | * @fp: filter to remove |
@@ -1333,6 +1287,36 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e | |||
1333 | } | 1287 | } |
1334 | #endif | 1288 | #endif |
1335 | 1289 | ||
1290 | static inline | ||
1291 | struct net *sock_net(const struct sock *sk) | ||
1292 | { | ||
1293 | #ifdef CONFIG_NET_NS | ||
1294 | return sk->sk_net; | ||
1295 | #else | ||
1296 | return &init_net; | ||
1297 | #endif | ||
1298 | } | ||
1299 | |||
1300 | static inline | ||
1301 | void sock_net_set(struct sock *sk, struct net *net) | ||
1302 | { | ||
1303 | #ifdef CONFIG_NET_NS | ||
1304 | sk->sk_net = net; | ||
1305 | #endif | ||
1306 | } | ||
1307 | |||
1308 | /* | ||
1309 | * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace. | ||
1310 | * They should not hold a referrence to a namespace in order to allow | ||
1311 | * to stop it. | ||
1312 | * Sockets after sk_change_net should be released using sk_release_kernel | ||
1313 | */ | ||
1314 | static inline void sk_change_net(struct sock *sk, struct net *net) | ||
1315 | { | ||
1316 | put_net(sock_net(sk)); | ||
1317 | sock_net_set(sk, hold_net(net)); | ||
1318 | } | ||
1319 | |||
1336 | extern void sock_enable_timestamp(struct sock *sk); | 1320 | extern void sock_enable_timestamp(struct sock *sk); |
1337 | extern int sock_get_timestamp(struct sock *, struct timeval __user *); | 1321 | extern int sock_get_timestamp(struct sock *, struct timeval __user *); |
1338 | extern int sock_get_timestampns(struct sock *, struct timespec __user *); | 1322 | extern int sock_get_timestampns(struct sock *, struct timespec __user *); |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 4fd3eb2f8ec2..633147cb6bbc 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/skbuff.h> | 29 | #include <linux/skbuff.h> |
30 | #include <linux/dmaengine.h> | 30 | #include <linux/dmaengine.h> |
31 | #include <linux/crypto.h> | 31 | #include <linux/crypto.h> |
32 | #include <linux/cryptohash.h> | ||
32 | 33 | ||
33 | #include <net/inet_connection_sock.h> | 34 | #include <net/inet_connection_sock.h> |
34 | #include <net/inet_timewait_sock.h> | 35 | #include <net/inet_timewait_sock.h> |
@@ -138,6 +139,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); | |||
138 | #define MAX_TCP_KEEPINTVL 32767 | 139 | #define MAX_TCP_KEEPINTVL 32767 |
139 | #define MAX_TCP_KEEPCNT 127 | 140 | #define MAX_TCP_KEEPCNT 127 |
140 | #define MAX_TCP_SYNCNT 127 | 141 | #define MAX_TCP_SYNCNT 127 |
142 | #define MAX_TCP_ACCEPT_DEFERRED 65535 | ||
141 | 143 | ||
142 | #define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */ | 144 | #define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */ |
143 | 145 | ||
@@ -434,11 +436,20 @@ extern int tcp_disconnect(struct sock *sk, int flags); | |||
434 | extern void tcp_unhash(struct sock *sk); | 436 | extern void tcp_unhash(struct sock *sk); |
435 | 437 | ||
436 | /* From syncookies.c */ | 438 | /* From syncookies.c */ |
439 | extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; | ||
437 | extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | 440 | extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, |
438 | struct ip_options *opt); | 441 | struct ip_options *opt); |
439 | extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, | 442 | extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, |
440 | __u16 *mss); | 443 | __u16 *mss); |
441 | 444 | ||
445 | extern __u32 cookie_init_timestamp(struct request_sock *req); | ||
446 | extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt); | ||
447 | |||
448 | /* From net/ipv6/syncookies.c */ | ||
449 | extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); | ||
450 | extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, | ||
451 | __u16 *mss); | ||
452 | |||
442 | /* tcp_output.c */ | 453 | /* tcp_output.c */ |
443 | 454 | ||
444 | extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, | 455 | extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, |
@@ -776,11 +787,14 @@ extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); | |||
776 | extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); | 787 | extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); |
777 | 788 | ||
778 | /* Slow start with delack produces 3 packets of burst, so that | 789 | /* Slow start with delack produces 3 packets of burst, so that |
779 | * it is safe "de facto". | 790 | * it is safe "de facto". This will be the default - same as |
791 | * the default reordering threshold - but if reordering increases, | ||
792 | * we must be able to allow cwnd to burst at least this much in order | ||
793 | * to not pull it back when holes are filled. | ||
780 | */ | 794 | */ |
781 | static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp) | 795 | static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp) |
782 | { | 796 | { |
783 | return 3; | 797 | return tp->reordering; |
784 | } | 798 | } |
785 | 799 | ||
786 | /* Returns end sequence number of the receiver's advertised window */ | 800 | /* Returns end sequence number of the receiver's advertised window */ |
@@ -950,6 +964,7 @@ static inline void tcp_openreq_init(struct request_sock *req, | |||
950 | struct inet_request_sock *ireq = inet_rsk(req); | 964 | struct inet_request_sock *ireq = inet_rsk(req); |
951 | 965 | ||
952 | req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ | 966 | req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ |
967 | req->cookie_ts = 0; | ||
953 | tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; | 968 | tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; |
954 | req->mss = rx_opt->mss_clamp; | 969 | req->mss = rx_opt->mss_clamp; |
955 | req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; | 970 | req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; |
@@ -1237,7 +1252,7 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb, | |||
1237 | struct sk_buff *buff, | 1252 | struct sk_buff *buff, |
1238 | struct sock *sk) | 1253 | struct sock *sk) |
1239 | { | 1254 | { |
1240 | __skb_append(skb, buff, &sk->sk_write_queue); | 1255 | __skb_queue_after(&sk->sk_write_queue, skb, buff); |
1241 | } | 1256 | } |
1242 | 1257 | ||
1243 | /* Insert skb between prev and next on the write queue of sk. */ | 1258 | /* Insert skb between prev and next on the write queue of sk. */ |
@@ -1315,25 +1330,25 @@ enum tcp_seq_states { | |||
1315 | }; | 1330 | }; |
1316 | 1331 | ||
1317 | struct tcp_seq_afinfo { | 1332 | struct tcp_seq_afinfo { |
1318 | struct module *owner; | ||
1319 | char *name; | 1333 | char *name; |
1320 | sa_family_t family; | 1334 | sa_family_t family; |
1321 | int (*seq_show) (struct seq_file *m, void *v); | 1335 | struct file_operations seq_fops; |
1322 | struct file_operations *seq_fops; | 1336 | struct seq_operations seq_ops; |
1323 | }; | 1337 | }; |
1324 | 1338 | ||
1325 | struct tcp_iter_state { | 1339 | struct tcp_iter_state { |
1340 | struct seq_net_private p; | ||
1326 | sa_family_t family; | 1341 | sa_family_t family; |
1327 | enum tcp_seq_states state; | 1342 | enum tcp_seq_states state; |
1328 | struct sock *syn_wait_sk; | 1343 | struct sock *syn_wait_sk; |
1329 | int bucket, sbucket, num, uid; | 1344 | int bucket, sbucket, num, uid; |
1330 | struct seq_operations seq_ops; | ||
1331 | }; | 1345 | }; |
1332 | 1346 | ||
1333 | extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo); | 1347 | extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo); |
1334 | extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo); | 1348 | extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo); |
1335 | 1349 | ||
1336 | extern struct request_sock_ops tcp_request_sock_ops; | 1350 | extern struct request_sock_ops tcp_request_sock_ops; |
1351 | extern struct request_sock_ops tcp6_request_sock_ops; | ||
1337 | 1352 | ||
1338 | extern int tcp_v4_destroy_sock(struct sock *sk); | 1353 | extern int tcp_v4_destroy_sock(struct sock *sk); |
1339 | 1354 | ||
@@ -1375,7 +1390,7 @@ struct tcp_request_sock_ops { | |||
1375 | #endif | 1390 | #endif |
1376 | }; | 1391 | }; |
1377 | 1392 | ||
1378 | extern void tcp_v4_init(struct net_proto_family *ops); | 1393 | extern void tcp_v4_init(void); |
1379 | extern void tcp_init(void); | 1394 | extern void tcp_init(void); |
1380 | 1395 | ||
1381 | #endif /* _TCP_H */ | 1396 | #endif /* _TCP_H */ |
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h index 2151a80cdf30..ee2f304e4919 100644 --- a/include/net/tipc/tipc_bearer.h +++ b/include/net/tipc/tipc_bearer.h | |||
@@ -99,6 +99,9 @@ struct tipc_bearer { | |||
99 | char name[TIPC_MAX_BEARER_NAME]; | 99 | char name[TIPC_MAX_BEARER_NAME]; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | /* | ||
103 | * TIPC routines available to supported media types | ||
104 | */ | ||
102 | 105 | ||
103 | int tipc_register_media(u32 media_type, | 106 | int tipc_register_media(u32 media_type, |
104 | char *media_name, | 107 | char *media_name, |
@@ -123,6 +126,12 @@ void tipc_continue(struct tipc_bearer *tb_ptr); | |||
123 | int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority); | 126 | int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority); |
124 | int tipc_disable_bearer(const char *name); | 127 | int tipc_disable_bearer(const char *name); |
125 | 128 | ||
129 | /* | ||
130 | * Routines made available to TIPC by supported media types | ||
131 | */ | ||
132 | |||
133 | int tipc_eth_media_start(void); | ||
134 | void tipc_eth_media_stop(void); | ||
126 | 135 | ||
127 | #endif | 136 | #endif |
128 | 137 | ||
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h index cfc4ba46de8f..11105bcc4457 100644 --- a/include/net/tipc/tipc_port.h +++ b/include/net/tipc/tipc_port.h | |||
@@ -86,13 +86,6 @@ u32 tipc_createport_raw(void *usr_handle, | |||
86 | void (*wakeup)(struct tipc_port *), | 86 | void (*wakeup)(struct tipc_port *), |
87 | const u32 importance); | 87 | const u32 importance); |
88 | 88 | ||
89 | /* | ||
90 | * tipc_set_msg_option(): port must be locked. | ||
91 | */ | ||
92 | int tipc_set_msg_option(struct tipc_port *tp_ptr, | ||
93 | const char *opt, | ||
94 | const u32 len); | ||
95 | |||
96 | int tipc_reject_msg(struct sk_buff *buf, u32 err); | 89 | int tipc_reject_msg(struct sk_buff *buf, u32 err); |
97 | 90 | ||
98 | int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode); | 91 | int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode); |
@@ -103,6 +96,12 @@ struct tipc_port *tipc_get_port(const u32 ref); | |||
103 | 96 | ||
104 | void *tipc_get_handle(const u32 ref); | 97 | void *tipc_get_handle(const u32 ref); |
105 | 98 | ||
99 | /* | ||
100 | * The following routines require that the port be locked on entry | ||
101 | */ | ||
102 | |||
103 | int tipc_disconnect_port(struct tipc_port *tp_ptr); | ||
104 | |||
106 | 105 | ||
107 | #endif | 106 | #endif |
108 | 107 | ||
diff --git a/include/net/udp.h b/include/net/udp.h index c6669c0a74c7..3e55a99b0ba3 100644 --- a/include/net/udp.h +++ b/include/net/udp.h | |||
@@ -115,7 +115,7 @@ static inline void udp_lib_unhash(struct sock *sk) | |||
115 | write_lock_bh(&udp_hash_lock); | 115 | write_lock_bh(&udp_hash_lock); |
116 | if (sk_del_node_init(sk)) { | 116 | if (sk_del_node_init(sk)) { |
117 | inet_sk(sk)->num = 0; | 117 | inet_sk(sk)->num = 0; |
118 | sock_prot_inuse_add(sk->sk_prot, -1); | 118 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); |
119 | } | 119 | } |
120 | write_unlock_bh(&udp_hash_lock); | 120 | write_unlock_bh(&udp_hash_lock); |
121 | } | 121 | } |
@@ -125,6 +125,8 @@ static inline void udp_lib_close(struct sock *sk, long timeout) | |||
125 | sk_common_release(sk); | 125 | sk_common_release(sk); |
126 | } | 126 | } |
127 | 127 | ||
128 | extern int udp_lib_get_port(struct sock *sk, unsigned short snum, | ||
129 | int (*)(const struct sock*,const struct sock*)); | ||
128 | 130 | ||
129 | /* net/ipv4/udp.c */ | 131 | /* net/ipv4/udp.c */ |
130 | extern int udp_get_port(struct sock *sk, unsigned short snum, | 132 | extern int udp_get_port(struct sock *sk, unsigned short snum, |
@@ -183,24 +185,23 @@ DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6); | |||
183 | 185 | ||
184 | /* /proc */ | 186 | /* /proc */ |
185 | struct udp_seq_afinfo { | 187 | struct udp_seq_afinfo { |
186 | struct module *owner; | ||
187 | char *name; | 188 | char *name; |
188 | sa_family_t family; | 189 | sa_family_t family; |
189 | struct hlist_head *hashtable; | 190 | struct hlist_head *hashtable; |
190 | int (*seq_show) (struct seq_file *m, void *v); | 191 | struct file_operations seq_fops; |
191 | struct file_operations *seq_fops; | 192 | struct seq_operations seq_ops; |
192 | }; | 193 | }; |
193 | 194 | ||
194 | struct udp_iter_state { | 195 | struct udp_iter_state { |
196 | struct seq_net_private p; | ||
195 | sa_family_t family; | 197 | sa_family_t family; |
196 | struct hlist_head *hashtable; | 198 | struct hlist_head *hashtable; |
197 | int bucket; | 199 | int bucket; |
198 | struct seq_operations seq_ops; | ||
199 | }; | 200 | }; |
200 | 201 | ||
201 | #ifdef CONFIG_PROC_FS | 202 | #ifdef CONFIG_PROC_FS |
202 | extern int udp_proc_register(struct udp_seq_afinfo *afinfo); | 203 | extern int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo); |
203 | extern void udp_proc_unregister(struct udp_seq_afinfo *afinfo); | 204 | extern void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo); |
204 | 205 | ||
205 | extern int udp4_proc_init(void); | 206 | extern int udp4_proc_init(void); |
206 | extern void udp4_proc_exit(void); | 207 | extern void udp4_proc_exit(void); |
diff --git a/include/net/wireless.h b/include/net/wireless.h index d30c4ba8fd99..667b4080d30f 100644 --- a/include/net/wireless.h +++ b/include/net/wireless.h | |||
@@ -13,6 +13,162 @@ | |||
13 | #include <net/cfg80211.h> | 13 | #include <net/cfg80211.h> |
14 | 14 | ||
15 | /** | 15 | /** |
16 | * enum ieee80211_band - supported frequency bands | ||
17 | * | ||
18 | * The bands are assigned this way because the supported | ||
19 | * bitrates differ in these bands. | ||
20 | * | ||
21 | * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band | ||
22 | * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7) | ||
23 | */ | ||
24 | enum ieee80211_band { | ||
25 | IEEE80211_BAND_2GHZ, | ||
26 | IEEE80211_BAND_5GHZ, | ||
27 | |||
28 | /* keep last */ | ||
29 | IEEE80211_NUM_BANDS | ||
30 | }; | ||
31 | |||
32 | /** | ||
33 | * enum ieee80211_channel_flags - channel flags | ||
34 | * | ||
35 | * Channel flags set by the regulatory control code. | ||
36 | * | ||
37 | * @IEEE80211_CHAN_DISABLED: This channel is disabled. | ||
38 | * @IEEE80211_CHAN_PASSIVE_SCAN: Only passive scanning is permitted | ||
39 | * on this channel. | ||
40 | * @IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel. | ||
41 | * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel. | ||
42 | */ | ||
43 | enum ieee80211_channel_flags { | ||
44 | IEEE80211_CHAN_DISABLED = 1<<0, | ||
45 | IEEE80211_CHAN_PASSIVE_SCAN = 1<<1, | ||
46 | IEEE80211_CHAN_NO_IBSS = 1<<2, | ||
47 | IEEE80211_CHAN_RADAR = 1<<3, | ||
48 | }; | ||
49 | |||
50 | /** | ||
51 | * struct ieee80211_channel - channel definition | ||
52 | * | ||
53 | * This structure describes a single channel for use | ||
54 | * with cfg80211. | ||
55 | * | ||
56 | * @center_freq: center frequency in MHz | ||
57 | * @hw_value: hardware-specific value for the channel | ||
58 | * @flags: channel flags from &enum ieee80211_channel_flags. | ||
59 | * @orig_flags: channel flags at registration time, used by regulatory | ||
60 | * code to support devices with additional restrictions | ||
61 | * @band: band this channel belongs to. | ||
62 | * @max_antenna_gain: maximum antenna gain in dBi | ||
63 | * @max_power: maximum transmission power (in dBm) | ||
64 | * @orig_mag: internal use | ||
65 | * @orig_mpwr: internal use | ||
66 | */ | ||
67 | struct ieee80211_channel { | ||
68 | enum ieee80211_band band; | ||
69 | u16 center_freq; | ||
70 | u16 hw_value; | ||
71 | u32 flags; | ||
72 | int max_antenna_gain; | ||
73 | int max_power; | ||
74 | u32 orig_flags; | ||
75 | int orig_mag, orig_mpwr; | ||
76 | }; | ||
77 | |||
78 | /** | ||
79 | * enum ieee80211_rate_flags - rate flags | ||
80 | * | ||
81 | * Hardware/specification flags for rates. These are structured | ||
82 | * in a way that allows using the same bitrate structure for | ||
83 | * different bands/PHY modes. | ||
84 | * | ||
85 | * @IEEE80211_RATE_SHORT_PREAMBLE: Hardware can send with short | ||
86 | * preamble on this bitrate; only relevant in 2.4GHz band and | ||
87 | * with CCK rates. | ||
88 | * @IEEE80211_RATE_MANDATORY_A: This bitrate is a mandatory rate | ||
89 | * when used with 802.11a (on the 5 GHz band); filled by the | ||
90 | * core code when registering the wiphy. | ||
91 | * @IEEE80211_RATE_MANDATORY_B: This bitrate is a mandatory rate | ||
92 | * when used with 802.11b (on the 2.4 GHz band); filled by the | ||
93 | * core code when registering the wiphy. | ||
94 | * @IEEE80211_RATE_MANDATORY_G: This bitrate is a mandatory rate | ||
95 | * when used with 802.11g (on the 2.4 GHz band); filled by the | ||
96 | * core code when registering the wiphy. | ||
97 | * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode. | ||
98 | */ | ||
99 | enum ieee80211_rate_flags { | ||
100 | IEEE80211_RATE_SHORT_PREAMBLE = 1<<0, | ||
101 | IEEE80211_RATE_MANDATORY_A = 1<<1, | ||
102 | IEEE80211_RATE_MANDATORY_B = 1<<2, | ||
103 | IEEE80211_RATE_MANDATORY_G = 1<<3, | ||
104 | IEEE80211_RATE_ERP_G = 1<<4, | ||
105 | }; | ||
106 | |||
107 | /** | ||
108 | * struct ieee80211_rate - bitrate definition | ||
109 | * | ||
110 | * This structure describes a bitrate that an 802.11 PHY can | ||
111 | * operate with. The two values @hw_value and @hw_value_short | ||
112 | * are only for driver use when pointers to this structure are | ||
113 | * passed around. | ||
114 | * | ||
115 | * @flags: rate-specific flags | ||
116 | * @bitrate: bitrate in units of 100 Kbps | ||
117 | * @hw_value: driver/hardware value for this rate | ||
118 | * @hw_value_short: driver/hardware value for this rate when | ||
119 | * short preamble is used | ||
120 | */ | ||
121 | struct ieee80211_rate { | ||
122 | u32 flags; | ||
123 | u16 bitrate; | ||
124 | u16 hw_value, hw_value_short; | ||
125 | }; | ||
126 | |||
127 | /** | ||
128 | * struct ieee80211_ht_info - describing STA's HT capabilities | ||
129 | * | ||
130 | * This structure describes most essential parameters needed | ||
131 | * to describe 802.11n HT capabilities for an STA. | ||
132 | * | ||
133 | * @ht_supported: is HT supported by STA, 0: no, 1: yes | ||
134 | * @cap: HT capabilities map as described in 802.11n spec | ||
135 | * @ampdu_factor: Maximum A-MPDU length factor | ||
136 | * @ampdu_density: Minimum A-MPDU spacing | ||
137 | * @supp_mcs_set: Supported MCS set as described in 802.11n spec | ||
138 | */ | ||
139 | struct ieee80211_ht_info { | ||
140 | u16 cap; /* use IEEE80211_HT_CAP_ */ | ||
141 | u8 ht_supported; | ||
142 | u8 ampdu_factor; | ||
143 | u8 ampdu_density; | ||
144 | u8 supp_mcs_set[16]; | ||
145 | }; | ||
146 | |||
147 | /** | ||
148 | * struct ieee80211_supported_band - frequency band definition | ||
149 | * | ||
150 | * This structure describes a frequency band a wiphy | ||
151 | * is able to operate in. | ||
152 | * | ||
153 | * @channels: Array of channels the hardware can operate in | ||
154 | * in this band. | ||
155 | * @band: the band this structure represents | ||
156 | * @n_channels: Number of channels in @channels | ||
157 | * @bitrates: Array of bitrates the hardware can operate with | ||
158 | * in this band. Must be sorted to give a valid "supported | ||
159 | * rates" IE, i.e. CCK rates first, then OFDM. | ||
160 | * @n_bitrates: Number of bitrates in @bitrates | ||
161 | */ | ||
162 | struct ieee80211_supported_band { | ||
163 | struct ieee80211_channel *channels; | ||
164 | struct ieee80211_rate *bitrates; | ||
165 | enum ieee80211_band band; | ||
166 | int n_channels; | ||
167 | int n_bitrates; | ||
168 | struct ieee80211_ht_info ht_info; | ||
169 | }; | ||
170 | |||
171 | /** | ||
16 | * struct wiphy - wireless hardware description | 172 | * struct wiphy - wireless hardware description |
17 | * @idx: the wiphy index assigned to this item | 173 | * @idx: the wiphy index assigned to this item |
18 | * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name> | 174 | * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name> |
@@ -30,6 +186,8 @@ struct wiphy { | |||
30 | * help determine whether you own this wiphy or not. */ | 186 | * help determine whether you own this wiphy or not. */ |
31 | void *privid; | 187 | void *privid; |
32 | 188 | ||
189 | struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS]; | ||
190 | |||
33 | /* fields below are read-only, assigned by cfg80211 */ | 191 | /* fields below are read-only, assigned by cfg80211 */ |
34 | 192 | ||
35 | /* the item in /sys/class/ieee80211/ points to this, | 193 | /* the item in /sys/class/ieee80211/ points to this, |
@@ -136,4 +294,32 @@ extern void wiphy_unregister(struct wiphy *wiphy); | |||
136 | */ | 294 | */ |
137 | extern void wiphy_free(struct wiphy *wiphy); | 295 | extern void wiphy_free(struct wiphy *wiphy); |
138 | 296 | ||
297 | /** | ||
298 | * ieee80211_channel_to_frequency - convert channel number to frequency | ||
299 | */ | ||
300 | extern int ieee80211_channel_to_frequency(int chan); | ||
301 | |||
302 | /** | ||
303 | * ieee80211_frequency_to_channel - convert frequency to channel number | ||
304 | */ | ||
305 | extern int ieee80211_frequency_to_channel(int freq); | ||
306 | |||
307 | /* | ||
308 | * Name indirection necessary because the ieee80211 code also has | ||
309 | * a function named "ieee80211_get_channel", so if you include | ||
310 | * cfg80211's header file you get cfg80211's version, if you try | ||
311 | * to include both header files you'll (rightfully!) get a symbol | ||
312 | * clash. | ||
313 | */ | ||
314 | extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, | ||
315 | int freq); | ||
316 | |||
317 | /** | ||
318 | * ieee80211_get_channel - get channel struct from wiphy for specified frequency | ||
319 | */ | ||
320 | static inline struct ieee80211_channel * | ||
321 | ieee80211_get_channel(struct wiphy *wiphy, int freq) | ||
322 | { | ||
323 | return __ieee80211_get_channel(wiphy, freq); | ||
324 | } | ||
139 | #endif /* __NET_WIRELESS_H */ | 325 | #endif /* __NET_WIRELESS_H */ |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 0d255ae008b6..b56b6a10fe5e 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -121,6 +121,7 @@ extern struct mutex xfrm_cfg_mutex; | |||
121 | struct xfrm_state | 121 | struct xfrm_state |
122 | { | 122 | { |
123 | /* Note: bydst is re-used during gc */ | 123 | /* Note: bydst is re-used during gc */ |
124 | struct list_head all; | ||
124 | struct hlist_node bydst; | 125 | struct hlist_node bydst; |
125 | struct hlist_node bysrc; | 126 | struct hlist_node bysrc; |
126 | struct hlist_node byspi; | 127 | struct hlist_node byspi; |
@@ -446,6 +447,7 @@ struct xfrm_tmpl | |||
446 | struct xfrm_policy | 447 | struct xfrm_policy |
447 | { | 448 | { |
448 | struct xfrm_policy *next; | 449 | struct xfrm_policy *next; |
450 | struct list_head bytype; | ||
449 | struct hlist_node bydst; | 451 | struct hlist_node bydst; |
450 | struct hlist_node byidx; | 452 | struct hlist_node byidx; |
451 | 453 | ||
@@ -1071,6 +1073,23 @@ xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family) | |||
1071 | return NULL; | 1073 | return NULL; |
1072 | } | 1074 | } |
1073 | 1075 | ||
1076 | static __inline__ | ||
1077 | void xfrm_flowi_addr_get(struct flowi *fl, | ||
1078 | xfrm_address_t *saddr, xfrm_address_t *daddr, | ||
1079 | unsigned short family) | ||
1080 | { | ||
1081 | switch(family) { | ||
1082 | case AF_INET: | ||
1083 | memcpy(&saddr->a4, &fl->fl4_src, sizeof(saddr->a4)); | ||
1084 | memcpy(&daddr->a4, &fl->fl4_dst, sizeof(daddr->a4)); | ||
1085 | break; | ||
1086 | case AF_INET6: | ||
1087 | ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->fl6_src); | ||
1088 | ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->fl6_dst); | ||
1089 | break; | ||
1090 | } | ||
1091 | } | ||
1092 | |||
1074 | static __inline__ int | 1093 | static __inline__ int |
1075 | __xfrm4_state_addr_check(struct xfrm_state *x, | 1094 | __xfrm4_state_addr_check(struct xfrm_state *x, |
1076 | xfrm_address_t *daddr, xfrm_address_t *saddr) | 1095 | xfrm_address_t *daddr, xfrm_address_t *saddr) |
@@ -1188,6 +1207,18 @@ struct xfrm6_tunnel { | |||
1188 | int priority; | 1207 | int priority; |
1189 | }; | 1208 | }; |
1190 | 1209 | ||
1210 | struct xfrm_state_walk { | ||
1211 | struct xfrm_state *state; | ||
1212 | int count; | ||
1213 | u8 proto; | ||
1214 | }; | ||
1215 | |||
1216 | struct xfrm_policy_walk { | ||
1217 | struct xfrm_policy *policy; | ||
1218 | int count; | ||
1219 | u8 type, cur_type; | ||
1220 | }; | ||
1221 | |||
1191 | extern void xfrm_init(void); | 1222 | extern void xfrm_init(void); |
1192 | extern void xfrm4_init(void); | 1223 | extern void xfrm4_init(void); |
1193 | extern void xfrm_state_init(void); | 1224 | extern void xfrm_state_init(void); |
@@ -1212,7 +1243,23 @@ static inline void xfrm6_fini(void) | |||
1212 | extern int xfrm_proc_init(void); | 1243 | extern int xfrm_proc_init(void); |
1213 | #endif | 1244 | #endif |
1214 | 1245 | ||
1215 | extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *); | 1246 | static inline void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto) |
1247 | { | ||
1248 | walk->proto = proto; | ||
1249 | walk->state = NULL; | ||
1250 | walk->count = 0; | ||
1251 | } | ||
1252 | |||
1253 | static inline void xfrm_state_walk_done(struct xfrm_state_walk *walk) | ||
1254 | { | ||
1255 | if (walk->state != NULL) { | ||
1256 | xfrm_state_put(walk->state); | ||
1257 | walk->state = NULL; | ||
1258 | } | ||
1259 | } | ||
1260 | |||
1261 | extern int xfrm_state_walk(struct xfrm_state_walk *walk, | ||
1262 | int (*func)(struct xfrm_state *, int, void*), void *); | ||
1216 | extern struct xfrm_state *xfrm_state_alloc(void); | 1263 | extern struct xfrm_state *xfrm_state_alloc(void); |
1217 | extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | 1264 | extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, |
1218 | struct flowi *fl, struct xfrm_tmpl *tmpl, | 1265 | struct flowi *fl, struct xfrm_tmpl *tmpl, |
@@ -1335,7 +1382,25 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) | |||
1335 | #endif | 1382 | #endif |
1336 | 1383 | ||
1337 | struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp); | 1384 | struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp); |
1338 | extern int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), void *); | 1385 | |
1386 | static inline void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type) | ||
1387 | { | ||
1388 | walk->cur_type = XFRM_POLICY_TYPE_MAIN; | ||
1389 | walk->type = type; | ||
1390 | walk->policy = NULL; | ||
1391 | walk->count = 0; | ||
1392 | } | ||
1393 | |||
1394 | static inline void xfrm_policy_walk_done(struct xfrm_policy_walk *walk) | ||
1395 | { | ||
1396 | if (walk->policy != NULL) { | ||
1397 | xfrm_pol_put(walk->policy); | ||
1398 | walk->policy = NULL; | ||
1399 | } | ||
1400 | } | ||
1401 | |||
1402 | extern int xfrm_policy_walk(struct xfrm_policy_walk *walk, | ||
1403 | int (*func)(struct xfrm_policy *, int, int, void*), void *); | ||
1339 | int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); | 1404 | int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); |
1340 | struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir, | 1405 | struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir, |
1341 | struct xfrm_selector *sel, | 1406 | struct xfrm_selector *sel, |
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h index 64a721fcbc1c..8d65bf0a625b 100644 --- a/include/rdma/ib_user_verbs.h +++ b/include/rdma/ib_user_verbs.h | |||
@@ -533,7 +533,10 @@ struct ib_uverbs_send_wr { | |||
533 | __u32 num_sge; | 533 | __u32 num_sge; |
534 | __u32 opcode; | 534 | __u32 opcode; |
535 | __u32 send_flags; | 535 | __u32 send_flags; |
536 | __u32 imm_data; | 536 | union { |
537 | __u32 imm_data; | ||
538 | __u32 invalidate_rkey; | ||
539 | } ex; | ||
537 | union { | 540 | union { |
538 | struct { | 541 | struct { |
539 | __u64 remote_addr; | 542 | __u64 remote_addr; |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 701e7b40560a..95bf4bac44cb 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -94,7 +94,7 @@ enum ib_device_cap_flags { | |||
94 | IB_DEVICE_SRQ_RESIZE = (1<<13), | 94 | IB_DEVICE_SRQ_RESIZE = (1<<13), |
95 | IB_DEVICE_N_NOTIFY_CQ = (1<<14), | 95 | IB_DEVICE_N_NOTIFY_CQ = (1<<14), |
96 | IB_DEVICE_ZERO_STAG = (1<<15), | 96 | IB_DEVICE_ZERO_STAG = (1<<15), |
97 | IB_DEVICE_SEND_W_INV = (1<<16), | 97 | IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */ |
98 | IB_DEVICE_MEM_WINDOW = (1<<17), | 98 | IB_DEVICE_MEM_WINDOW = (1<<17), |
99 | /* | 99 | /* |
100 | * Devices should set IB_DEVICE_UD_IP_SUM if they support | 100 | * Devices should set IB_DEVICE_UD_IP_SUM if they support |
@@ -104,6 +104,8 @@ enum ib_device_cap_flags { | |||
104 | * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. | 104 | * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. |
105 | */ | 105 | */ |
106 | IB_DEVICE_UD_IP_CSUM = (1<<18), | 106 | IB_DEVICE_UD_IP_CSUM = (1<<18), |
107 | IB_DEVICE_UD_TSO = (1<<19), | ||
108 | IB_DEVICE_SEND_W_INV = (1<<21), | ||
107 | }; | 109 | }; |
108 | 110 | ||
109 | enum ib_atomic_cap { | 111 | enum ib_atomic_cap { |
@@ -411,6 +413,7 @@ enum ib_wc_opcode { | |||
411 | IB_WC_COMP_SWAP, | 413 | IB_WC_COMP_SWAP, |
412 | IB_WC_FETCH_ADD, | 414 | IB_WC_FETCH_ADD, |
413 | IB_WC_BIND_MW, | 415 | IB_WC_BIND_MW, |
416 | IB_WC_LSO, | ||
414 | /* | 417 | /* |
415 | * Set value of IB_WC_RECV so consumers can test if a completion is a | 418 | * Set value of IB_WC_RECV so consumers can test if a completion is a |
416 | * receive by testing (opcode & IB_WC_RECV). | 419 | * receive by testing (opcode & IB_WC_RECV). |
@@ -495,6 +498,10 @@ enum ib_qp_type { | |||
495 | IB_QPT_RAW_ETY | 498 | IB_QPT_RAW_ETY |
496 | }; | 499 | }; |
497 | 500 | ||
501 | enum ib_qp_create_flags { | ||
502 | IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, | ||
503 | }; | ||
504 | |||
498 | struct ib_qp_init_attr { | 505 | struct ib_qp_init_attr { |
499 | void (*event_handler)(struct ib_event *, void *); | 506 | void (*event_handler)(struct ib_event *, void *); |
500 | void *qp_context; | 507 | void *qp_context; |
@@ -504,6 +511,7 @@ struct ib_qp_init_attr { | |||
504 | struct ib_qp_cap cap; | 511 | struct ib_qp_cap cap; |
505 | enum ib_sig_type sq_sig_type; | 512 | enum ib_sig_type sq_sig_type; |
506 | enum ib_qp_type qp_type; | 513 | enum ib_qp_type qp_type; |
514 | enum ib_qp_create_flags create_flags; | ||
507 | u8 port_num; /* special QP types only */ | 515 | u8 port_num; /* special QP types only */ |
508 | }; | 516 | }; |
509 | 517 | ||
@@ -617,7 +625,9 @@ enum ib_wr_opcode { | |||
617 | IB_WR_SEND_WITH_IMM, | 625 | IB_WR_SEND_WITH_IMM, |
618 | IB_WR_RDMA_READ, | 626 | IB_WR_RDMA_READ, |
619 | IB_WR_ATOMIC_CMP_AND_SWP, | 627 | IB_WR_ATOMIC_CMP_AND_SWP, |
620 | IB_WR_ATOMIC_FETCH_AND_ADD | 628 | IB_WR_ATOMIC_FETCH_AND_ADD, |
629 | IB_WR_LSO, | ||
630 | IB_WR_SEND_WITH_INV, | ||
621 | }; | 631 | }; |
622 | 632 | ||
623 | enum ib_send_flags { | 633 | enum ib_send_flags { |
@@ -641,7 +651,10 @@ struct ib_send_wr { | |||
641 | int num_sge; | 651 | int num_sge; |
642 | enum ib_wr_opcode opcode; | 652 | enum ib_wr_opcode opcode; |
643 | int send_flags; | 653 | int send_flags; |
644 | __be32 imm_data; | 654 | union { |
655 | __be32 imm_data; | ||
656 | u32 invalidate_rkey; | ||
657 | } ex; | ||
645 | union { | 658 | union { |
646 | struct { | 659 | struct { |
647 | u64 remote_addr; | 660 | u64 remote_addr; |
@@ -655,6 +668,9 @@ struct ib_send_wr { | |||
655 | } atomic; | 668 | } atomic; |
656 | struct { | 669 | struct { |
657 | struct ib_ah *ah; | 670 | struct ib_ah *ah; |
671 | void *header; | ||
672 | int hlen; | ||
673 | int mss; | ||
658 | u32 remote_qpn; | 674 | u32 remote_qpn; |
659 | u32 remote_qkey; | 675 | u32 remote_qkey; |
660 | u16 pkey_index; /* valid for GSI only */ | 676 | u16 pkey_index; /* valid for GSI only */ |
@@ -730,7 +746,7 @@ struct ib_uobject { | |||
730 | struct ib_ucontext *context; /* associated user context */ | 746 | struct ib_ucontext *context; /* associated user context */ |
731 | void *object; /* containing object */ | 747 | void *object; /* containing object */ |
732 | struct list_head list; /* link to context's list */ | 748 | struct list_head list; /* link to context's list */ |
733 | u32 id; /* index into kernel idr */ | 749 | int id; /* index into kernel idr */ |
734 | struct kref ref; | 750 | struct kref ref; |
735 | struct rw_semaphore mutex; /* protects .live */ | 751 | struct rw_semaphore mutex; /* protects .live */ |
736 | int live; | 752 | int live; |
@@ -971,6 +987,8 @@ struct ib_device { | |||
971 | int comp_vector, | 987 | int comp_vector, |
972 | struct ib_ucontext *context, | 988 | struct ib_ucontext *context, |
973 | struct ib_udata *udata); | 989 | struct ib_udata *udata); |
990 | int (*modify_cq)(struct ib_cq *cq, u16 cq_count, | ||
991 | u16 cq_period); | ||
974 | int (*destroy_cq)(struct ib_cq *cq); | 992 | int (*destroy_cq)(struct ib_cq *cq); |
975 | int (*resize_cq)(struct ib_cq *cq, int cqe, | 993 | int (*resize_cq)(struct ib_cq *cq, int cqe, |
976 | struct ib_udata *udata); | 994 | struct ib_udata *udata); |
@@ -1376,6 +1394,15 @@ struct ib_cq *ib_create_cq(struct ib_device *device, | |||
1376 | int ib_resize_cq(struct ib_cq *cq, int cqe); | 1394 | int ib_resize_cq(struct ib_cq *cq, int cqe); |
1377 | 1395 | ||
1378 | /** | 1396 | /** |
1397 | * ib_modify_cq - Modifies moderation params of the CQ | ||
1398 | * @cq: The CQ to modify. | ||
1399 | * @cq_count: number of CQEs that will trigger an event | ||
1400 | * @cq_period: max period of time in usec before triggering an event | ||
1401 | * | ||
1402 | */ | ||
1403 | int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); | ||
1404 | |||
1405 | /** | ||
1379 | * ib_destroy_cq - Destroys the specified CQ. | 1406 | * ib_destroy_cq - Destroys the specified CQ. |
1380 | * @cq: The CQ to destroy. | 1407 | * @cq: The CQ to destroy. |
1381 | */ | 1408 | */ |
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h index 5ffec8ad6964..e0593bfae622 100644 --- a/include/scsi/iscsi_proto.h +++ b/include/scsi/iscsi_proto.h | |||
@@ -112,6 +112,7 @@ struct iscsi_ahs_hdr { | |||
112 | 112 | ||
113 | #define ISCSI_AHSTYPE_CDB 1 | 113 | #define ISCSI_AHSTYPE_CDB 1 |
114 | #define ISCSI_AHSTYPE_RLENGTH 2 | 114 | #define ISCSI_AHSTYPE_RLENGTH 2 |
115 | #define ISCSI_CDB_SIZE 16 | ||
115 | 116 | ||
116 | /* iSCSI PDU Header */ | 117 | /* iSCSI PDU Header */ |
117 | struct iscsi_cmd { | 118 | struct iscsi_cmd { |
@@ -125,7 +126,7 @@ struct iscsi_cmd { | |||
125 | __be32 data_length; | 126 | __be32 data_length; |
126 | __be32 cmdsn; | 127 | __be32 cmdsn; |
127 | __be32 exp_statsn; | 128 | __be32 exp_statsn; |
128 | uint8_t cdb[16]; /* SCSI Command Block */ | 129 | uint8_t cdb[ISCSI_CDB_SIZE]; /* SCSI Command Block */ |
129 | /* Additional Data (Command Dependent) */ | 130 | /* Additional Data (Command Dependent) */ |
130 | }; | 131 | }; |
131 | 132 | ||
@@ -154,7 +155,8 @@ struct iscsi_ecdb_ahdr { | |||
154 | __be16 ahslength; /* CDB length - 15, including reserved byte */ | 155 | __be16 ahslength; /* CDB length - 15, including reserved byte */ |
155 | uint8_t ahstype; | 156 | uint8_t ahstype; |
156 | uint8_t reserved; | 157 | uint8_t reserved; |
157 | uint8_t ecdb[260 - 16]; /* 4-byte aligned extended CDB spillover */ | 158 | /* 4-byte aligned extended CDB spillover */ |
159 | uint8_t ecdb[260 - ISCSI_CDB_SIZE]; | ||
158 | }; | 160 | }; |
159 | 161 | ||
160 | /* SCSI Response Header */ | 162 | /* SCSI Response Header */ |
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 39e1cac24bb7..98724ba65a79 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -677,4 +677,6 @@ extern void sas_ssp_task_response(struct device *dev, struct sas_task *task, | |||
677 | struct ssp_response_iu *iu); | 677 | struct ssp_response_iu *iu); |
678 | struct sas_phy *sas_find_local_phy(struct domain_device *dev); | 678 | struct sas_phy *sas_find_local_phy(struct domain_device *dev); |
679 | 679 | ||
680 | int sas_request_addr(struct Scsi_Host *shost, u8 *addr); | ||
681 | |||
680 | #endif /* _SASLIB_H_ */ | 682 | #endif /* _SASLIB_H_ */ |
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index dd5edc915417..c583193ae929 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h | |||
@@ -47,12 +47,12 @@ static inline int dev_is_sata(struct domain_device *dev) | |||
47 | { | 47 | { |
48 | return 0; | 48 | return 0; |
49 | } | 49 | } |
50 | int sas_ata_init_host_and_port(struct domain_device *found_dev, | 50 | static inline int sas_ata_init_host_and_port(struct domain_device *found_dev, |
51 | struct scsi_target *starget) | 51 | struct scsi_target *starget) |
52 | { | 52 | { |
53 | return 0; | 53 | return 0; |
54 | } | 54 | } |
55 | void sas_ata_task_abort(struct sas_task *task) | 55 | static inline void sas_ata_task_abort(struct sas_task *task) |
56 | { | 56 | { |
57 | } | 57 | } |
58 | #endif | 58 | #endif |
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index de28aab820b0..8d20e60a94b7 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h | |||
@@ -130,6 +130,9 @@ extern void scsi_release_buffers(struct scsi_cmnd *cmd); | |||
130 | extern int scsi_dma_map(struct scsi_cmnd *cmd); | 130 | extern int scsi_dma_map(struct scsi_cmnd *cmd); |
131 | extern void scsi_dma_unmap(struct scsi_cmnd *cmd); | 131 | extern void scsi_dma_unmap(struct scsi_cmnd *cmd); |
132 | 132 | ||
133 | struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask); | ||
134 | void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd); | ||
135 | |||
133 | static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd) | 136 | static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd) |
134 | { | 137 | { |
135 | return cmd->sdb.table.nents; | 138 | return cmd->sdb.table.nents; |
@@ -175,4 +178,18 @@ static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd) | |||
175 | return &cmd->sdb; | 178 | return &cmd->sdb; |
176 | } | 179 | } |
177 | 180 | ||
181 | static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd, | ||
182 | void *buf, int buflen) | ||
183 | { | ||
184 | return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), | ||
185 | buf, buflen); | ||
186 | } | ||
187 | |||
188 | static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd, | ||
189 | void *buf, int buflen) | ||
190 | { | ||
191 | return sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), | ||
192 | buf, buflen); | ||
193 | } | ||
194 | |||
178 | #endif /* _SCSI_SCSI_CMND_H */ | 195 | #endif /* _SCSI_SCSI_CMND_H */ |
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index 25071d5d9bf8..d3a133b4a072 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h | |||
@@ -57,13 +57,16 @@ extern const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, | |||
57 | 57 | ||
58 | extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, | 58 | extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, |
59 | u64 * info_out); | 59 | u64 * info_out); |
60 | 60 | ||
61 | extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); | ||
62 | |||
61 | /* | 63 | /* |
62 | * Reset request from external source | 64 | * Reset request from external source |
63 | */ | 65 | */ |
64 | #define SCSI_TRY_RESET_DEVICE 1 | 66 | #define SCSI_TRY_RESET_DEVICE 1 |
65 | #define SCSI_TRY_RESET_BUS 2 | 67 | #define SCSI_TRY_RESET_BUS 2 |
66 | #define SCSI_TRY_RESET_HOST 3 | 68 | #define SCSI_TRY_RESET_HOST 3 |
69 | #define SCSI_TRY_RESET_TARGET 4 | ||
67 | 70 | ||
68 | extern int scsi_reset_provider(struct scsi_device *, int); | 71 | extern int scsi_reset_provider(struct scsi_device *, int); |
69 | 72 | ||
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 530ff4c553f8..49132862bfaa 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -172,6 +172,7 @@ struct scsi_host_template { | |||
172 | */ | 172 | */ |
173 | int (* eh_abort_handler)(struct scsi_cmnd *); | 173 | int (* eh_abort_handler)(struct scsi_cmnd *); |
174 | int (* eh_device_reset_handler)(struct scsi_cmnd *); | 174 | int (* eh_device_reset_handler)(struct scsi_cmnd *); |
175 | int (* eh_target_reset_handler)(struct scsi_cmnd *); | ||
175 | int (* eh_bus_reset_handler)(struct scsi_cmnd *); | 176 | int (* eh_bus_reset_handler)(struct scsi_cmnd *); |
176 | int (* eh_host_reset_handler)(struct scsi_cmnd *); | 177 | int (* eh_host_reset_handler)(struct scsi_cmnd *); |
177 | 178 | ||