author     Greg KH <gregkh@suse.de>                 2005-09-12 15:45:04 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>      2005-09-12 15:45:04 -0400
commit     d58dde0f552a5c5c4485b962d8b6e9dd54fefb30 (patch)
tree       d9a7e35eb88fea6265d5aadcc3d4ed39122b052a /include
parent     877599fdef5ea4a7dd1956e22fa9d6923add97f8 (diff)
parent     2ade81473636b33aaac64495f89a7dc572c529f0 (diff)
Merge ../torvalds-2.6/
Diffstat (limited to 'include')
186 files changed, 3534 insertions, 2640 deletions
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h index 80780dba9986..8197c69eff44 100644 --- a/include/asm-alpha/spinlock.h +++ b/include/asm-alpha/spinlock.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <asm/current.h> | 7 | #include <asm/current.h> |
8 | 8 | ||
9 | |||
10 | /* | 9 | /* |
11 | * Simple spin lock operations. There are two variants, one clears IRQ's | 10 | * Simple spin lock operations. There are two variants, one clears IRQ's |
12 | * on the local processor, one does not. | 11 | * on the local processor, one does not. |
@@ -14,43 +13,18 @@ | |||
14 | * We make no fairness assumptions. They have a cost. | 13 | * We make no fairness assumptions. They have a cost. |
15 | */ | 14 | */ |
16 | 15 | ||
17 | typedef struct { | 16 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
18 | volatile unsigned int lock; | 17 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
19 | #ifdef CONFIG_DEBUG_SPINLOCK | 18 | #define __raw_spin_unlock_wait(x) \ |
20 | int on_cpu; | 19 | do { cpu_relax(); } while ((x)->lock) |
21 | int line_no; | 20 | |
22 | void *previous; | 21 | static inline void __raw_spin_unlock(raw_spinlock_t * lock) |
23 | struct task_struct * task; | ||
24 | const char *base_file; | ||
25 | #endif | ||
26 | } spinlock_t; | ||
27 | |||
28 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
29 | #define SPIN_LOCK_UNLOCKED (spinlock_t){ 0, -1, 0, NULL, NULL, NULL } | ||
30 | #else | ||
31 | #define SPIN_LOCK_UNLOCKED (spinlock_t){ 0 } | ||
32 | #endif | ||
33 | |||
34 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
35 | #define spin_is_locked(x) ((x)->lock != 0) | ||
36 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | ||
37 | |||
38 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
39 | extern void _raw_spin_unlock(spinlock_t * lock); | ||
40 | extern void debug_spin_lock(spinlock_t * lock, const char *, int); | ||
41 | extern int debug_spin_trylock(spinlock_t * lock, const char *, int); | ||
42 | #define _raw_spin_lock(LOCK) \ | ||
43 | debug_spin_lock(LOCK, __BASE_FILE__, __LINE__) | ||
44 | #define _raw_spin_trylock(LOCK) \ | ||
45 | debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__) | ||
46 | #else | ||
47 | static inline void _raw_spin_unlock(spinlock_t * lock) | ||
48 | { | 22 | { |
49 | mb(); | 23 | mb(); |
50 | lock->lock = 0; | 24 | lock->lock = 0; |
51 | } | 25 | } |
52 | 26 | ||
53 | static inline void _raw_spin_lock(spinlock_t * lock) | 27 | static inline void __raw_spin_lock(raw_spinlock_t * lock) |
54 | { | 28 | { |
55 | long tmp; | 29 | long tmp; |
56 | 30 | ||
@@ -70,80 +44,64 @@ static inline void _raw_spin_lock(spinlock_t * lock) | |||
70 | : "m"(lock->lock) : "memory"); | 44 | : "m"(lock->lock) : "memory"); |
71 | } | 45 | } |
72 | 46 | ||
73 | static inline int _raw_spin_trylock(spinlock_t *lock) | 47 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
74 | { | 48 | { |
75 | return !test_and_set_bit(0, &lock->lock); | 49 | return !test_and_set_bit(0, &lock->lock); |
76 | } | 50 | } |
77 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
78 | |||
79 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
80 | 51 | ||
81 | /***********************************************************/ | 52 | /***********************************************************/ |
82 | 53 | ||
83 | typedef struct { | 54 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) |
84 | volatile unsigned int lock; | ||
85 | } rwlock_t; | ||
86 | |||
87 | #define RW_LOCK_UNLOCKED (rwlock_t){ 0 } | ||
88 | |||
89 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
90 | |||
91 | static inline int read_can_lock(rwlock_t *lock) | ||
92 | { | 55 | { |
93 | return (lock->lock & 1) == 0; | 56 | return (lock->lock & 1) == 0; |
94 | } | 57 | } |
95 | 58 | ||
96 | static inline int write_can_lock(rwlock_t *lock) | 59 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) |
97 | { | 60 | { |
98 | return lock->lock == 0; | 61 | return lock->lock == 0; |
99 | } | 62 | } |
100 | 63 | ||
101 | #ifdef CONFIG_DEBUG_RWLOCK | 64 | static inline void __raw_read_lock(raw_rwlock_t *lock) |
102 | extern void _raw_write_lock(rwlock_t * lock); | ||
103 | extern void _raw_read_lock(rwlock_t * lock); | ||
104 | #else | ||
105 | static inline void _raw_write_lock(rwlock_t * lock) | ||
106 | { | 65 | { |
107 | long regx; | 66 | long regx; |
108 | 67 | ||
109 | __asm__ __volatile__( | 68 | __asm__ __volatile__( |
110 | "1: ldl_l %1,%0\n" | 69 | "1: ldl_l %1,%0\n" |
111 | " bne %1,6f\n" | 70 | " blbs %1,6f\n" |
112 | " lda %1,1\n" | 71 | " subl %1,2,%1\n" |
113 | " stl_c %1,%0\n" | 72 | " stl_c %1,%0\n" |
114 | " beq %1,6f\n" | 73 | " beq %1,6f\n" |
115 | " mb\n" | 74 | " mb\n" |
116 | ".subsection 2\n" | 75 | ".subsection 2\n" |
117 | "6: ldl %1,%0\n" | 76 | "6: ldl %1,%0\n" |
118 | " bne %1,6b\n" | 77 | " blbs %1,6b\n" |
119 | " br 1b\n" | 78 | " br 1b\n" |
120 | ".previous" | 79 | ".previous" |
121 | : "=m" (*lock), "=&r" (regx) | 80 | : "=m" (*lock), "=&r" (regx) |
122 | : "m" (*lock) : "memory"); | 81 | : "m" (*lock) : "memory"); |
123 | } | 82 | } |
124 | 83 | ||
125 | static inline void _raw_read_lock(rwlock_t * lock) | 84 | static inline void __raw_write_lock(raw_rwlock_t *lock) |
126 | { | 85 | { |
127 | long regx; | 86 | long regx; |
128 | 87 | ||
129 | __asm__ __volatile__( | 88 | __asm__ __volatile__( |
130 | "1: ldl_l %1,%0\n" | 89 | "1: ldl_l %1,%0\n" |
131 | " blbs %1,6f\n" | 90 | " bne %1,6f\n" |
132 | " subl %1,2,%1\n" | 91 | " lda %1,1\n" |
133 | " stl_c %1,%0\n" | 92 | " stl_c %1,%0\n" |
134 | " beq %1,6f\n" | 93 | " beq %1,6f\n" |
135 | " mb\n" | 94 | " mb\n" |
136 | ".subsection 2\n" | 95 | ".subsection 2\n" |
137 | "6: ldl %1,%0\n" | 96 | "6: ldl %1,%0\n" |
138 | " blbs %1,6b\n" | 97 | " bne %1,6b\n" |
139 | " br 1b\n" | 98 | " br 1b\n" |
140 | ".previous" | 99 | ".previous" |
141 | : "=m" (*lock), "=&r" (regx) | 100 | : "=m" (*lock), "=&r" (regx) |
142 | : "m" (*lock) : "memory"); | 101 | : "m" (*lock) : "memory"); |
143 | } | 102 | } |
144 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
145 | 103 | ||
146 | static inline int _raw_read_trylock(rwlock_t * lock) | 104 | static inline int __raw_read_trylock(raw_rwlock_t * lock) |
147 | { | 105 | { |
148 | long regx; | 106 | long regx; |
149 | int success; | 107 | int success; |
@@ -165,7 +123,7 @@ static inline int _raw_read_trylock(rwlock_t * lock) | |||
165 | return success; | 123 | return success; |
166 | } | 124 | } |
167 | 125 | ||
168 | static inline int _raw_write_trylock(rwlock_t * lock) | 126 | static inline int __raw_write_trylock(raw_rwlock_t * lock) |
169 | { | 127 | { |
170 | long regx; | 128 | long regx; |
171 | int success; | 129 | int success; |
@@ -187,13 +145,7 @@ static inline int _raw_write_trylock(rwlock_t * lock) | |||
187 | return success; | 145 | return success; |
188 | } | 146 | } |
189 | 147 | ||
190 | static inline void _raw_write_unlock(rwlock_t * lock) | 148 | static inline void __raw_read_unlock(raw_rwlock_t * lock) |
191 | { | ||
192 | mb(); | ||
193 | lock->lock = 0; | ||
194 | } | ||
195 | |||
196 | static inline void _raw_read_unlock(rwlock_t * lock) | ||
197 | { | 149 | { |
198 | long regx; | 150 | long regx; |
199 | __asm__ __volatile__( | 151 | __asm__ __volatile__( |
@@ -209,4 +161,10 @@ static inline void _raw_read_unlock(rwlock_t * lock) | |||
209 | : "m" (*lock) : "memory"); | 161 | : "m" (*lock) : "memory"); |
210 | } | 162 | } |
211 | 163 | ||
164 | static inline void __raw_write_unlock(raw_rwlock_t * lock) | ||
165 | { | ||
166 | mb(); | ||
167 | lock->lock = 0; | ||
168 | } | ||
169 | |||
212 | #endif /* _ALPHA_SPINLOCK_H */ | 170 | #endif /* _ALPHA_SPINLOCK_H */ |
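
The Alpha lock and unlock paths above are built on the ldl_l/stl_c (load-locked / store-conditional) pair. As a rough C-level model of what the __raw_spin_lock()/__raw_spin_unlock() assembly is doing -- purely illustrative, written with GCC __atomic builtins rather than the kernel's inline asm:

    /* Illustration only: the acquire/release behaviour that the ldl_l/stl_c
     * loops above implement, modelled with GCC __atomic builtins. */
    typedef struct { volatile unsigned int lock; } raw_spinlock_t;

    static inline void model_raw_spin_lock(raw_spinlock_t *lock)
    {
            unsigned int expected;

            do {
                    expected = 0;   /* only proceed if the lock is free */
            } while (!__atomic_compare_exchange_n(&lock->lock, &expected, 1,
                                                  0, __ATOMIC_ACQUIRE,
                                                  __ATOMIC_RELAXED));
    }

    static inline void model_raw_spin_unlock(raw_spinlock_t *lock)
    {
            __atomic_store_n(&lock->lock, 0, __ATOMIC_RELEASE); /* mb(); lock->lock = 0 */
    }

The real code additionally parks contended waiters in a .subsection 2 spin loop so the hot path stays short; the model above ignores that detail.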
diff --git a/include/asm-alpha/spinlock_types.h b/include/asm-alpha/spinlock_types.h new file mode 100644 index 000000000000..8141eb5ebf0d --- /dev/null +++ b/include/asm-alpha/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _ALPHA_SPINLOCK_TYPES_H | ||
2 | #define _ALPHA_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
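
The new spinlock_types.h carries only the lock layouts, and the #error guard forces it to be reached through <linux/spinlock_types.h>. The expectation is that the generic header wraps these raw types (plus debug/preempt fields, when configured) into the spinlock_t/rwlock_t used by the rest of the kernel, with the spin_* API mapped onto the __raw_* operations from asm/spinlock.h above. A minimal sketch of that wrapping -- the field and macro names here are illustrative assumptions, not part of this patch:

    /* Sketch of the generic wrapper assumed to live in <linux/spinlock_types.h>. */
    #define __LINUX_SPINLOCK_TYPES_H    /* satisfies the #error guard above */
    #include <asm/spinlock_types.h>

    typedef struct {
            raw_spinlock_t raw_lock;    /* arch layout introduced here */
            /* debug / break_lock fields would be added under the
             * corresponding config options */
    } spinlock_t;

    #define SPIN_LOCK_UNLOCKED (spinlock_t) { __RAW_SPIN_LOCK_UNLOCKED }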
diff --git a/include/asm-arm/arch-pxa/pxafb.h b/include/asm-arm/arch-pxa/pxafb.h index 27d71e9d413b..21c0e16dce5f 100644 --- a/include/asm-arm/arch-pxa/pxafb.h +++ b/include/asm-arm/arch-pxa/pxafb.h | |||
@@ -66,3 +66,4 @@ struct pxafb_mach_info { | |||
66 | 66 | ||
67 | }; | 67 | }; |
68 | void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info); | 68 | void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info); |
69 | unsigned long pxafb_get_hsync_time(struct device *dev); | ||
diff --git a/include/asm-arm/arch-s3c2410/fb.h b/include/asm-arm/arch-s3c2410/fb.h new file mode 100644 index 000000000000..ac57bc887d82 --- /dev/null +++ b/include/asm-arm/arch-s3c2410/fb.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* linux/include/asm/arch-s3c2410/fb.h | ||
2 | * | ||
3 | * Copyright (c) 2004 Arnaud Patard <arnaud.patard@rtp-net.org> | ||
4 | * | ||
5 | * Inspired by pxafb.h | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * | ||
12 | * Changelog: | ||
13 | * 07-Sep-2004 RTP Created file | ||
14 | * 03-Nov-2004 BJD Updated and minor cleanups | ||
15 | * 03-Aug-2005 RTP Renamed to fb.h | ||
16 | */ | ||
17 | |||
18 | #ifndef __ASM_ARM_FB_H | ||
19 | #define __ASM_ARM_FB_H | ||
20 | |||
21 | #include <asm/arch/regs-lcd.h> | ||
22 | |||
23 | struct s3c2410fb_val { | ||
24 | unsigned int defval; | ||
25 | unsigned int min; | ||
26 | unsigned int max; | ||
27 | }; | ||
28 | |||
29 | struct s3c2410fb_hw { | ||
30 | unsigned long lcdcon1; | ||
31 | unsigned long lcdcon2; | ||
32 | unsigned long lcdcon3; | ||
33 | unsigned long lcdcon4; | ||
34 | unsigned long lcdcon5; | ||
35 | }; | ||
36 | |||
37 | struct s3c2410fb_mach_info { | ||
38 | unsigned char fixed_syncs; /* do not update sync/border */ | ||
39 | |||
40 | /* Screen size */ | ||
41 | int width; | ||
42 | int height; | ||
43 | |||
44 | /* Screen info */ | ||
45 | struct s3c2410fb_val xres; | ||
46 | struct s3c2410fb_val yres; | ||
47 | struct s3c2410fb_val bpp; | ||
48 | |||
49 | /* lcd configuration registers */ | ||
50 | struct s3c2410fb_hw regs; | ||
51 | |||
52 | /* GPIOs */ | ||
53 | |||
54 | unsigned long gpcup; | ||
55 | unsigned long gpcup_mask; | ||
56 | unsigned long gpccon; | ||
57 | unsigned long gpccon_mask; | ||
58 | unsigned long gpdup; | ||
59 | unsigned long gpdup_mask; | ||
60 | unsigned long gpdcon; | ||
61 | unsigned long gpdcon_mask; | ||
62 | |||
63 | /* lpc3600 control register */ | ||
64 | unsigned long lpcsel; | ||
65 | }; | ||
66 | |||
67 | void __init set_s3c2410fb_info(struct s3c2410fb_mach_info *hard_s3c2410fb_info); | ||
68 | |||
69 | #endif /* __ASM_ARM_FB_H */ | ||
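
For context on how this header is meant to be consumed: a machine/board file fills in a struct s3c2410fb_mach_info and passes it to set_s3c2410fb_info() during board init. The sketch below is a placeholder, not taken from any real board -- every value and name other than the structure fields themselves is made up:

    #include <asm/arch/fb.h>

    /* Hypothetical 240x320, 16bpp panel description. */
    static struct s3c2410fb_mach_info example_fb_info = {
            .width  = 240,
            .height = 320,
            .xres   = { .defval = 240, .min = 240, .max = 240 },
            .yres   = { .defval = 320, .min = 320, .max = 320 },
            .bpp    = { .defval = 16,  .min = 16,  .max = 16  },
            /* .regs, .fixed_syncs and the GPIO fields would be filled in
             * with board-specific values here */
    };

    static void __init example_board_init(void)
    {
            set_s3c2410fb_info(&example_fb_info);
    }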
diff --git a/include/asm-arm/arch-s3c2410/regs-lcd.h b/include/asm-arm/arch-s3c2410/regs-lcd.h index 7f882ea92b2a..b6b1b4e8bbeb 100644 --- a/include/asm-arm/arch-s3c2410/regs-lcd.h +++ b/include/asm-arm/arch-s3c2410/regs-lcd.h | |||
@@ -51,21 +51,32 @@ | |||
51 | 51 | ||
52 | #define S3C2410_LCDCON1_ENVID (1) | 52 | #define S3C2410_LCDCON1_ENVID (1) |
53 | 53 | ||
54 | #define S3C2410_LCDCON1_MODEMASK 0x1E | ||
55 | |||
54 | #define S3C2410_LCDCON2_VBPD(x) ((x) << 24) | 56 | #define S3C2410_LCDCON2_VBPD(x) ((x) << 24) |
55 | #define S3C2410_LCDCON2_LINEVAL(x) ((x) << 14) | 57 | #define S3C2410_LCDCON2_LINEVAL(x) ((x) << 14) |
56 | #define S3C2410_LCDCON2_VFPD(x) ((x) << 6) | 58 | #define S3C2410_LCDCON2_VFPD(x) ((x) << 6) |
57 | #define S3C2410_LCDCON2_VSPW(x) ((x) << 0) | 59 | #define S3C2410_LCDCON2_VSPW(x) ((x) << 0) |
58 | 60 | ||
61 | #define S3C2410_LCDCON2_GET_VBPD(x) ( ((x) >> 24) & 0xFF) | ||
62 | #define S3C2410_LCDCON2_GET_VFPD(x) ( ((x) >> 6) & 0xFF) | ||
63 | #define S3C2410_LCDCON2_GET_VSPW(x) ( ((x) >> 0) & 0x3F) | ||
64 | |||
59 | #define S3C2410_LCDCON3_HBPD(x) ((x) << 19) | 65 | #define S3C2410_LCDCON3_HBPD(x) ((x) << 19) |
60 | #define S3C2410_LCDCON3_WDLY(x) ((x) << 19) | 66 | #define S3C2410_LCDCON3_WDLY(x) ((x) << 19) |
61 | #define S3C2410_LCDCON3_HOZVAL(x) ((x) << 8) | 67 | #define S3C2410_LCDCON3_HOZVAL(x) ((x) << 8) |
62 | #define S3C2410_LCDCON3_HFPD(x) ((x) << 0) | 68 | #define S3C2410_LCDCON3_HFPD(x) ((x) << 0) |
63 | #define S3C2410_LCDCON3_LINEBLANK(x)((x) << 0) | 69 | #define S3C2410_LCDCON3_LINEBLANK(x)((x) << 0) |
64 | 70 | ||
71 | #define S3C2410_LCDCON3_GET_HBPD(x) ( ((x) >> 19) & 0x7F) | ||
72 | #define S3C2410_LCDCON3_GET_HFPD(x) ( ((x) >> 0) & 0xFF) | ||
73 | |||
65 | #define S3C2410_LCDCON4_MVAL(x) ((x) << 8) | 74 | #define S3C2410_LCDCON4_MVAL(x) ((x) << 8) |
66 | #define S3C2410_LCDCON4_HSPW(x) ((x) << 0) | 75 | #define S3C2410_LCDCON4_HSPW(x) ((x) << 0) |
67 | #define S3C2410_LCDCON4_WLH(x) ((x) << 0) | 76 | #define S3C2410_LCDCON4_WLH(x) ((x) << 0) |
68 | 77 | ||
78 | #define S3C2410_LCDCON4_GET_HSPW(x) ( ((x) >> 0) & 0xFF) | ||
79 | |||
69 | #define S3C2410_LCDCON5_BPP24BL (1<<12) | 80 | #define S3C2410_LCDCON5_BPP24BL (1<<12) |
70 | #define S3C2410_LCDCON5_FRM565 (1<<11) | 81 | #define S3C2410_LCDCON5_FRM565 (1<<11) |
71 | #define S3C2410_LCDCON5_INVVCLK (1<<10) | 82 | #define S3C2410_LCDCON5_INVVCLK (1<<10) |
@@ -100,10 +111,16 @@ | |||
100 | #define S3C2410_DITHMODE S3C2410_LCDREG(0x4C) | 111 | #define S3C2410_DITHMODE S3C2410_LCDREG(0x4C) |
101 | #define S3C2410_TPAL S3C2410_LCDREG(0x50) | 112 | #define S3C2410_TPAL S3C2410_LCDREG(0x50) |
102 | 113 | ||
114 | #define S3C2410_TPAL_EN (1<<24) | ||
115 | |||
103 | /* interrupt info */ | 116 | /* interrupt info */ |
104 | #define S3C2410_LCDINTPND S3C2410_LCDREG(0x54) | 117 | #define S3C2410_LCDINTPND S3C2410_LCDREG(0x54) |
105 | #define S3C2410_LCDSRCPND S3C2410_LCDREG(0x58) | 118 | #define S3C2410_LCDSRCPND S3C2410_LCDREG(0x58) |
106 | #define S3C2410_LCDINTMSK S3C2410_LCDREG(0x5C) | 119 | #define S3C2410_LCDINTMSK S3C2410_LCDREG(0x5C) |
120 | #define S3C2410_LCDINT_FIWSEL (1<<2) | ||
121 | #define S3C2410_LCDINT_FRSYNC (1<<1) | ||
122 | #define S3C2410_LCDINT_FICNT (1<<0) | ||
123 | |||
107 | #define S3C2410_LPCSEL S3C2410_LCDREG(0x60) | 124 | #define S3C2410_LPCSEL S3C2410_LCDREG(0x60) |
108 | 125 | ||
109 | #define S3C2410_TFTPAL(x) S3C2410_LCDREG((0x400 + (x)*4)) | 126 | #define S3C2410_TFTPAL(x) S3C2410_LCDREG((0x400 + (x)*4)) |
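
The new S3C2410_LCDCON*_GET_* macros simply invert the existing shift-in helpers, presumably so the frame-buffer driver can recover timing fields from a programmed register value. A small round-trip illustration, with made-up field values:

    #include <asm/arch/regs-lcd.h>

    static unsigned long lcdcon2 = S3C2410_LCDCON2_VBPD(3) |
                                   S3C2410_LCDCON2_VFPD(7) |
                                   S3C2410_LCDCON2_VSPW(1);

    /* S3C2410_LCDCON2_GET_VBPD(lcdcon2) == 3
     * S3C2410_LCDCON2_GET_VFPD(lcdcon2) == 7
     * S3C2410_LCDCON2_GET_VSPW(lcdcon2) == 1 */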
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h index 1f906d09b688..cb4906b45555 100644 --- a/include/asm-arm/spinlock.h +++ b/include/asm-arm/spinlock.h | |||
@@ -16,21 +16,14 @@ | |||
16 | * Unlocked value: 0 | 16 | * Unlocked value: 0 |
17 | * Locked value: 1 | 17 | * Locked value: 1 |
18 | */ | 18 | */ |
19 | typedef struct { | ||
20 | volatile unsigned int lock; | ||
21 | #ifdef CONFIG_PREEMPT | ||
22 | unsigned int break_lock; | ||
23 | #endif | ||
24 | } spinlock_t; | ||
25 | 19 | ||
26 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 20 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
21 | #define __raw_spin_unlock_wait(lock) \ | ||
22 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
27 | 23 | ||
28 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0) | 24 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
29 | #define spin_is_locked(x) ((x)->lock != 0) | ||
30 | #define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x)) | ||
31 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
32 | 25 | ||
33 | static inline void _raw_spin_lock(spinlock_t *lock) | 26 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
34 | { | 27 | { |
35 | unsigned long tmp; | 28 | unsigned long tmp; |
36 | 29 | ||
@@ -47,7 +40,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
47 | smp_mb(); | 40 | smp_mb(); |
48 | } | 41 | } |
49 | 42 | ||
50 | static inline int _raw_spin_trylock(spinlock_t *lock) | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
51 | { | 44 | { |
52 | unsigned long tmp; | 45 | unsigned long tmp; |
53 | 46 | ||
@@ -67,7 +60,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
67 | } | 60 | } |
68 | } | 61 | } |
69 | 62 | ||
70 | static inline void _raw_spin_unlock(spinlock_t *lock) | 63 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
71 | { | 64 | { |
72 | smp_mb(); | 65 | smp_mb(); |
73 | 66 | ||
@@ -80,23 +73,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
80 | 73 | ||
81 | /* | 74 | /* |
82 | * RWLOCKS | 75 | * RWLOCKS |
83 | */ | 76 | * |
84 | typedef struct { | 77 | * |
85 | volatile unsigned int lock; | ||
86 | #ifdef CONFIG_PREEMPT | ||
87 | unsigned int break_lock; | ||
88 | #endif | ||
89 | } rwlock_t; | ||
90 | |||
91 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
92 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0) | ||
93 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0) | ||
94 | |||
95 | /* | ||
96 | * Write locks are easy - we just set bit 31. When unlocking, we can | 78 | * Write locks are easy - we just set bit 31. When unlocking, we can |
97 | * just write zero since the lock is exclusively held. | 79 | * just write zero since the lock is exclusively held. |
98 | */ | 80 | */ |
99 | static inline void _raw_write_lock(rwlock_t *rw) | 81 | #define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0) |
82 | |||
83 | static inline void __raw_write_lock(rwlock_t *rw) | ||
100 | { | 84 | { |
101 | unsigned long tmp; | 85 | unsigned long tmp; |
102 | 86 | ||
@@ -113,7 +97,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
113 | smp_mb(); | 97 | smp_mb(); |
114 | } | 98 | } |
115 | 99 | ||
116 | static inline int _raw_write_trylock(rwlock_t *rw) | 100 | static inline int __raw_write_trylock(rwlock_t *rw) |
117 | { | 101 | { |
118 | unsigned long tmp; | 102 | unsigned long tmp; |
119 | 103 | ||
@@ -133,7 +117,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
133 | } | 117 | } |
134 | } | 118 | } |
135 | 119 | ||
136 | static inline void _raw_write_unlock(rwlock_t *rw) | 120 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
137 | { | 121 | { |
138 | smp_mb(); | 122 | smp_mb(); |
139 | 123 | ||
@@ -156,7 +140,7 @@ static inline void _raw_write_unlock(rwlock_t *rw) | |||
156 | * currently active. However, we know we won't have any write | 140 | * currently active. However, we know we won't have any write |
157 | * locks. | 141 | * locks. |
158 | */ | 142 | */ |
159 | static inline void _raw_read_lock(rwlock_t *rw) | 143 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
160 | { | 144 | { |
161 | unsigned long tmp, tmp2; | 145 | unsigned long tmp, tmp2; |
162 | 146 | ||
@@ -173,7 +157,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
173 | smp_mb(); | 157 | smp_mb(); |
174 | } | 158 | } |
175 | 159 | ||
176 | static inline void _raw_read_unlock(rwlock_t *rw) | 160 | static inline void __raw_read_unlock(rwlock_t *rw) |
177 | { | 161 | { |
178 | unsigned long tmp, tmp2; | 162 | unsigned long tmp, tmp2; |
179 | 163 | ||
@@ -190,6 +174,6 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
190 | : "cc"); | 174 | : "cc"); |
191 | } | 175 | } |
192 | 176 | ||
193 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 177 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
194 | 178 | ||
195 | #endif /* __ASM_SPINLOCK_H */ | 179 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/include/asm-arm/spinlock_types.h b/include/asm-arm/spinlock_types.h new file mode 100644 index 000000000000..43e83f6d2ee5 --- /dev/null +++ b/include/asm-arm/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h index 278de61224d1..c49df635a80f 100644 --- a/include/asm-arm/unistd.h +++ b/include/asm-arm/unistd.h | |||
@@ -355,6 +355,9 @@ | |||
355 | #define __NR_inotify_init (__NR_SYSCALL_BASE+316) | 355 | #define __NR_inotify_init (__NR_SYSCALL_BASE+316) |
356 | #define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317) | 356 | #define __NR_inotify_add_watch (__NR_SYSCALL_BASE+317) |
357 | #define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318) | 357 | #define __NR_inotify_rm_watch (__NR_SYSCALL_BASE+318) |
358 | #define __NR_mbind (__NR_SYSCALL_BASE+319) | ||
359 | #define __NR_get_mempolicy (__NR_SYSCALL_BASE+320) | ||
360 | #define __NR_set_mempolicy (__NR_SYSCALL_BASE+321) | ||
358 | 361 | ||
359 | /* | 362 | /* |
360 | * The following SWIs are ARM private. | 363 | * The following SWIs are ARM private. |
diff --git a/include/asm-arm26/hardirq.h b/include/asm-arm26/hardirq.h index 791ee1da9bfa..dc28daab8aa8 100644 --- a/include/asm-arm26/hardirq.h +++ b/include/asm-arm26/hardirq.h | |||
@@ -22,8 +22,6 @@ typedef struct { | |||
22 | # error HARDIRQ_BITS is too low! | 22 | # error HARDIRQ_BITS is too low! |
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | #define irq_enter() (preempt_count() += HARDIRQ_OFFSET) | ||
26 | |||
27 | #ifndef CONFIG_SMP | 25 | #ifndef CONFIG_SMP |
28 | 26 | ||
29 | extern asmlinkage void __do_softirq(void); | 27 | extern asmlinkage void __do_softirq(void); |
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index faff403e1061..7d0298347ee7 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -23,7 +23,11 @@ | |||
23 | * and page free order so much.. | 23 | * and page free order so much.. |
24 | */ | 24 | */ |
25 | #ifdef CONFIG_SMP | 25 | #ifdef CONFIG_SMP |
26 | #define FREE_PTE_NR 506 | 26 | #ifdef ARCH_FREE_PTR_NR |
27 | #define FREE_PTR_NR ARCH_FREE_PTR_NR | ||
28 | #else | ||
29 | #define FREE_PTE_NR 506 | ||
30 | #endif | ||
27 | #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) | 31 | #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) |
28 | #else | 32 | #else |
29 | #define FREE_PTE_NR 1 | 33 | #define FREE_PTE_NR 1 |
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 6f857be2b644..a9c55490fb82 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -103,3 +103,41 @@ | |||
103 | VMLINUX_SYMBOL(__kprobes_text_start) = .; \ | 103 | VMLINUX_SYMBOL(__kprobes_text_start) = .; \ |
104 | *(.kprobes.text) \ | 104 | *(.kprobes.text) \ |
105 | VMLINUX_SYMBOL(__kprobes_text_end) = .; | 105 | VMLINUX_SYMBOL(__kprobes_text_end) = .; |
106 | |||
107 | /* DWARF debug sections. | ||
108 | Symbols in the DWARF debugging sections are relative to | ||
109 | the beginning of the section so we begin them at 0. */ | ||
110 | #define DWARF_DEBUG \ | ||
111 | /* DWARF 1 */ \ | ||
112 | .debug 0 : { *(.debug) } \ | ||
113 | .line 0 : { *(.line) } \ | ||
114 | /* GNU DWARF 1 extensions */ \ | ||
115 | .debug_srcinfo 0 : { *(.debug_srcinfo) } \ | ||
116 | .debug_sfnames 0 : { *(.debug_sfnames) } \ | ||
117 | /* DWARF 1.1 and DWARF 2 */ \ | ||
118 | .debug_aranges 0 : { *(.debug_aranges) } \ | ||
119 | .debug_pubnames 0 : { *(.debug_pubnames) } \ | ||
120 | /* DWARF 2 */ \ | ||
121 | .debug_info 0 : { *(.debug_info \ | ||
122 | .gnu.linkonce.wi.*) } \ | ||
123 | .debug_abbrev 0 : { *(.debug_abbrev) } \ | ||
124 | .debug_line 0 : { *(.debug_line) } \ | ||
125 | .debug_frame 0 : { *(.debug_frame) } \ | ||
126 | .debug_str 0 : { *(.debug_str) } \ | ||
127 | .debug_loc 0 : { *(.debug_loc) } \ | ||
128 | .debug_macinfo 0 : { *(.debug_macinfo) } \ | ||
129 | /* SGI/MIPS DWARF 2 extensions */ \ | ||
130 | .debug_weaknames 0 : { *(.debug_weaknames) } \ | ||
131 | .debug_funcnames 0 : { *(.debug_funcnames) } \ | ||
132 | .debug_typenames 0 : { *(.debug_typenames) } \ | ||
133 | .debug_varnames 0 : { *(.debug_varnames) } \ | ||
134 | |||
135 | /* Stabs debugging sections. */ | ||
136 | #define STABS_DEBUG \ | ||
137 | .stab 0 : { *(.stab) } \ | ||
138 | .stabstr 0 : { *(.stabstr) } \ | ||
139 | .stab.excl 0 : { *(.stab.excl) } \ | ||
140 | .stab.exclstr 0 : { *(.stab.exclstr) } \ | ||
141 | .stab.index 0 : { *(.stab.index) } \ | ||
142 | .stab.indexstr 0 : { *(.stab.indexstr) } \ | ||
143 | .comment 0 : { *(.comment) } | ||
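
The DWARF_DEBUG and STABS_DEBUG macros bundle the standard debug-info output sections so individual architectures do not have to repeat them. The intended use -- an assumption about placement, shown as a linker-script fragment -- is to drop them near the end of an architecture's vmlinux.lds.S SECTIONS block:

    /* Fragment of a hypothetical arch/<arch>/kernel/vmlinux.lds.S */
    SECTIONS
    {
            /* ... text, data, init, bss sections ... */

            STABS_DEBUG

            DWARF_DEBUG
    }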
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h index 6a1b1882285c..8c454aa58ac6 100644 --- a/include/asm-i386/apic.h +++ b/include/asm-i386/apic.h | |||
@@ -130,6 +130,8 @@ extern unsigned int nmi_watchdog; | |||
130 | #define NMI_LOCAL_APIC 2 | 130 | #define NMI_LOCAL_APIC 2 |
131 | #define NMI_INVALID 3 | 131 | #define NMI_INVALID 3 |
132 | 132 | ||
133 | extern int disable_timer_pin_1; | ||
134 | |||
133 | #else /* !CONFIG_X86_LOCAL_APIC */ | 135 | #else /* !CONFIG_X86_LOCAL_APIC */ |
134 | static inline void lapic_shutdown(void) { } | 136 | static inline void lapic_shutdown(void) { } |
135 | 137 | ||
diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h index 28ed8b296afc..75c67c785bb8 100644 --- a/include/asm-i386/div64.h +++ b/include/asm-i386/div64.h | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) | 36 | #define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) |
37 | 37 | ||
38 | extern inline long | 38 | static inline long |
39 | div_ll_X_l_rem(long long divs, long div, long *rem) | 39 | div_ll_X_l_rem(long long divs, long div, long *rem) |
40 | { | 40 | { |
41 | long dum2; | 41 | long dum2; |
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h index 521e227db679..06ae4d81ba6a 100644 --- a/include/asm-i386/mach-default/mach_reboot.h +++ b/include/asm-i386/mach-default/mach_reboot.h | |||
@@ -22,7 +22,15 @@ static inline void mach_reboot(void) | |||
22 | for (i = 0; i < 100; i++) { | 22 | for (i = 0; i < 100; i++) { |
23 | kb_wait(); | 23 | kb_wait(); |
24 | udelay(50); | 24 | udelay(50); |
25 | outb(0xfe, 0x64); /* pulse reset low */ | 25 | outb(0x60, 0x64); /* write Controller Command Byte */ |
26 | udelay(50); | ||
27 | kb_wait(); | ||
28 | udelay(50); | ||
29 | outb(0x14, 0x60); /* set "System flag" */ | ||
30 | udelay(50); | ||
31 | kb_wait(); | ||
32 | udelay(50); | ||
33 | outb(0xfe, 0x64); /* pulse reset low */ | ||
26 | udelay(50); | 34 | udelay(50); |
27 | } | 35 | } |
28 | } | 36 | } |
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h index 516421300ea2..348fe3a4879d 100644 --- a/include/asm-i386/mmzone.h +++ b/include/asm-i386/mmzone.h | |||
@@ -29,7 +29,7 @@ static inline void get_memcfg_numa(void) | |||
29 | #ifdef CONFIG_X86_NUMAQ | 29 | #ifdef CONFIG_X86_NUMAQ |
30 | if (get_memcfg_numaq()) | 30 | if (get_memcfg_numaq()) |
31 | return; | 31 | return; |
32 | #elif CONFIG_ACPI_SRAT | 32 | #elif defined(CONFIG_ACPI_SRAT) |
33 | if (get_memcfg_from_srat()) | 33 | if (get_memcfg_from_srat()) |
34 | return; | 34 | return; |
35 | #endif | 35 | #endif |
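
The mmzone.h hunk replaces a bare #elif CONFIG_ACPI_SRAT with #elif defined(CONFIG_ACPI_SRAT). With the bare form, a config symbol that is not defined is silently evaluated as 0 (and flagged under -Wundef); defined() tests for the macro's presence explicitly, which is what the surrounding #ifdef logic actually wants. A standalone illustration with hypothetical FEATURE_* names:

    /* Compile with: cc -Wundef -c example.c
     * FEATURE_B and FEATURE_C are deliberately left undefined. */
    #define FEATURE_A 0

    #if FEATURE_A
    int which = 1;
    #elif FEATURE_B             /* -Wundef: "FEATURE_B" is not defined */
    int which = 2;
    #elif defined(FEATURE_C)    /* no warning: only asks if the macro exists */
    int which = 3;
    #else
    int which = 0;
    #endif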
diff --git a/include/asm-i386/numa.h b/include/asm-i386/numa.h new file mode 100644 index 000000000000..96fcb157db1d --- /dev/null +++ b/include/asm-i386/numa.h | |||
@@ -0,0 +1,3 @@ | |||
1 | |||
2 | int pxm_to_nid(int pxm); | ||
3 | |||
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index 37bef8ed7bed..0a4ec764377c 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h | |||
@@ -679,7 +679,7 @@ static inline void rep_nop(void) | |||
679 | However we don't do prefetches for pre XP Athlons currently | 679 | However we don't do prefetches for pre XP Athlons currently |
680 | That should be fixed. */ | 680 | That should be fixed. */ |
681 | #define ARCH_HAS_PREFETCH | 681 | #define ARCH_HAS_PREFETCH |
682 | extern inline void prefetch(const void *x) | 682 | static inline void prefetch(const void *x) |
683 | { | 683 | { |
684 | alternative_input(ASM_NOP4, | 684 | alternative_input(ASM_NOP4, |
685 | "prefetchnta (%1)", | 685 | "prefetchnta (%1)", |
@@ -693,7 +693,7 @@ extern inline void prefetch(const void *x) | |||
693 | 693 | ||
694 | /* 3dnow! prefetch to get an exclusive cache line. Useful for | 694 | /* 3dnow! prefetch to get an exclusive cache line. Useful for |
695 | spinlocks to avoid one state transition in the cache coherency protocol. */ | 695 | spinlocks to avoid one state transition in the cache coherency protocol. */ |
696 | extern inline void prefetchw(const void *x) | 696 | static inline void prefetchw(const void *x) |
697 | { | 697 | { |
698 | alternative_input(ASM_NOP4, | 698 | alternative_input(ASM_NOP4, |
699 | "prefetchw (%1)", | 699 | "prefetchw (%1)", |
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index f9ff31f40036..23604350cdf4 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h | |||
@@ -7,46 +7,21 @@ | |||
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | 9 | ||
10 | asmlinkage int printk(const char * fmt, ...) | ||
11 | __attribute__ ((format (printf, 1, 2))); | ||
12 | |||
13 | /* | 10 | /* |
14 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 11 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
15 | */ | 12 | * |
16 | |||
17 | typedef struct { | ||
18 | volatile unsigned int slock; | ||
19 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
20 | unsigned magic; | ||
21 | #endif | ||
22 | #ifdef CONFIG_PREEMPT | ||
23 | unsigned int break_lock; | ||
24 | #endif | ||
25 | } spinlock_t; | ||
26 | |||
27 | #define SPINLOCK_MAGIC 0xdead4ead | ||
28 | |||
29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
30 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
31 | #else | ||
32 | #define SPINLOCK_MAGIC_INIT /* */ | ||
33 | #endif | ||
34 | |||
35 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
36 | |||
37 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
38 | |||
39 | /* | ||
40 | * Simple spin lock operations. There are two variants, one clears IRQ's | 13 | * Simple spin lock operations. There are two variants, one clears IRQ's |
41 | * on the local processor, one does not. | 14 | * on the local processor, one does not. |
42 | * | 15 | * |
43 | * We make no fairness assumptions. They have a cost. | 16 | * We make no fairness assumptions. They have a cost. |
17 | * | ||
18 | * (the type definitions are in asm/spinlock_types.h) | ||
44 | */ | 19 | */ |
45 | 20 | ||
46 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0) | 21 | #define __raw_spin_is_locked(x) \ |
47 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 22 | (*(volatile signed char *)(&(x)->slock) <= 0) |
48 | 23 | ||
49 | #define spin_lock_string \ | 24 | #define __raw_spin_lock_string \ |
50 | "\n1:\t" \ | 25 | "\n1:\t" \ |
51 | "lock ; decb %0\n\t" \ | 26 | "lock ; decb %0\n\t" \ |
52 | "jns 3f\n" \ | 27 | "jns 3f\n" \ |
@@ -57,7 +32,7 @@ typedef struct { | |||
57 | "jmp 1b\n" \ | 32 | "jmp 1b\n" \ |
58 | "3:\n\t" | 33 | "3:\n\t" |
59 | 34 | ||
60 | #define spin_lock_string_flags \ | 35 | #define __raw_spin_lock_string_flags \ |
61 | "\n1:\t" \ | 36 | "\n1:\t" \ |
62 | "lock ; decb %0\n\t" \ | 37 | "lock ; decb %0\n\t" \ |
63 | "jns 4f\n\t" \ | 38 | "jns 4f\n\t" \ |
@@ -73,86 +48,71 @@ typedef struct { | |||
73 | "jmp 1b\n" \ | 48 | "jmp 1b\n" \ |
74 | "4:\n\t" | 49 | "4:\n\t" |
75 | 50 | ||
51 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
52 | { | ||
53 | __asm__ __volatile__( | ||
54 | __raw_spin_lock_string | ||
55 | :"=m" (lock->slock) : : "memory"); | ||
56 | } | ||
57 | |||
58 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
59 | { | ||
60 | __asm__ __volatile__( | ||
61 | __raw_spin_lock_string_flags | ||
62 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
63 | } | ||
64 | |||
65 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
66 | { | ||
67 | char oldval; | ||
68 | __asm__ __volatile__( | ||
69 | "xchgb %b0,%1" | ||
70 | :"=q" (oldval), "=m" (lock->slock) | ||
71 | :"0" (0) : "memory"); | ||
72 | return oldval > 0; | ||
73 | } | ||
74 | |||
76 | /* | 75 | /* |
77 | * This works. Despite all the confusion. | 76 | * __raw_spin_unlock based on writing $1 to the low byte. |
78 | * (except on PPro SMP or if we are using OOSTORE) | 77 | * This method works. Despite all the confusion. |
78 | * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) | ||
79 | * (PPro errata 66, 92) | 79 | * (PPro errata 66, 92) |
80 | */ | 80 | */ |
81 | 81 | ||
82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | 82 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) |
83 | 83 | ||
84 | #define spin_unlock_string \ | 84 | #define __raw_spin_unlock_string \ |
85 | "movb $1,%0" \ | 85 | "movb $1,%0" \ |
86 | :"=m" (lock->slock) : : "memory" | 86 | :"=m" (lock->slock) : : "memory" |
87 | 87 | ||
88 | 88 | ||
89 | static inline void _raw_spin_unlock(spinlock_t *lock) | 89 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
90 | { | 90 | { |
91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
92 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
93 | BUG_ON(!spin_is_locked(lock)); | ||
94 | #endif | ||
95 | __asm__ __volatile__( | 91 | __asm__ __volatile__( |
96 | spin_unlock_string | 92 | __raw_spin_unlock_string |
97 | ); | 93 | ); |
98 | } | 94 | } |
99 | 95 | ||
100 | #else | 96 | #else |
101 | 97 | ||
102 | #define spin_unlock_string \ | 98 | #define __raw_spin_unlock_string \ |
103 | "xchgb %b0, %1" \ | 99 | "xchgb %b0, %1" \ |
104 | :"=q" (oldval), "=m" (lock->slock) \ | 100 | :"=q" (oldval), "=m" (lock->slock) \ |
105 | :"0" (oldval) : "memory" | 101 | :"0" (oldval) : "memory" |
106 | 102 | ||
107 | static inline void _raw_spin_unlock(spinlock_t *lock) | 103 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
108 | { | 104 | { |
109 | char oldval = 1; | 105 | char oldval = 1; |
110 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
111 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
112 | BUG_ON(!spin_is_locked(lock)); | ||
113 | #endif | ||
114 | __asm__ __volatile__( | ||
115 | spin_unlock_string | ||
116 | ); | ||
117 | } | ||
118 | 106 | ||
119 | #endif | ||
120 | |||
121 | static inline int _raw_spin_trylock(spinlock_t *lock) | ||
122 | { | ||
123 | char oldval; | ||
124 | __asm__ __volatile__( | 107 | __asm__ __volatile__( |
125 | "xchgb %b0,%1" | 108 | __raw_spin_unlock_string |
126 | :"=q" (oldval), "=m" (lock->slock) | 109 | ); |
127 | :"0" (0) : "memory"); | ||
128 | return oldval > 0; | ||
129 | } | 110 | } |
130 | 111 | ||
131 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
132 | { | ||
133 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
134 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
135 | printk("eip: %p\n", __builtin_return_address(0)); | ||
136 | BUG(); | ||
137 | } | ||
138 | #endif | 112 | #endif |
139 | __asm__ __volatile__( | ||
140 | spin_lock_string | ||
141 | :"=m" (lock->slock) : : "memory"); | ||
142 | } | ||
143 | 113 | ||
144 | static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | 114 | #define __raw_spin_unlock_wait(lock) \ |
145 | { | 115 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
146 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
147 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
148 | printk("eip: %p\n", __builtin_return_address(0)); | ||
149 | BUG(); | ||
150 | } | ||
151 | #endif | ||
152 | __asm__ __volatile__( | ||
153 | spin_lock_string_flags | ||
154 | :"=m" (lock->slock) : "r" (flags) : "memory"); | ||
155 | } | ||
156 | 116 | ||
157 | /* | 117 | /* |
158 | * Read-write spinlocks, allowing multiple readers | 118 | * Read-write spinlocks, allowing multiple readers |
@@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | |||
163 | * can "mix" irq-safe locks - any writer needs to get a | 123 | * can "mix" irq-safe locks - any writer needs to get a |
164 | * irq-safe write-lock, but readers can get non-irqsafe | 124 | * irq-safe write-lock, but readers can get non-irqsafe |
165 | * read-locks. | 125 | * read-locks. |
126 | * | ||
127 | * On x86, we implement read-write locks as a 32-bit counter | ||
128 | * with the high bit (sign) being the "contended" bit. | ||
129 | * | ||
130 | * The inline assembly is non-obvious. Think about it. | ||
131 | * | ||
132 | * Changed to use the same technique as rw semaphores. See | ||
133 | * semaphore.h for details. -ben | ||
134 | * | ||
135 | * the helpers are in arch/i386/kernel/semaphore.c | ||
166 | */ | 136 | */ |
167 | typedef struct { | ||
168 | volatile unsigned int lock; | ||
169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
170 | unsigned magic; | ||
171 | #endif | ||
172 | #ifdef CONFIG_PREEMPT | ||
173 | unsigned int break_lock; | ||
174 | #endif | ||
175 | } rwlock_t; | ||
176 | |||
177 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
178 | |||
179 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
180 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
181 | #else | ||
182 | #define RWLOCK_MAGIC_INIT /* */ | ||
183 | #endif | ||
184 | |||
185 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
186 | |||
187 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
188 | 137 | ||
189 | /** | 138 | /** |
190 | * read_can_lock - would read_trylock() succeed? | 139 | * read_can_lock - would read_trylock() succeed? |
191 | * @lock: the rwlock in question. | 140 | * @lock: the rwlock in question. |
192 | */ | 141 | */ |
193 | #define read_can_lock(x) ((int)(x)->lock > 0) | 142 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
194 | 143 | ||
195 | /** | 144 | /** |
196 | * write_can_lock - would write_trylock() succeed? | 145 | * write_can_lock - would write_trylock() succeed? |
197 | * @lock: the rwlock in question. | 146 | * @lock: the rwlock in question. |
198 | */ | 147 | */ |
199 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 148 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
200 | 149 | ||
201 | /* | 150 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
202 | * On x86, we implement read-write locks as a 32-bit counter | ||
203 | * with the high bit (sign) being the "contended" bit. | ||
204 | * | ||
205 | * The inline assembly is non-obvious. Think about it. | ||
206 | * | ||
207 | * Changed to use the same technique as rw semaphores. See | ||
208 | * semaphore.h for details. -ben | ||
209 | */ | ||
210 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
211 | |||
212 | static inline void _raw_read_lock(rwlock_t *rw) | ||
213 | { | 151 | { |
214 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
215 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
216 | #endif | ||
217 | __build_read_lock(rw, "__read_lock_failed"); | 152 | __build_read_lock(rw, "__read_lock_failed"); |
218 | } | 153 | } |
219 | 154 | ||
220 | static inline void _raw_write_lock(rwlock_t *rw) | 155 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
221 | { | 156 | { |
222 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
223 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
224 | #endif | ||
225 | __build_write_lock(rw, "__write_lock_failed"); | 157 | __build_write_lock(rw, "__write_lock_failed"); |
226 | } | 158 | } |
227 | 159 | ||
228 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | 160 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
229 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
230 | |||
231 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
232 | { | 161 | { |
233 | atomic_t *count = (atomic_t *)lock; | 162 | atomic_t *count = (atomic_t *)lock; |
234 | atomic_dec(count); | 163 | atomic_dec(count); |
@@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock) | |||
238 | return 0; | 167 | return 0; |
239 | } | 168 | } |
240 | 169 | ||
241 | static inline int _raw_write_trylock(rwlock_t *lock) | 170 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
242 | { | 171 | { |
243 | atomic_t *count = (atomic_t *)lock; | 172 | atomic_t *count = (atomic_t *)lock; |
244 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 173 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
@@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock) | |||
247 | return 0; | 176 | return 0; |
248 | } | 177 | } |
249 | 178 | ||
179 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
180 | { | ||
181 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
182 | } | ||
183 | |||
184 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
185 | { | ||
186 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0" | ||
187 | : "=m" (rw->lock) : : "memory"); | ||
188 | } | ||
189 | |||
250 | #endif /* __ASM_SPINLOCK_H */ | 190 | #endif /* __ASM_SPINLOCK_H */ |
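
The comment kept above describes the i386 rwlock as a 32-bit counter whose sign bit marks contention: each reader subtracts 1, a writer subtracts the whole RW_LOCK_BIAS, and the new __raw_read_unlock()/__raw_write_unlock() add the same amounts back. A non-atomic model of the trylock arithmetic, for illustration only (the real code uses lock-prefixed instructions plus the __read_lock_failed/__write_lock_failed slow paths):

    #define RW_LOCK_BIAS 0x01000000     /* i386 value, taken as given here */

    static int model_read_trylock(int *count)
    {
            *count -= 1;                /* atomic_dec() in the real code */
            if (*count >= 0)            /* no writer present */
                    return 1;
            *count += 1;                /* undo and fail */
            return 0;
    }

    static int model_write_trylock(int *count)
    {
            *count -= RW_LOCK_BIAS;     /* atomic_sub_and_test() in the real code */
            if (*count == 0)            /* no readers, no writer */
                    return 1;
            *count += RW_LOCK_BIAS;     /* undo and fail */
            return 0;
    }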
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h new file mode 100644 index 000000000000..59efe849f351 --- /dev/null +++ b/include/asm-i386/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index e2cb9fa6f563..8fbf791651bf 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h | |||
@@ -48,7 +48,7 @@ struct thread_info { | |||
48 | 48 | ||
49 | #else /* !__ASSEMBLY__ */ | 49 | #else /* !__ASSEMBLY__ */ |
50 | 50 | ||
51 | #include <asm/asm_offsets.h> | 51 | #include <asm/asm-offsets.h> |
52 | 52 | ||
53 | #endif | 53 | #endif |
54 | 54 | ||
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index 2461b731781e..0ec27c9e8e45 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h | |||
@@ -60,7 +60,7 @@ static inline int node_to_first_cpu(int node) | |||
60 | return first_cpu(mask); | 60 | return first_cpu(mask); |
61 | } | 61 | } |
62 | 62 | ||
63 | #define pcibus_to_node(bus) mp_bus_id_to_node[(bus)->number] | 63 | #define pcibus_to_node(bus) ((long) (bus)->sysdata) |
64 | #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) | 64 | #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) |
65 | 65 | ||
66 | /* sched_domains SD_NODE_INIT for NUMAQ machines */ | 66 | /* sched_domains SD_NODE_INIT for NUMAQ machines */ |
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index a7cb377745bf..fbaf90a3968c 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h | |||
@@ -332,7 +332,7 @@ type name(type1 arg1) \ | |||
332 | long __res; \ | 332 | long __res; \ |
333 | __asm__ volatile ("int $0x80" \ | 333 | __asm__ volatile ("int $0x80" \ |
334 | : "=a" (__res) \ | 334 | : "=a" (__res) \ |
335 | : "0" (__NR_##name),"b" ((long)(arg1))); \ | 335 | : "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \ |
336 | __syscall_return(type,__res); \ | 336 | __syscall_return(type,__res); \ |
337 | } | 337 | } |
338 | 338 | ||
@@ -342,7 +342,7 @@ type name(type1 arg1,type2 arg2) \ | |||
342 | long __res; \ | 342 | long __res; \ |
343 | __asm__ volatile ("int $0x80" \ | 343 | __asm__ volatile ("int $0x80" \ |
344 | : "=a" (__res) \ | 344 | : "=a" (__res) \ |
345 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2))); \ | 345 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \ |
346 | __syscall_return(type,__res); \ | 346 | __syscall_return(type,__res); \ |
347 | } | 347 | } |
348 | 348 | ||
@@ -353,7 +353,7 @@ long __res; \ | |||
353 | __asm__ volatile ("int $0x80" \ | 353 | __asm__ volatile ("int $0x80" \ |
354 | : "=a" (__res) \ | 354 | : "=a" (__res) \ |
355 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | 355 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ |
356 | "d" ((long)(arg3))); \ | 356 | "d" ((long)(arg3)) : "memory"); \ |
357 | __syscall_return(type,__res); \ | 357 | __syscall_return(type,__res); \ |
358 | } | 358 | } |
359 | 359 | ||
@@ -364,7 +364,7 @@ long __res; \ | |||
364 | __asm__ volatile ("int $0x80" \ | 364 | __asm__ volatile ("int $0x80" \ |
365 | : "=a" (__res) \ | 365 | : "=a" (__res) \ |
366 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | 366 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ |
367 | "d" ((long)(arg3)),"S" ((long)(arg4))); \ | 367 | "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \ |
368 | __syscall_return(type,__res); \ | 368 | __syscall_return(type,__res); \ |
369 | } | 369 | } |
370 | 370 | ||
@@ -376,7 +376,7 @@ long __res; \ | |||
376 | __asm__ volatile ("int $0x80" \ | 376 | __asm__ volatile ("int $0x80" \ |
377 | : "=a" (__res) \ | 377 | : "=a" (__res) \ |
378 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | 378 | : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ |
379 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5))); \ | 379 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \ |
380 | __syscall_return(type,__res); \ | 380 | __syscall_return(type,__res); \ |
381 | } | 381 | } |
382 | 382 | ||
@@ -389,7 +389,7 @@ __asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; p | |||
389 | : "=a" (__res) \ | 389 | : "=a" (__res) \ |
390 | : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ | 390 | : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ |
391 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \ | 391 | "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \ |
392 | "0" ((long)(arg6))); \ | 392 | "0" ((long)(arg6)) : "memory"); \ |
393 | __syscall_return(type,__res); \ | 393 | __syscall_return(type,__res); \ |
394 | } | 394 | } |
395 | 395 | ||
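
Each _syscallN() wrapper above gains a "memory" clobber on its inline asm. Without it the compiler may keep values in registers or reorder memory accesses across the int $0x80, even though the kernel can read or write user memory during the call (for instance a buffer handed to a read-style syscall). A cut-down sketch of what the three-argument form expands to after this change -- the wrapper name is hypothetical and __syscall_return() is elided:

    static long example_read_wrapper(int fd, void *buf, long count)
    {
            long __res;

            __asm__ volatile ("int $0x80"
                    : "=a" (__res)
                    : "0" (3 /* __NR_read on i386 */),
                      "b" ((long) fd), "c" ((long) buf), "d" ((long) count)
                    : "memory");        /* new: the kernel writes into *buf */
            return __res;
    }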
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h index 149ad0118455..97a28b8b2ddd 100644 --- a/include/asm-ia64/mca.h +++ b/include/asm-ia64/mca.h | |||
@@ -11,8 +11,6 @@ | |||
11 | #ifndef _ASM_IA64_MCA_H | 11 | #ifndef _ASM_IA64_MCA_H |
12 | #define _ASM_IA64_MCA_H | 12 | #define _ASM_IA64_MCA_H |
13 | 13 | ||
14 | #define IA64_MCA_STACK_SIZE 8192 | ||
15 | |||
16 | #if !defined(__ASSEMBLY__) | 14 | #if !defined(__ASSEMBLY__) |
17 | 15 | ||
18 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
@@ -48,7 +46,8 @@ typedef union cmcv_reg_u { | |||
48 | 46 | ||
49 | enum { | 47 | enum { |
50 | IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0, | 48 | IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0, |
51 | IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1 | 49 | IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1, |
50 | IA64_MCA_RENDEZ_CHECKIN_INIT = 0x2, | ||
52 | }; | 51 | }; |
53 | 52 | ||
54 | /* Information maintained by the MC infrastructure */ | 53 | /* Information maintained by the MC infrastructure */ |
@@ -63,18 +62,42 @@ typedef struct ia64_mc_info_s { | |||
63 | 62 | ||
64 | } ia64_mc_info_t; | 63 | } ia64_mc_info_t; |
65 | 64 | ||
66 | typedef struct ia64_mca_sal_to_os_state_s { | 65 | /* Handover state from SAL to OS and vice versa, for both MCA and INIT events. |
67 | u64 imsto_os_gp; /* GP of the os registered with the SAL */ | 66 | * Besides the handover state, it also contains some saved registers from the |
68 | u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */ | 67 | * time of the event. |
69 | u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */ | 68 | * Note: mca_asm.S depends on the precise layout of this structure. |
70 | u64 imsto_sal_gp; /* GP of the SAL - physical */ | 69 | */ |
71 | u64 imsto_rendez_state; /* Rendez state information */ | 70 | |
72 | u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going | 71 | struct ia64_sal_os_state { |
73 | * back to SAL from OS after MCA handling. | 72 | /* SAL to OS, must be at offset 0 */ |
74 | */ | 73 | u64 os_gp; /* GP of the os registered with the SAL, physical */ |
75 | u64 pal_min_state; /* from PAL in r17 */ | 74 | u64 pal_proc; /* PAL_PROC entry point, physical */ |
76 | u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */ | 75 | u64 sal_proc; /* SAL_PROC entry point, physical */ |
77 | } ia64_mca_sal_to_os_state_t; | 76 | u64 rv_rc; /* MCA - Rendezvous state, INIT - reason code */ |
77 | u64 proc_state_param; /* from R18 */ | ||
78 | u64 monarch; /* 1 for a monarch event, 0 for a slave */ | ||
79 | /* common, must follow SAL to OS */ | ||
80 | u64 sal_ra; /* Return address in SAL, physical */ | ||
81 | u64 sal_gp; /* GP of the SAL - physical */ | ||
82 | pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */ | ||
83 | u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */ | ||
84 | struct task_struct *prev_task; /* previous task, NULL if it is not useful */ | ||
85 | /* Some interrupt registers are not saved in minstate, pt_regs or | ||
86 | * switch_stack. Because MCA/INIT can occur when interrupts are | ||
87 | * disabled, we need to save the additional interrupt registers over | ||
88 | * MCA/INIT and resume. | ||
89 | */ | ||
90 | u64 isr; | ||
91 | u64 ifa; | ||
92 | u64 itir; | ||
93 | u64 iipa; | ||
94 | u64 iim; | ||
95 | u64 iha; | ||
96 | /* OS to SAL, must follow common */ | ||
97 | u64 os_status; /* OS status to SAL, enum below */ | ||
98 | u64 context; /* 0 if return to same context | ||
99 | 1 if return to new context */ | ||
100 | }; | ||
78 | 101 | ||
79 | enum { | 102 | enum { |
80 | IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */ | 103 | IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */ |
@@ -84,35 +107,21 @@ enum { | |||
84 | }; | 107 | }; |
85 | 108 | ||
86 | enum { | 109 | enum { |
110 | IA64_INIT_RESUME = 0x0, /* Resume after return from INIT */ | ||
111 | IA64_INIT_WARM_BOOT = -1, /* Warm boot of the system need from SAL */ | ||
112 | }; | ||
113 | |||
114 | enum { | ||
87 | IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */ | 115 | IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */ |
88 | IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */ | 116 | IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */ |
89 | }; | 117 | }; |
90 | 118 | ||
91 | typedef struct ia64_mca_os_to_sal_state_s { | ||
92 | u64 imots_os_status; /* OS status to SAL as to what happened | ||
93 | * with the MCA handling. | ||
94 | */ | ||
95 | u64 imots_sal_gp; /* GP of the SAL - physical */ | ||
96 | u64 imots_context; /* 0 if return to same context | ||
97 | 1 if return to new context */ | ||
98 | u64 *imots_new_min_state; /* Pointer to structure containing | ||
99 | * new values of registers in the min state | ||
100 | * save area. | ||
101 | */ | ||
102 | u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going | ||
103 | * back to SAL from OS after MCA handling. | ||
104 | */ | ||
105 | } ia64_mca_os_to_sal_state_t; | ||
106 | |||
107 | /* Per-CPU MCA state that is too big for normal per-CPU variables. */ | 119 | /* Per-CPU MCA state that is too big for normal per-CPU variables. */ |
108 | 120 | ||
109 | struct ia64_mca_cpu { | 121 | struct ia64_mca_cpu { |
110 | u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */ | 122 | u64 mca_stack[KERNEL_STACK_SIZE/8]; |
111 | u64 proc_state_dump[512]; | ||
112 | u64 stackframe[32]; | ||
113 | u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */ | ||
114 | u64 init_stack[KERNEL_STACK_SIZE/8]; | 123 | u64 init_stack[KERNEL_STACK_SIZE/8]; |
115 | } __attribute__ ((aligned(16))); | 124 | }; |
116 | 125 | ||
117 | /* Array of physical addresses of each CPU's MCA area. */ | 126 | /* Array of physical addresses of each CPU's MCA area. */ |
118 | extern unsigned long __per_cpu_mca[NR_CPUS]; | 127 | extern unsigned long __per_cpu_mca[NR_CPUS]; |
@@ -121,12 +130,29 @@ extern void ia64_mca_init(void); | |||
121 | extern void ia64_mca_cpu_init(void *); | 130 | extern void ia64_mca_cpu_init(void *); |
122 | extern void ia64_os_mca_dispatch(void); | 131 | extern void ia64_os_mca_dispatch(void); |
123 | extern void ia64_os_mca_dispatch_end(void); | 132 | extern void ia64_os_mca_dispatch_end(void); |
124 | extern void ia64_mca_ucmc_handler(void); | 133 | extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *); |
134 | extern void ia64_init_handler(struct pt_regs *, | ||
135 | struct switch_stack *, | ||
136 | struct ia64_sal_os_state *); | ||
125 | extern void ia64_monarch_init_handler(void); | 137 | extern void ia64_monarch_init_handler(void); |
126 | extern void ia64_slave_init_handler(void); | 138 | extern void ia64_slave_init_handler(void); |
127 | extern void ia64_mca_cmc_vector_setup(void); | 139 | extern void ia64_mca_cmc_vector_setup(void); |
128 | extern int ia64_reg_MCA_extension(void*); | 140 | extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)); |
129 | extern void ia64_unreg_MCA_extension(void); | 141 | extern void ia64_unreg_MCA_extension(void); |
142 | extern u64 ia64_get_rnat(u64 *); | ||
143 | |||
144 | #else /* __ASSEMBLY__ */ | ||
145 | |||
146 | #define IA64_MCA_CORRECTED 0x0 /* Error has been corrected by OS_MCA */ | ||
147 | #define IA64_MCA_WARM_BOOT -1 /* Warm boot of the system need from SAL */ | ||
148 | #define IA64_MCA_COLD_BOOT -2 /* Cold boot of the system need from SAL */ | ||
149 | #define IA64_MCA_HALT -3 /* System to be halted by SAL */ | ||
150 | |||
151 | #define IA64_INIT_RESUME 0x0 /* Resume after return from INIT */ | ||
152 | #define IA64_INIT_WARM_BOOT -1 /* Warm boot of the system need from SAL */ | ||
153 | |||
154 | #define IA64_MCA_SAME_CONTEXT 0x0 /* SAL to return to same context */ | ||
155 | #define IA64_MCA_NEW_CONTEXT -1 /* SAL to return to new context */ | ||
130 | 156 | ||
131 | #endif /* !__ASSEMBLY__ */ | 157 | #endif /* !__ASSEMBLY__ */ |
132 | #endif /* _ASM_IA64_MCA_H */ | 158 | #endif /* _ASM_IA64_MCA_H */ |
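
Note that ia64_reg_MCA_extension() also changes type above: the registered handler now receives the new struct ia64_sal_os_state along with its data pointer. A hedged sketch of registration under the new prototype -- the handler name, body, and return-value handling are placeholders, not taken from this patch:

    /* Placeholder platform handler matching the new prototype. */
    static int example_mca_extension(void *data, struct ia64_sal_os_state *sos)
    {
            /* A real handler would examine sos->proc_state_param,
             * sos->pal_min_state and friends before deciding what to report. */
            return 0;
    }

    static void example_register_extension(void)
    {
            /* A non-zero return is assumed to mean another extension is
             * already installed. */
            if (ia64_reg_MCA_extension(example_mca_extension))
                    return;
    }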
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h index 836953e0f91f..27c9203d8ce3 100644 --- a/include/asm-ia64/mca_asm.h +++ b/include/asm-ia64/mca_asm.h | |||
@@ -8,6 +8,8 @@ | |||
8 | * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> | 8 | * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> |
9 | * Copyright (C) 2002 Intel Corp. | 9 | * Copyright (C) 2002 Intel Corp. |
10 | * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com> | 10 | * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com> |
11 | * Copyright (C) 2005 Silicon Graphics, Inc | ||
12 | * Copyright (C) 2005 Keith Owens <kaos@sgi.com> | ||
11 | */ | 13 | */ |
12 | #ifndef _ASM_IA64_MCA_ASM_H | 14 | #ifndef _ASM_IA64_MCA_ASM_H |
13 | #define _ASM_IA64_MCA_ASM_H | 15 | #define _ASM_IA64_MCA_ASM_H |
@@ -207,106 +209,33 @@ | |||
207 | ;; | 209 | ;; |
208 | 210 | ||
209 | /* | 211 | /* |
210 | * The following offsets capture the order in which the | 212 | * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel |
211 | * RSE related registers from the old context are | 213 | * stacks, except that the SAL/OS state and a switch_stack are stored near the |
212 | * saved onto the new stack frame. | 214 | * top of the MCA/INIT stack. To support concurrent entry to MCA or INIT, as |
215 | * well as MCA over INIT, each event needs its own SAL/OS state. All entries | ||
216 | * are 16 byte aligned. | ||
213 | * | 217 | * |
214 | * +-----------------------+ | 218 | * +---------------------------+ |
215 | * |NDIRTY [BSP - BSPSTORE]| | 219 | * | pt_regs | |
216 | * +-----------------------+ | 220 | * +---------------------------+ |
217 | * | RNAT | | 221 | * | switch_stack | |
218 | * +-----------------------+ | 222 | * +---------------------------+ |
219 | * | BSPSTORE | | 223 | * | SAL/OS state | |
220 | * +-----------------------+ | 224 | * +---------------------------+ |
221 | * | IFS | | 225 | * | 16 byte scratch area | |
222 | * +-----------------------+ | 226 | * +---------------------------+ <-------- SP at start of C MCA handler |
223 | * | PFS | | 227 | * | ..... | |
224 | * +-----------------------+ | 228 | * +---------------------------+ |
225 | * | RSC | | 229 | * | RBS for MCA/INIT handler | |
226 | * +-----------------------+ <-------- Bottom of new stack frame | 230 | * +---------------------------+ |
231 | * | struct task for MCA/INIT | | ||
232 | * +---------------------------+ <-------- Bottom of MCA/INIT stack | ||
227 | */ | 233 | */ |
228 | #define rse_rsc_offset 0 | ||
229 | #define rse_pfs_offset (rse_rsc_offset+0x08) | ||
230 | #define rse_ifs_offset (rse_pfs_offset+0x08) | ||
231 | #define rse_bspstore_offset (rse_ifs_offset+0x08) | ||
232 | #define rse_rnat_offset (rse_bspstore_offset+0x08) | ||
233 | #define rse_ndirty_offset (rse_rnat_offset+0x08) | ||
234 | 234 | ||
235 | /* | 235 | #define ALIGN16(x) ((x)&~15) |
236 | * rse_switch_context | 236 | #define MCA_PT_REGS_OFFSET ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE) |
237 | * | 237 | #define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE) |
238 | * 1. Save old RSC onto the new stack frame | 238 | #define MCA_SOS_OFFSET ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE) |
239 | * 2. Save PFS onto new stack frame | 239 | #define MCA_SP_OFFSET ALIGN16(MCA_SOS_OFFSET-16) |
240 | * 3. Cover the old frame and start a new frame. | ||
241 | * 4. Save IFS onto new stack frame | ||
242 | * 5. Save the old BSPSTORE on the new stack frame | ||
243 | * 6. Save the old RNAT on the new stack frame | ||
244 | * 7. Write BSPSTORE with the new backing store pointer | ||
245 | * 8. Read and save the new BSP to calculate the #dirty registers | ||
246 | * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2 | ||
247 | */ | ||
248 | #define rse_switch_context(temp,p_stackframe,p_bspstore) \ | ||
249 | ;; \ | ||
250 | mov temp=ar.rsc;; \ | ||
251 | st8 [p_stackframe]=temp,8;; \ | ||
252 | mov temp=ar.pfs;; \ | ||
253 | st8 [p_stackframe]=temp,8; \ | ||
254 | cover ;; \ | ||
255 | mov temp=cr.ifs;; \ | ||
256 | st8 [p_stackframe]=temp,8;; \ | ||
257 | mov temp=ar.bspstore;; \ | ||
258 | st8 [p_stackframe]=temp,8;; \ | ||
259 | mov temp=ar.rnat;; \ | ||
260 | st8 [p_stackframe]=temp,8; \ | ||
261 | mov ar.bspstore=p_bspstore;; \ | ||
262 | mov temp=ar.bsp;; \ | ||
263 | sub temp=temp,p_bspstore;; \ | ||
264 | st8 [p_stackframe]=temp,8;; | ||
265 | |||
266 | /* | ||
267 | * rse_return_context | ||
268 | * 1. Allocate a zero-sized frame | ||
269 | * 2. Store the number of dirty registers RSC.loadrs field | ||
270 | * 3. Issue a loadrs to ensure that any registers from the interrupted | ||
271 | * context which were saved on the new stack frame have been loaded | ||
272 | * back into the stacked registers | ||
273 | * 4. Restore BSPSTORE | ||
274 | * 5. Restore RNAT | ||
275 | * 6. Restore PFS | ||
276 | * 7. Restore IFS | ||
277 | * 8. Restore RSC | ||
278 | * 9. Issue an RFI | ||
279 | */ | ||
280 | #define rse_return_context(psr_mask_reg,temp,p_stackframe) \ | ||
281 | ;; \ | ||
282 | alloc temp=ar.pfs,0,0,0,0; \ | ||
283 | add p_stackframe=rse_ndirty_offset,p_stackframe;; \ | ||
284 | ld8 temp=[p_stackframe];; \ | ||
285 | shl temp=temp,16;; \ | ||
286 | mov ar.rsc=temp;; \ | ||
287 | loadrs;; \ | ||
288 | add p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\ | ||
289 | ld8 temp=[p_stackframe];; \ | ||
290 | mov ar.bspstore=temp;; \ | ||
291 | add p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\ | ||
292 | ld8 temp=[p_stackframe];; \ | ||
293 | mov ar.rnat=temp;; \ | ||
294 | add p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \ | ||
295 | ld8 temp=[p_stackframe];; \ | ||
296 | mov ar.pfs=temp;; \ | ||
297 | add p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \ | ||
298 | ld8 temp=[p_stackframe];; \ | ||
299 | mov cr.ifs=temp;; \ | ||
300 | add p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \ | ||
301 | ld8 temp=[p_stackframe];; \ | ||
302 | mov ar.rsc=temp ; \ | ||
303 | mov temp=psr;; \ | ||
304 | or temp=temp,psr_mask_reg;; \ | ||
305 | mov cr.ipsr=temp;; \ | ||
306 | mov temp=ip;; \ | ||
307 | add temp=0x30,temp;; \ | ||
308 | mov cr.iip=temp;; \ | ||
309 | srlz.i;; \ | ||
310 | rfi;; | ||
311 | 240 | ||
312 | #endif /* _ASM_IA64_MCA_ASM_H */ | 241 | #endif /* _ASM_IA64_MCA_ASM_H */ |
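The replacement macros derive the MCA/INIT stack layout top-down from the kernel stack size, 16-byte aligning each region. The standalone sketch below only re-runs that arithmetic with placeholder sizes (the real constants come from asm-offsets) to make the resulting offsets visible.

```c
#include <stdio.h>

/* Placeholder sizes -- the kernel gets these from asm-offsets. */
#define KERNEL_STACK_SIZE	(32 * 1024)
#define IA64_PT_REGS_SIZE	0x2f0
#define IA64_SWITCH_STACK_SIZE	0x410
#define IA64_SAL_OS_STATE_SIZE	0x160

/* Same derivation as the header above: work down from the stack top. */
#define ALIGN16(x)		((x) & ~15)
#define MCA_PT_REGS_OFFSET	ALIGN16(KERNEL_STACK_SIZE - IA64_PT_REGS_SIZE)
#define MCA_SWITCH_STACK_OFFSET	ALIGN16(MCA_PT_REGS_OFFSET - IA64_SWITCH_STACK_SIZE)
#define MCA_SOS_OFFSET		ALIGN16(MCA_SWITCH_STACK_OFFSET - IA64_SAL_OS_STATE_SIZE)
#define MCA_SP_OFFSET		ALIGN16(MCA_SOS_OFFSET - 16)

int main(void)
{
	printf("pt_regs       at stack + 0x%x\n", MCA_PT_REGS_OFFSET);
	printf("switch_stack  at stack + 0x%x\n", MCA_SWITCH_STACK_OFFSET);
	printf("SAL/OS state  at stack + 0x%x\n", MCA_SOS_OFFSET);
	printf("initial SP    at stack + 0x%x\n", MCA_SP_OFFSET);
	return 0;
}
```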
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h index 0bef19538406..fc544929ac34 100644 --- a/include/asm-ia64/ptrace.h +++ b/include/asm-ia64/ptrace.h | |||
@@ -57,7 +57,7 @@ | |||
57 | #include <linux/config.h> | 57 | #include <linux/config.h> |
58 | 58 | ||
59 | #include <asm/fpu.h> | 59 | #include <asm/fpu.h> |
60 | #include <asm/offsets.h> | 60 | #include <asm/asm-offsets.h> |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * Base-2 logarithm of number of pages to allocate per task structure | 63 | * Base-2 logarithm of number of pages to allocate per task structure |
@@ -119,7 +119,7 @@ struct pt_regs { | |||
119 | unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ | 119 | unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ |
120 | unsigned long ar_pfs; /* prev function state */ | 120 | unsigned long ar_pfs; /* prev function state */ |
121 | unsigned long ar_rsc; /* RSE configuration */ | 121 | unsigned long ar_rsc; /* RSE configuration */ |
122 | /* The following two are valid only if cr_ipsr.cpl > 0: */ | 122 | /* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */ |
123 | unsigned long ar_rnat; /* RSE NaT */ | 123 | unsigned long ar_rnat; /* RSE NaT */ |
124 | unsigned long ar_bspstore; /* RSE bspstore */ | 124 | unsigned long ar_bspstore; /* RSE bspstore */ |
125 | 125 | ||
diff --git a/include/asm-ia64/sn/sn_feature_sets.h b/include/asm-ia64/sn/sn_feature_sets.h new file mode 100644 index 000000000000..e68a80853d5d --- /dev/null +++ b/include/asm-ia64/sn/sn_feature_sets.h | |||
@@ -0,0 +1,57 @@ | |||
1 | #ifndef _ASM_IA64_SN_FEATURE_SETS_H | ||
2 | #define _ASM_IA64_SN_FEATURE_SETS_H | ||
3 | |||
4 | /* | ||
5 | * SN PROM Features | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved. | ||
12 | */ | ||
13 | |||
14 | |||
15 | #include <asm/types.h> | ||
16 | #include <asm/bitops.h> | ||
17 | |||
18 | /* --------------------- PROM Features -----------------------------*/ | ||
19 | extern int sn_prom_feature_available(int id); | ||
20 | |||
21 | #define MAX_PROM_FEATURE_SETS 2 | ||
22 | |||
23 | /* | ||
24 | * The following defines features that may or may not be supported by the | ||
25 | * current PROM. The OS uses sn_prom_feature_available(feature) to test for | ||
26 | * the presence of a PROM feature. Down rev (old) PROMs will always test | ||
27 | * "false" for new features. | ||
28 | * | ||
29 | * Use: | ||
30 | * if (sn_prom_feature_available(PRF_FEATURE_XXX)) | ||
31 | * ... | ||
32 | */ | ||
33 | |||
34 | /* | ||
35 | * Example: feature XXX | ||
36 | */ | ||
37 | #define PRF_FEATURE_XXX 0 | ||
38 | |||
39 | |||
40 | |||
41 | /* --------------------- OS Features -------------------------------*/ | ||
42 | |||
43 | /* | ||
44 | * The following defines OS features that are optionally present in | ||
45 | * the operating system. | ||
46 | * During boot, PROM is notified of these features via a series of calls: | ||
47 | * | ||
48 | * ia64_sn_set_os_feature(feature1); | ||
49 | * | ||
50 | * Once enabled, a feature cannot be disabled. | ||
51 | * | ||
52 | * By default, features are disabled unless explicitly enabled. | ||
53 | */ | ||
54 | #define OSF_MCA_SLV_TO_OS_INIT_SLV 0 | ||
55 | #define OSF_FEAT_LOG_SBES 1 | ||
56 | |||
57 | #endif /* _ASM_IA64_SN_FEATURE_SETS_H */ | ||
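The header spells out the intended call pattern: test PROM features with sn_prom_feature_available() and advertise OS features with ia64_sn_set_os_feature() (declared in sn_sal.h below). A hedged sketch of that boot-time pattern; the __init calling context is an assumption, the names come from the headers in this patch.

```c
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/sn_sal.h>

static void __init example_sn_feature_setup(void)
{
	/* Down-rev PROMs simply report new features as absent. */
	if (sn_prom_feature_available(PRF_FEATURE_XXX)) {
		/* ... enable code that depends on the PROM feature ... */
	}

	/* Tell the PROM that the OS handles MCA/INIT slaves itself. */
	ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
}
```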
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index e67825ad1930..fea35b33d4e4 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h | |||
@@ -80,6 +80,9 @@ | |||
80 | #define SN_SAL_RESERVED_DO_NOT_USE 0x02000062 | 80 | #define SN_SAL_RESERVED_DO_NOT_USE 0x02000062 |
81 | #define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000064 | 81 | #define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000064 |
82 | 82 | ||
83 | #define SN_SAL_GET_PROM_FEATURE_SET 0x02000065 | ||
84 | #define SN_SAL_SET_OS_FEATURE_SET 0x02000066 | ||
85 | |||
83 | /* | 86 | /* |
84 | * Service-specific constants | 87 | * Service-specific constants |
85 | */ | 88 | */ |
@@ -118,8 +121,8 @@ | |||
118 | /* | 121 | /* |
119 | * Error Handling Features | 122 | * Error Handling Features |
120 | */ | 123 | */ |
121 | #define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 | 124 | #define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 // obsolete |
122 | #define SAL_ERR_FEAT_LOG_SBES 0x2 | 125 | #define SAL_ERR_FEAT_LOG_SBES 0x2 // obsolete |
123 | #define SAL_ERR_FEAT_MFR_OVERRIDE 0x4 | 126 | #define SAL_ERR_FEAT_MFR_OVERRIDE 0x4 |
124 | #define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000 | 127 | #define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000 |
125 | 128 | ||
@@ -152,12 +155,6 @@ sn_sal_rev(void) | |||
152 | } | 155 | } |
153 | 156 | ||
154 | /* | 157 | /* |
155 | * Specify the minimum PROM revision required for this kernel. | ||
156 | * Note that they're stored in hex format... | ||
157 | */ | ||
158 | #define SN_SAL_MIN_VERSION 0x0404 | ||
159 | |||
160 | /* | ||
161 | * Returns the master console nasid, if the call fails, return an illegal | 158 | * Returns the master console nasid, if the call fails, return an illegal |
162 | * value. | 159 | * value. |
163 | */ | 160 | */ |
@@ -336,7 +333,7 @@ ia64_sn_plat_cpei_handler(void) | |||
336 | } | 333 | } |
337 | 334 | ||
338 | /* | 335 | /* |
339 | * Set Error Handling Features | 336 | * Set Error Handling Features (Obsolete) |
340 | */ | 337 | */ |
341 | static inline u64 | 338 | static inline u64 |
342 | ia64_sn_plat_set_error_handling_features(void) | 339 | ia64_sn_plat_set_error_handling_features(void) |
@@ -1052,4 +1049,25 @@ ia64_sn_is_fake_prom(void) | |||
1052 | return (rv.status == 0); | 1049 | return (rv.status == 0); |
1053 | } | 1050 | } |
1054 | 1051 | ||
1052 | static inline int | ||
1053 | ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set) | ||
1054 | { | ||
1055 | struct ia64_sal_retval rv; | ||
1056 | |||
1057 | SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0); | ||
1058 | if (rv.status != 0) | ||
1059 | return rv.status; | ||
1060 | *feature_set = rv.v0; | ||
1061 | return 0; | ||
1062 | } | ||
1063 | |||
1064 | static inline int | ||
1065 | ia64_sn_set_os_feature(int feature) | ||
1066 | { | ||
1067 | struct ia64_sal_retval rv; | ||
1068 | |||
1069 | SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0); | ||
1070 | return rv.status; | ||
1071 | } | ||
1072 | |||
1055 | #endif /* _ASM_IA64_SN_SN_SAL_H */ | 1073 | #endif /* _ASM_IA64_SN_SN_SAL_H */ |
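ia64_sn_get_prom_feature_set() returns one 64-bit word of feature bits per set, or a non-zero SAL status on old PROMs. One plausible way to back sn_prom_feature_available() with it, purely illustrative and not the implementation in this patch, is to cache the words at boot and test bits:

```c
/*
 * Illustrative only: cache the PROM feature words once and answer
 * feature queries from the cache.  Failure of the SAL call (old PROM)
 * leaves the word zero, so every feature tests "false".
 */
static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];

static void __init example_fetch_prom_features(void)
{
	int set;

	for (set = 0; set < MAX_PROM_FEATURE_SETS; set++)
		if (ia64_sn_get_prom_feature_set(set, &sn_prom_features[set]) != 0)
			sn_prom_features[set] = 0;
}

static int example_prom_feature_available(int id)
{
	if (id >= MAX_PROM_FEATURE_SETS * 64)
		return 0;
	return (sn_prom_features[id / 64] >> (id % 64)) & 1;
}
```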
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index d2430aa0d49d..5b78611411c3 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h | |||
@@ -17,28 +17,20 @@ | |||
17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | 19 | ||
20 | typedef struct { | 20 | #define __raw_spin_lock_init(x) ((x)->lock = 0) |
21 | volatile unsigned int lock; | ||
22 | #ifdef CONFIG_PREEMPT | ||
23 | unsigned int break_lock; | ||
24 | #endif | ||
25 | } spinlock_t; | ||
26 | |||
27 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
28 | #define spin_lock_init(x) ((x)->lock = 0) | ||
29 | 21 | ||
30 | #ifdef ASM_SUPPORTED | 22 | #ifdef ASM_SUPPORTED |
31 | /* | 23 | /* |
32 | * Try to get the lock. If we fail to get the lock, make a non-standard call to | 24 | * Try to get the lock. If we fail to get the lock, make a non-standard call to |
33 | * ia64_spinlock_contention(). We do not use a normal call because that would force all | 25 | * ia64_spinlock_contention(). We do not use a normal call because that would force all |
34 | * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is | 26 | * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is |
35 | * carefully coded to touch only those registers that spin_lock() marks "clobbered". | 27 | * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered". |
36 | */ | 28 | */ |
37 | 29 | ||
38 | #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" | 30 | #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" |
39 | 31 | ||
40 | static inline void | 32 | static inline void |
41 | _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | 33 | __raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags) |
42 | { | 34 | { |
43 | register volatile unsigned int *ptr asm ("r31") = &lock->lock; | 35 | register volatile unsigned int *ptr asm ("r31") = &lock->lock; |
44 | 36 | ||
@@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) | |||
94 | #endif | 86 | #endif |
95 | } | 87 | } |
96 | 88 | ||
97 | #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0) | 89 | #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) |
98 | 90 | ||
99 | /* Unlock by doing an ordered store and releasing the cacheline with nta */ | 91 | /* Unlock by doing an ordered store and releasing the cacheline with nta */ |
100 | static inline void _raw_spin_unlock(spinlock_t *x) { | 92 | static inline void __raw_spin_unlock(raw_spinlock_t *x) { |
101 | barrier(); | 93 | barrier(); |
102 | asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); | 94 | asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); |
103 | } | 95 | } |
104 | 96 | ||
105 | #else /* !ASM_SUPPORTED */ | 97 | #else /* !ASM_SUPPORTED */ |
106 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 98 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
107 | # define _raw_spin_lock(x) \ | 99 | # define __raw_spin_lock(x) \ |
108 | do { \ | 100 | do { \ |
109 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ | 101 | __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ |
110 | __u64 ia64_spinlock_val; \ | 102 | __u64 ia64_spinlock_val; \ |
@@ -117,29 +109,20 @@ do { \ | |||
117 | } while (ia64_spinlock_val); \ | 109 | } while (ia64_spinlock_val); \ |
118 | } \ | 110 | } \ |
119 | } while (0) | 111 | } while (0) |
120 | #define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) | 112 | #define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0) |
121 | #endif /* !ASM_SUPPORTED */ | 113 | #endif /* !ASM_SUPPORTED */ |
122 | 114 | ||
123 | #define spin_is_locked(x) ((x)->lock != 0) | 115 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
124 | #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) | 116 | #define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) |
125 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | 117 | #define __raw_spin_unlock_wait(lock) \ |
126 | 118 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | |
127 | typedef struct { | ||
128 | volatile unsigned int read_counter : 24; | ||
129 | volatile unsigned int write_lock : 8; | ||
130 | #ifdef CONFIG_PREEMPT | ||
131 | unsigned int break_lock; | ||
132 | #endif | ||
133 | } rwlock_t; | ||
134 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } | ||
135 | 119 | ||
136 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | 120 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) |
137 | #define read_can_lock(rw) (*(volatile int *)(rw) >= 0) | 121 | #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) |
138 | #define write_can_lock(rw) (*(volatile int *)(rw) == 0) | ||
139 | 122 | ||
140 | #define _raw_read_lock(rw) \ | 123 | #define __raw_read_lock(rw) \ |
141 | do { \ | 124 | do { \ |
142 | rwlock_t *__read_lock_ptr = (rw); \ | 125 | raw_rwlock_t *__read_lock_ptr = (rw); \ |
143 | \ | 126 | \ |
144 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ | 127 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ |
145 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 128 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
@@ -148,14 +131,14 @@ do { \ | |||
148 | } \ | 131 | } \ |
149 | } while (0) | 132 | } while (0) |
150 | 133 | ||
151 | #define _raw_read_unlock(rw) \ | 134 | #define __raw_read_unlock(rw) \ |
152 | do { \ | 135 | do { \ |
153 | rwlock_t *__read_lock_ptr = (rw); \ | 136 | raw_rwlock_t *__read_lock_ptr = (rw); \ |
154 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 137 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
155 | } while (0) | 138 | } while (0) |
156 | 139 | ||
157 | #ifdef ASM_SUPPORTED | 140 | #ifdef ASM_SUPPORTED |
158 | #define _raw_write_lock(rw) \ | 141 | #define __raw_write_lock(rw) \ |
159 | do { \ | 142 | do { \ |
160 | __asm__ __volatile__ ( \ | 143 | __asm__ __volatile__ ( \ |
161 | "mov ar.ccv = r0\n" \ | 144 | "mov ar.ccv = r0\n" \ |
@@ -170,7 +153,7 @@ do { \ | |||
170 | :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ | 153 | :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ |
171 | } while(0) | 154 | } while(0) |
172 | 155 | ||
173 | #define _raw_write_trylock(rw) \ | 156 | #define __raw_write_trylock(rw) \ |
174 | ({ \ | 157 | ({ \ |
175 | register long result; \ | 158 | register long result; \ |
176 | \ | 159 | \ |
@@ -182,7 +165,7 @@ do { \ | |||
182 | (result == 0); \ | 165 | (result == 0); \ |
183 | }) | 166 | }) |
184 | 167 | ||
185 | static inline void _raw_write_unlock(rwlock_t *x) | 168 | static inline void __raw_write_unlock(raw_rwlock_t *x) |
186 | { | 169 | { |
187 | u8 *y = (u8 *)x; | 170 | u8 *y = (u8 *)x; |
188 | barrier(); | 171 | barrier(); |
@@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
191 | 174 | ||
192 | #else /* !ASM_SUPPORTED */ | 175 | #else /* !ASM_SUPPORTED */ |
193 | 176 | ||
194 | #define _raw_write_lock(l) \ | 177 | #define __raw_write_lock(l) \ |
195 | ({ \ | 178 | ({ \ |
196 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ | 179 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ |
197 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ | 180 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ |
@@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
202 | } while (ia64_val); \ | 185 | } while (ia64_val); \ |
203 | }) | 186 | }) |
204 | 187 | ||
205 | #define _raw_write_trylock(rw) \ | 188 | #define __raw_write_trylock(rw) \ |
206 | ({ \ | 189 | ({ \ |
207 | __u64 ia64_val; \ | 190 | __u64 ia64_val; \ |
208 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ | 191 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ |
@@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
210 | (ia64_val == 0); \ | 193 | (ia64_val == 0); \ |
211 | }) | 194 | }) |
212 | 195 | ||
213 | static inline void _raw_write_unlock(rwlock_t *x) | 196 | static inline void __raw_write_unlock(raw_rwlock_t *x) |
214 | { | 197 | { |
215 | barrier(); | 198 | barrier(); |
216 | x->write_lock = 0; | 199 | x->write_lock = 0; |
@@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x) | |||
218 | 201 | ||
219 | #endif /* !ASM_SUPPORTED */ | 202 | #endif /* !ASM_SUPPORTED */ |
220 | 203 | ||
221 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 204 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
222 | 205 | ||
223 | #endif /* _ASM_IA64_SPINLOCK_H */ | 206 | #endif /* _ASM_IA64_SPINLOCK_H */ |
diff --git a/include/asm-ia64/spinlock_types.h b/include/asm-ia64/spinlock_types.h new file mode 100644 index 000000000000..474e46f1ab4a --- /dev/null +++ b/include/asm-ia64/spinlock_types.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef _ASM_IA64_SPINLOCK_TYPES_H | ||
2 | #define _ASM_IA64_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int read_counter : 31; | ||
16 | volatile unsigned int write_lock : 1; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | ||
20 | |||
21 | #endif | ||
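The new ia64 raw_rwlock_t packs a 31-bit reader count and a sign-bit write lock into one word, which is exactly what __raw_read_can_lock() (value >= 0) and __raw_write_can_lock() (value == 0) test in the spinlock.h hunk above. A small host-side sketch of that encoding, assuming the usual little-endian bit-field layout:

```c
#include <stdio.h>

/* Same layout as the ia64 raw_rwlock_t defined above. */
typedef struct {
	volatile unsigned int read_counter : 31;
	volatile unsigned int write_lock   : 1;
} raw_rwlock_t;

/* Mirrors the __raw_*_can_lock() tests from the ia64 spinlock.h hunk. */
#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)

int main(void)
{
	raw_rwlock_t rw = { 0, 0 };

	printf("free:      read %d, write %d\n", read_can_lock(&rw), write_can_lock(&rw));

	rw.read_counter = 3;	/* three readers hold the lock */
	printf("3 readers: read %d, write %d\n", read_can_lock(&rw), write_can_lock(&rw));

	rw.read_counter = 0;
	rw.write_lock = 1;	/* a writer holds the lock: sign bit set */
	printf("writer:    read %d, write %d\n", read_can_lock(&rw), write_can_lock(&rw));
	return 0;
}
```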
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 33256db4a7cf..635235fa1e32 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h | |||
@@ -275,6 +275,7 @@ extern void ia64_load_extra (struct task_struct *task); | |||
275 | */ | 275 | */ |
276 | #define __ARCH_WANT_UNLOCKED_CTXSW | 276 | #define __ARCH_WANT_UNLOCKED_CTXSW |
277 | 277 | ||
278 | #define ARCH_HAS_PREFETCH_SWITCH_STACK | ||
278 | #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) | 279 | #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) |
279 | 280 | ||
280 | void cpu_idle_wait(void); | 281 | void cpu_idle_wait(void); |
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 7dc8951708a3..cf4a950a0f4f 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h | |||
@@ -5,7 +5,7 @@ | |||
5 | #ifndef _ASM_IA64_THREAD_INFO_H | 5 | #ifndef _ASM_IA64_THREAD_INFO_H |
6 | #define _ASM_IA64_THREAD_INFO_H | 6 | #define _ASM_IA64_THREAD_INFO_H |
7 | 7 | ||
8 | #include <asm/offsets.h> | 8 | #include <asm/asm-offsets.h> |
9 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
10 | #include <asm/ptrace.h> | 10 | #include <asm/ptrace.h> |
11 | 11 | ||
@@ -76,6 +76,7 @@ struct thread_info { | |||
76 | #define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */ | 76 | #define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */ |
77 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 77 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
78 | #define TIF_MEMDIE 17 | 78 | #define TIF_MEMDIE 17 |
79 | #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ | ||
79 | 80 | ||
80 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 81 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
81 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 82 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
@@ -85,6 +86,7 @@ struct thread_info { | |||
85 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 86 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
86 | #define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED) | 87 | #define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED) |
87 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) | 88 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) |
89 | #define _TIF_MCA_INIT (1 << TIF_MCA_INIT) | ||
88 | 90 | ||
89 | /* "work to do on user-return" bits */ | 91 | /* "work to do on user-return" bits */ |
90 | #define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED) | 92 | #define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED) |
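TIF_MCA_INIT marks a task that is currently running the MCA or INIT handler; the ptrace.h comment earlier in this diff relies on it to treat ar_rnat/ar_bspstore in pt_regs as valid. A hypothetical test helper (not part of this patch) would be a one-liner:

```c
/*
 * Hypothetical helper: only the TIF_MCA_INIT flag and its _TIF_MCA_INIT
 * mask come from the header above; the wrapper itself is illustrative.
 */
static inline int example_thread_in_mca(struct thread_info *ti)
{
	return (ti->flags & _TIF_MCA_INIT) != 0;
}
```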
diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h index 61426ad3ecdb..5df0276b0493 100644 --- a/include/asm-ia64/unwind.h +++ b/include/asm-ia64/unwind.h | |||
@@ -114,13 +114,6 @@ extern void unw_remove_unwind_table (void *handle); | |||
114 | */ | 114 | */ |
115 | extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); | 115 | extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); |
116 | 116 | ||
117 | /* | ||
118 | * Prepare to unwind from interruption. The pt-regs and switch-stack structures must | ||
119 | * be "adjacent" (no state modifications between pt-regs and switch-stack). | ||
120 | */ | ||
121 | extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, | ||
122 | struct pt_regs *pt, struct switch_stack *sw); | ||
123 | |||
124 | extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, | 117 | extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, |
125 | struct switch_stack *sw); | 118 | struct switch_stack *sw); |
126 | 119 | ||
diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h index 6608d8371c50..7de7def28da9 100644 --- a/include/asm-m32r/spinlock.h +++ b/include/asm-m32r/spinlock.h | |||
@@ -14,57 +14,30 @@ | |||
14 | #include <asm/atomic.h> | 14 | #include <asm/atomic.h> |
15 | #include <asm/page.h> | 15 | #include <asm/page.h> |
16 | 16 | ||
17 | extern int printk(const char * fmt, ...) | ||
18 | __attribute__ ((format (printf, 1, 2))); | ||
19 | |||
20 | #define RW_LOCK_BIAS 0x01000000 | ||
21 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
22 | |||
23 | /* | 17 | /* |
24 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 18 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
25 | */ | 19 | * |
26 | 20 | * (the type definitions are in asm/spinlock_types.h) | |
27 | typedef struct { | 21 | * |
28 | volatile int slock; | ||
29 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
30 | unsigned magic; | ||
31 | #endif | ||
32 | #ifdef CONFIG_PREEMPT | ||
33 | unsigned int break_lock; | ||
34 | #endif | ||
35 | } spinlock_t; | ||
36 | |||
37 | #define SPINLOCK_MAGIC 0xdead4ead | ||
38 | |||
39 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
40 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
41 | #else | ||
42 | #define SPINLOCK_MAGIC_INIT /* */ | ||
43 | #endif | ||
44 | |||
45 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
46 | |||
47 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
48 | |||
49 | /* | ||
50 | * Simple spin lock operations. There are two variants, one clears IRQ's | 22 | * Simple spin lock operations. There are two variants, one clears IRQ's |
51 | * on the local processor, one does not. | 23 | * on the local processor, one does not. |
52 | * | 24 | * |
53 | * We make no fairness assumptions. They have a cost. | 25 | * We make no fairness assumptions. They have a cost. |
54 | */ | 26 | */ |
55 | 27 | ||
56 | #define spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) | 28 | #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) |
57 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 29 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
58 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 30 | #define __raw_spin_unlock_wait(x) \ |
31 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
59 | 32 | ||
60 | /** | 33 | /** |
61 | * _raw_spin_trylock - Try spin lock and return a result | 34 | * __raw_spin_trylock - Try spin lock and return a result |
62 | * @lock: Pointer to the lock variable | 35 | * @lock: Pointer to the lock variable |
63 | * | 36 | * |
64 | * _raw_spin_trylock() tries to get the lock and returns a result. | 37 | * __raw_spin_trylock() tries to get the lock and returns a result. |
65 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). | 38 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). |
66 | */ | 39 | */ |
67 | static inline int _raw_spin_trylock(spinlock_t *lock) | 40 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
68 | { | 41 | { |
69 | int oldval; | 42 | int oldval; |
70 | unsigned long tmp1, tmp2; | 43 | unsigned long tmp1, tmp2; |
@@ -78,7 +51,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
78 | * } | 51 | * } |
79 | */ | 52 | */ |
80 | __asm__ __volatile__ ( | 53 | __asm__ __volatile__ ( |
81 | "# spin_trylock \n\t" | 54 | "# __raw_spin_trylock \n\t" |
82 | "ldi %1, #0; \n\t" | 55 | "ldi %1, #0; \n\t" |
83 | "mvfc %2, psw; \n\t" | 56 | "mvfc %2, psw; \n\t" |
84 | "clrpsw #0x40 -> nop; \n\t" | 57 | "clrpsw #0x40 -> nop; \n\t" |
@@ -97,16 +70,10 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
97 | return (oldval > 0); | 70 | return (oldval > 0); |
98 | } | 71 | } |
99 | 72 | ||
100 | static inline void _raw_spin_lock(spinlock_t *lock) | 73 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
101 | { | 74 | { |
102 | unsigned long tmp0, tmp1; | 75 | unsigned long tmp0, tmp1; |
103 | 76 | ||
104 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
105 | if (unlikely(lock->magic != SPINLOCK_MAGIC)) { | ||
106 | printk("pc: %p\n", __builtin_return_address(0)); | ||
107 | BUG(); | ||
108 | } | ||
109 | #endif | ||
110 | /* | 77 | /* |
111 | * lock->slock : =1 : unlock | 78 | * lock->slock : =1 : unlock |
112 | * : <=0 : lock | 79 | * : <=0 : lock |
@@ -118,7 +85,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
118 | * } | 85 | * } |
119 | */ | 86 | */ |
120 | __asm__ __volatile__ ( | 87 | __asm__ __volatile__ ( |
121 | "# spin_lock \n\t" | 88 | "# __raw_spin_lock \n\t" |
122 | ".fillinsn \n" | 89 | ".fillinsn \n" |
123 | "1: \n\t" | 90 | "1: \n\t" |
124 | "mvfc %1, psw; \n\t" | 91 | "mvfc %1, psw; \n\t" |
@@ -145,12 +112,8 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
145 | ); | 112 | ); |
146 | } | 113 | } |
147 | 114 | ||
148 | static inline void _raw_spin_unlock(spinlock_t *lock) | 115 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
149 | { | 116 | { |
150 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
151 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
152 | BUG_ON(!spin_is_locked(lock)); | ||
153 | #endif | ||
154 | mb(); | 117 | mb(); |
155 | lock->slock = 1; | 118 | lock->slock = 1; |
156 | } | 119 | } |
@@ -164,59 +127,32 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
164 | * can "mix" irq-safe locks - any writer needs to get a | 127 | * can "mix" irq-safe locks - any writer needs to get a |
165 | * irq-safe write-lock, but readers can get non-irqsafe | 128 | * irq-safe write-lock, but readers can get non-irqsafe |
166 | * read-locks. | 129 | * read-locks. |
130 | * | ||
131 | * On x86, we implement read-write locks as a 32-bit counter | ||
132 | * with the high bit (sign) being the "contended" bit. | ||
133 | * | ||
134 | * The inline assembly is non-obvious. Think about it. | ||
135 | * | ||
136 | * Changed to use the same technique as rw semaphores. See | ||
137 | * semaphore.h for details. -ben | ||
167 | */ | 138 | */ |
168 | typedef struct { | ||
169 | volatile int lock; | ||
170 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
171 | unsigned magic; | ||
172 | #endif | ||
173 | #ifdef CONFIG_PREEMPT | ||
174 | unsigned int break_lock; | ||
175 | #endif | ||
176 | } rwlock_t; | ||
177 | |||
178 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
179 | |||
180 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
181 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
182 | #else | ||
183 | #define RWLOCK_MAGIC_INIT /* */ | ||
184 | #endif | ||
185 | |||
186 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
187 | |||
188 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
189 | 139 | ||
190 | /** | 140 | /** |
191 | * read_can_lock - would read_trylock() succeed? | 141 | * read_can_lock - would read_trylock() succeed? |
192 | * @lock: the rwlock in question. | 142 | * @lock: the rwlock in question. |
193 | */ | 143 | */ |
194 | #define read_can_lock(x) ((int)(x)->lock > 0) | 144 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
195 | 145 | ||
196 | /** | 146 | /** |
197 | * write_can_lock - would write_trylock() succeed? | 147 | * write_can_lock - would write_trylock() succeed? |
198 | * @lock: the rwlock in question. | 148 | * @lock: the rwlock in question. |
199 | */ | 149 | */ |
200 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 150 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
201 | |||
202 | /* | ||
203 | * On x86, we implement read-write locks as a 32-bit counter | ||
204 | * with the high bit (sign) being the "contended" bit. | ||
205 | * | ||
206 | * The inline assembly is non-obvious. Think about it. | ||
207 | * | ||
208 | * Changed to use the same technique as rw semaphores. See | ||
209 | * semaphore.h for details. -ben | ||
210 | */ | ||
211 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
212 | 151 | ||
213 | static inline void _raw_read_lock(rwlock_t *rw) | 152 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
214 | { | 153 | { |
215 | unsigned long tmp0, tmp1; | 154 | unsigned long tmp0, tmp1; |
216 | 155 | ||
217 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
218 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
219 | #endif | ||
220 | /* | 156 | /* |
221 | * rw->lock : >0 : unlock | 157 | * rw->lock : >0 : unlock |
222 | * : <=0 : lock | 158 | * : <=0 : lock |
@@ -264,13 +200,10 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
264 | ); | 200 | ); |
265 | } | 201 | } |
266 | 202 | ||
267 | static inline void _raw_write_lock(rwlock_t *rw) | 203 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
268 | { | 204 | { |
269 | unsigned long tmp0, tmp1, tmp2; | 205 | unsigned long tmp0, tmp1, tmp2; |
270 | 206 | ||
271 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
272 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
273 | #endif | ||
274 | /* | 207 | /* |
275 | * rw->lock : =RW_LOCK_BIAS_STR : unlock | 208 | * rw->lock : =RW_LOCK_BIAS_STR : unlock |
276 | * : !=RW_LOCK_BIAS_STR : lock | 209 | * : !=RW_LOCK_BIAS_STR : lock |
@@ -320,7 +253,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
320 | ); | 253 | ); |
321 | } | 254 | } |
322 | 255 | ||
323 | static inline void _raw_read_unlock(rwlock_t *rw) | 256 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
324 | { | 257 | { |
325 | unsigned long tmp0, tmp1; | 258 | unsigned long tmp0, tmp1; |
326 | 259 | ||
@@ -342,7 +275,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
342 | ); | 275 | ); |
343 | } | 276 | } |
344 | 277 | ||
345 | static inline void _raw_write_unlock(rwlock_t *rw) | 278 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
346 | { | 279 | { |
347 | unsigned long tmp0, tmp1, tmp2; | 280 | unsigned long tmp0, tmp1, tmp2; |
348 | 281 | ||
@@ -366,9 +299,9 @@ static inline void _raw_write_unlock(rwlock_t *rw) | |||
366 | ); | 299 | ); |
367 | } | 300 | } |
368 | 301 | ||
369 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 302 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
370 | 303 | ||
371 | static inline int _raw_write_trylock(rwlock_t *lock) | 304 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
372 | { | 305 | { |
373 | atomic_t *count = (atomic_t *)lock; | 306 | atomic_t *count = (atomic_t *)lock; |
374 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 307 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
diff --git a/include/asm-m32r/spinlock_types.h b/include/asm-m32r/spinlock_types.h new file mode 100644 index 000000000000..7e9941c45f40 --- /dev/null +++ b/include/asm-m32r/spinlock_types.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #ifndef _ASM_M32R_SPINLOCK_TYPES_H | ||
2 | #define _ASM_M32R_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define RW_LOCK_BIAS 0x01000000 | ||
19 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
20 | |||
21 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
22 | |||
23 | #endif | ||
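RW_LOCK_BIAS encodes the rwlock scheme described in the comment block above: the counter starts at the bias, each reader takes 1, and a writer takes the whole bias, succeeding only if the result is exactly zero (see __raw_write_trylock() above and write_can_lock(), which compares against RW_LOCK_BIAS). A non-atomic toy model of that accounting, with the undo on contention assumed:

```c
#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

static int lock = RW_LOCK_BIAS;		/* unlocked state */

static int toy_write_trylock(void)
{
	lock -= RW_LOCK_BIAS;
	if (lock == 0)
		return 1;		/* no readers, no writer: got it */
	lock += RW_LOCK_BIAS;		/* assumed undo on contention */
	return 0;
}

int main(void)
{
	lock -= 1;						/* one reader enters */
	printf("with a reader: trylock = %d\n", toy_write_trylock());
	lock += 1;						/* reader leaves */
	printf("lock now free: trylock = %d\n", toy_write_trylock());
	return 0;
}
```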
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h index f95e32b40425..c42f88a9b9f9 100644 --- a/include/asm-m68knommu/bitops.h +++ b/include/asm-m68knommu/bitops.h | |||
@@ -259,7 +259,7 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) | |||
259 | #define find_first_bit(addr, size) \ | 259 | #define find_first_bit(addr, size) \ |
260 | find_next_bit((addr), (size), 0) | 260 | find_next_bit((addr), (size), 0) |
261 | 261 | ||
262 | static __inline__ int find_next_zero_bit (void * addr, int size, int offset) | 262 | static __inline__ int find_next_zero_bit (const void * addr, int size, int offset) |
263 | { | 263 | { |
264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | 264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); |
265 | unsigned long result = offset & ~31UL; | 265 | unsigned long result = offset & ~31UL; |
diff --git a/include/asm-m68knommu/checksum.h b/include/asm-m68knommu/checksum.h index 92cf102c2534..294ec7583ac9 100644 --- a/include/asm-m68knommu/checksum.h +++ b/include/asm-m68knommu/checksum.h | |||
@@ -25,7 +25,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) | |||
25 | * better 64-bit) boundary | 25 | * better 64-bit) boundary |
26 | */ | 26 | */ |
27 | 27 | ||
28 | unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum); | 28 | unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, |
29 | int len, int sum); | ||
29 | 30 | ||
30 | 31 | ||
31 | /* | 32 | /* |
@@ -35,8 +36,8 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum); | |||
35 | * better 64-bit) boundary | 36 | * better 64-bit) boundary |
36 | */ | 37 | */ |
37 | 38 | ||
38 | extern unsigned int csum_partial_copy_from_user(const char *src, char *dst, | 39 | extern unsigned int csum_partial_copy_from_user(const unsigned char *src, |
39 | int len, int sum, int *csum_err); | 40 | unsigned char *dst, int len, int sum, int *csum_err); |
40 | 41 | ||
41 | #define csum_partial_copy_nocheck(src, dst, len, sum) \ | 42 | #define csum_partial_copy_nocheck(src, dst, len, sum) \ |
42 | csum_partial_copy((src), (dst), (len), (sum)) | 43 | csum_partial_copy((src), (dst), (len), (sum)) |
diff --git a/include/asm-m68knommu/m527xsim.h b/include/asm-m68knommu/m527xsim.h index d280d013da03..e7878d0f7d7a 100644 --- a/include/asm-m68knommu/m527xsim.h +++ b/include/asm-m68knommu/m527xsim.h | |||
@@ -37,13 +37,14 @@ | |||
37 | /* | 37 | /* |
38 | * SDRAM configuration registers. | 38 | * SDRAM configuration registers. |
39 | */ | 39 | */ |
40 | #ifdef CONFIG_M5271EVB | 40 | #ifdef CONFIG_M5271 |
41 | #define MCFSIM_DCR 0x40 /* SDRAM control */ | 41 | #define MCFSIM_DCR 0x40 /* SDRAM control */ |
42 | #define MCFSIM_DACR0 0x48 /* SDRAM base address 0 */ | 42 | #define MCFSIM_DACR0 0x48 /* SDRAM base address 0 */ |
43 | #define MCFSIM_DMR0 0x4c /* SDRAM address mask 0 */ | 43 | #define MCFSIM_DMR0 0x4c /* SDRAM address mask 0 */ |
44 | #define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ | 44 | #define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ |
45 | #define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ | 45 | #define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ |
46 | #else | 46 | #endif |
47 | #ifdef CONFIG_M5275 | ||
47 | #define MCFSIM_DMR 0x40 /* SDRAM mode */ | 48 | #define MCFSIM_DMR 0x40 /* SDRAM mode */ |
48 | #define MCFSIM_DCR 0x44 /* SDRAM control */ | 49 | #define MCFSIM_DCR 0x44 /* SDRAM control */ |
49 | #define MCFSIM_DCFG1 0x48 /* SDRAM configuration 1 */ | 50 | #define MCFSIM_DCFG1 0x48 /* SDRAM configuration 1 */ |
@@ -54,5 +55,21 @@ | |||
54 | #define MCFSIM_DMR1 0x5c /* SDRAM address mask 1 */ | 55 | #define MCFSIM_DMR1 0x5c /* SDRAM address mask 1 */ |
55 | #endif | 56 | #endif |
56 | 57 | ||
58 | /* | ||
59 | * GPIO pin setups to enable the UARTs. | ||
60 | */ | ||
61 | #ifdef CONFIG_M5271 | ||
62 | #define MCF_GPIO_PAR_UART 0x100048 /* PAR UART address */ | ||
63 | #define UART0_ENABLE_MASK 0x000f | ||
64 | #define UART1_ENABLE_MASK 0x0ff0 | ||
65 | #define UART2_ENABLE_MASK 0x3000 | ||
66 | #endif | ||
67 | #ifdef CONFIG_M5275 | ||
68 | #define MCF_GPIO_PAR_UART 0x10007c /* PAR UART address */ | ||
69 | #define UART0_ENABLE_MASK 0x000f | ||
70 | #define UART1_ENABLE_MASK 0x00f0 | ||
71 | #define UART2_ENABLE_MASK 0x3f00 | ||
72 | #endif | ||
73 | |||
57 | /****************************************************************************/ | 74 | /****************************************************************************/ |
58 | #endif /* m527xsim_h */ | 75 | #endif /* m527xsim_h */ |
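The new constants locate the port-assignment (PAR) register for the UART pins and give per-UART enable masks. The fragment below is a hedged sketch only: it assumes MCF_GPIO_PAR_UART is an offset from MCF_IPSBAR and that the register is 16 bits wide, neither of which this header states explicitly.

```c
/*
 * Illustrative only -- the IPSBAR-relative addressing and the 16-bit
 * register width are assumptions, not guaranteed by the header.
 */
static void example_enable_uart0_pins(void)
{
	volatile u16 *par = (volatile u16 *)(MCF_IPSBAR + MCF_GPIO_PAR_UART);

	*par |= UART0_ENABLE_MASK;	/* route the UART0 pins onto the port */
}
```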
diff --git a/include/asm-m68knommu/m528xsim.h b/include/asm-m68knommu/m528xsim.h index 371993a206ac..610774a17f70 100644 --- a/include/asm-m68knommu/m528xsim.h +++ b/include/asm-m68knommu/m528xsim.h | |||
@@ -41,5 +41,117 @@ | |||
41 | #define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ | 41 | #define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */ |
42 | #define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ | 42 | #define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */ |
43 | 43 | ||
44 | /* | ||
45 | * Derek Cheung - 6 Feb 2005 | ||
46 | * add I2C and QSPI register definition using Freescale's MCF5282 | ||
47 | */ | ||
48 | /* set Port AS pin for I2C or UART */ | ||
49 | #define MCF5282_GPIO_PASPAR (volatile u16 *) (MCF_IPSBAR + 0x00100056) | ||
50 | |||
51 | /* Interrupt Mask Register Register Low */ | ||
52 | #define MCF5282_INTC0_IMRL (volatile u32 *) (MCF_IPSBAR + 0x0C0C) | ||
53 | /* Interrupt Control Register 7 */ | ||
54 | #define MCF5282_INTC0_ICR17 (volatile u8 *) (MCF_IPSBAR + 0x0C51) | ||
55 | |||
56 | |||
57 | |||
58 | /********************************************************************* | ||
59 | * | ||
60 | * Inter-IC (I2C) Module | ||
61 | * | ||
62 | *********************************************************************/ | ||
63 | /* Read/Write access macros for general use */ | ||
64 | #define MCF5282_I2C_I2ADR (volatile u8 *) (MCF_IPSBAR + 0x0300) // Address | ||
65 | #define MCF5282_I2C_I2FDR (volatile u8 *) (MCF_IPSBAR + 0x0304) // Freq Divider | ||
66 | #define MCF5282_I2C_I2CR (volatile u8 *) (MCF_IPSBAR + 0x0308) // Control | ||
67 | #define MCF5282_I2C_I2SR (volatile u8 *) (MCF_IPSBAR + 0x030C) // Status | ||
68 | #define MCF5282_I2C_I2DR (volatile u8 *) (MCF_IPSBAR + 0x0310) // Data I/O | ||
69 | |||
70 | /* Bit level definitions and macros */ | ||
71 | #define MCF5282_I2C_I2ADR_ADDR(x) (((x)&0x7F)<<0x01) | ||
72 | |||
73 | #define MCF5282_I2C_I2FDR_IC(x) (((x)&0x3F)) | ||
74 | |||
75 | #define MCF5282_I2C_I2CR_IEN (0x80) // I2C enable | ||
76 | #define MCF5282_I2C_I2CR_IIEN (0x40) // interrupt enable | ||
77 | #define MCF5282_I2C_I2CR_MSTA (0x20) // master/slave mode | ||
78 | #define MCF5282_I2C_I2CR_MTX (0x10) // transmit/receive mode | ||
79 | #define MCF5282_I2C_I2CR_TXAK (0x08) // transmit acknowledge enable | ||
80 | #define MCF5282_I2C_I2CR_RSTA (0x04) // repeat start | ||
81 | |||
82 | #define MCF5282_I2C_I2SR_ICF (0x80) // data transfer bit | ||
83 | #define MCF5282_I2C_I2SR_IAAS (0x40) // I2C addressed as a slave | ||
84 | #define MCF5282_I2C_I2SR_IBB (0x20) // I2C bus busy | ||
85 | #define MCF5282_I2C_I2SR_IAL (0x10) // arbitration lost | ||
86 | #define MCF5282_I2C_I2SR_SRW (0x04) // slave read/write | ||
87 | #define MCF5282_I2C_I2SR_IIF (0x02) // I2C interrupt | ||
88 | #define MCF5282_I2C_I2SR_RXAK (0x01) // received acknowledge | ||
89 | |||
90 | |||
91 | |||
92 | /********************************************************************* | ||
93 | * | ||
94 | * Queued Serial Peripheral Interface (QSPI) Module | ||
95 | * | ||
96 | *********************************************************************/ | ||
97 | /* Derek - 21 Feb 2005 */ | ||
98 | /* change to the format used in I2C */ | ||
99 | /* Read/Write access macros for general use */ | ||
100 | #define MCF5282_QSPI_QMR MCF_IPSBAR + 0x0340 | ||
101 | #define MCF5282_QSPI_QDLYR MCF_IPSBAR + 0x0344 | ||
102 | #define MCF5282_QSPI_QWR MCF_IPSBAR + 0x0348 | ||
103 | #define MCF5282_QSPI_QIR MCF_IPSBAR + 0x034C | ||
104 | #define MCF5282_QSPI_QAR MCF_IPSBAR + 0x0350 | ||
105 | #define MCF5282_QSPI_QDR MCF_IPSBAR + 0x0354 | ||
106 | #define MCF5282_QSPI_QCR MCF_IPSBAR + 0x0354 | ||
107 | |||
108 | /* Bit level definitions and macros */ | ||
109 | #define MCF5282_QSPI_QMR_MSTR (0x8000) | ||
110 | #define MCF5282_QSPI_QMR_DOHIE (0x4000) | ||
111 | #define MCF5282_QSPI_QMR_BITS_16 (0x0000) | ||
112 | #define MCF5282_QSPI_QMR_BITS_8 (0x2000) | ||
113 | #define MCF5282_QSPI_QMR_BITS_9 (0x2400) | ||
114 | #define MCF5282_QSPI_QMR_BITS_10 (0x2800) | ||
115 | #define MCF5282_QSPI_QMR_BITS_11 (0x2C00) | ||
116 | #define MCF5282_QSPI_QMR_BITS_12 (0x3000) | ||
117 | #define MCF5282_QSPI_QMR_BITS_13 (0x3400) | ||
118 | #define MCF5282_QSPI_QMR_BITS_14 (0x3800) | ||
119 | #define MCF5282_QSPI_QMR_BITS_15 (0x3C00) | ||
120 | #define MCF5282_QSPI_QMR_CPOL (0x0200) | ||
121 | #define MCF5282_QSPI_QMR_CPHA (0x0100) | ||
122 | #define MCF5282_QSPI_QMR_BAUD(x) (((x)&0x00FF)) | ||
123 | |||
124 | #define MCF5282_QSPI_QDLYR_SPE (0x80) | ||
125 | #define MCF5282_QSPI_QDLYR_QCD(x) (((x)&0x007F)<<8) | ||
126 | #define MCF5282_QSPI_QDLYR_DTL(x) (((x)&0x00FF)) | ||
127 | |||
128 | #define MCF5282_QSPI_QWR_HALT (0x8000) | ||
129 | #define MCF5282_QSPI_QWR_WREN (0x4000) | ||
130 | #define MCF5282_QSPI_QWR_WRTO (0x2000) | ||
131 | #define MCF5282_QSPI_QWR_CSIV (0x1000) | ||
132 | #define MCF5282_QSPI_QWR_ENDQP(x) (((x)&0x000F)<<8) | ||
133 | #define MCF5282_QSPI_QWR_CPTQP(x) (((x)&0x000F)<<4) | ||
134 | #define MCF5282_QSPI_QWR_NEWQP(x) (((x)&0x000F)) | ||
135 | |||
136 | #define MCF5282_QSPI_QIR_WCEFB (0x8000) | ||
137 | #define MCF5282_QSPI_QIR_ABRTB (0x4000) | ||
138 | #define MCF5282_QSPI_QIR_ABRTL (0x1000) | ||
139 | #define MCF5282_QSPI_QIR_WCEFE (0x0800) | ||
140 | #define MCF5282_QSPI_QIR_ABRTE (0x0400) | ||
141 | #define MCF5282_QSPI_QIR_SPIFE (0x0100) | ||
142 | #define MCF5282_QSPI_QIR_WCEF (0x0008) | ||
143 | #define MCF5282_QSPI_QIR_ABRT (0x0004) | ||
144 | #define MCF5282_QSPI_QIR_SPIF (0x0001) | ||
145 | |||
146 | #define MCF5282_QSPI_QAR_ADDR(x) (((x)&0x003F)) | ||
147 | |||
148 | #define MCF5282_QSPI_QDR_COMMAND(x) (((x)&0xFF00)) | ||
149 | #define MCF5282_QSPI_QCR_DATA(x) (((x)&0x00FF)<<8) | ||
150 | #define MCF5282_QSPI_QCR_CONT (0x8000) | ||
151 | #define MCF5282_QSPI_QCR_BITSE (0x4000) | ||
152 | #define MCF5282_QSPI_QCR_DT (0x2000) | ||
153 | #define MCF5282_QSPI_QCR_DSCK (0x1000) | ||
154 | #define MCF5282_QSPI_QCR_CS (((x)&0x000F)<<8) | ||
155 | |||
44 | /****************************************************************************/ | 156 | /****************************************************************************/ |
45 | #endif /* m528xsim_h */ | 157 | #endif /* m528xsim_h */ |
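The I2C block above is driven through the memory-mapped registers the new macros name. A minimal, hedged enable sequence is sketched below; the divider value and the order of operations are illustrative assumptions, not a working driver.

```c
/*
 * Illustrative only: pick a clock divider and switch the MCF5282 I2C
 * module on.  The divider value is an example, not a recommendation.
 */
static void example_mcf5282_i2c_enable(void)
{
	*MCF5282_I2C_I2FDR = MCF5282_I2C_I2FDR_IC(0x18);	/* example bus-clock divider */
	*MCF5282_I2C_I2SR  = 0;					/* clear status flags */
	*MCF5282_I2C_I2CR  = MCF5282_I2C_I2CR_IEN;		/* enable the module */
}
```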
diff --git a/include/asm-m68knommu/mcfcache.h b/include/asm-m68knommu/mcfcache.h index bdd8c53ef34c..b17cd920977f 100644 --- a/include/asm-m68knommu/mcfcache.h +++ b/include/asm-m68knommu/mcfcache.h | |||
@@ -33,7 +33,7 @@ | |||
33 | .endm | 33 | .endm |
34 | #endif /* CONFIG_M5206 || CONFIG_M5206e || CONFIG_M5272 */ | 34 | #endif /* CONFIG_M5206 || CONFIG_M5206e || CONFIG_M5272 */ |
35 | 35 | ||
36 | #if defined(CONFIG_M527x) | 36 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) |
37 | /* | 37 | /* |
38 | * New version 2 cores have a configurable split cache arrangement. | 38 | * New version 2 cores have a configurable split cache arrangement. |
39 | * For now I am just enabling instruction cache - but ultimately I | 39 | * For now I am just enabling instruction cache - but ultimately I |
@@ -51,23 +51,20 @@ | |||
51 | movec %d0,%CACR /* enable cache */ | 51 | movec %d0,%CACR /* enable cache */ |
52 | nop | 52 | nop |
53 | .endm | 53 | .endm |
54 | #endif /* CONFIG_M527x */ | 54 | #endif /* CONFIG_M523x || CONFIG_M527x */ |
55 | 55 | ||
56 | #if defined(CONFIG_M528x) | 56 | #if defined(CONFIG_M528x) |
57 | /* | ||
58 | * Cache is totally broken on early 5282 silicon. So far now we | ||
59 | * disable its cache all together. | ||
60 | */ | ||
61 | .macro CACHE_ENABLE | 57 | .macro CACHE_ENABLE |
62 | movel #0x01000000,%d0 | ||
63 | movec %d0,%CACR /* invalidate cache */ | ||
64 | nop | 58 | nop |
65 | movel #0x0000c000,%d0 /* set SDRAM cached only */ | 59 | movel #0x01000000, %d0 |
66 | movec %d0,%ACR0 | 60 | movec %d0, %CACR /* Invalidate cache */ |
67 | movel #0x00000000,%d0 /* no other regions cached */ | 61 | nop |
68 | movec %d0,%ACR1 | 62 | movel #0x0000c020, %d0 /* Set SDRAM cached only */ |
69 | movel #0x00000000,%d0 /* configure cache */ | 63 | movec %d0, %ACR0 |
70 | movec %d0,%CACR /* enable cache */ | 64 | movel #0xff00c000, %d0 /* Cache Flash also */ |
65 | movec %d0, %ACR1 | ||
66 | movel #0x80000200, %d0 /* Setup cache mask */ | ||
67 | movec %d0, %CACR /* Enable cache */ | ||
71 | nop | 68 | nop |
72 | .endm | 69 | .endm |
73 | #endif /* CONFIG_M528x */ | 70 | #endif /* CONFIG_M528x */ |
diff --git a/include/asm-m68knommu/mcfdma.h b/include/asm-m68knommu/mcfdma.h index 350c6090b5c1..b93f8ba8a248 100644 --- a/include/asm-m68knommu/mcfdma.h +++ b/include/asm-m68knommu/mcfdma.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #define MCFDMA_BASE1 0x240 /* Base address of DMA 1 */ | 21 | #define MCFDMA_BASE1 0x240 /* Base address of DMA 1 */ |
22 | #elif defined(CONFIG_M5272) | 22 | #elif defined(CONFIG_M5272) |
23 | #define MCFDMA_BASE0 0x0e0 /* Base address of DMA 0 */ | 23 | #define MCFDMA_BASE0 0x0e0 /* Base address of DMA 0 */ |
24 | #elif defined(CONFIG_M527x) || defined(CONFIG_M528x) | 24 | #elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) |
25 | /* These are relative to the IPSBAR, not MBAR */ | 25 | /* These are relative to the IPSBAR, not MBAR */ |
26 | #define MCFDMA_BASE0 0x100 /* Base address of DMA 0 */ | 26 | #define MCFDMA_BASE0 0x100 /* Base address of DMA 0 */ |
27 | #define MCFDMA_BASE1 0x140 /* Base address of DMA 1 */ | 27 | #define MCFDMA_BASE1 0x140 /* Base address of DMA 1 */ |
diff --git a/include/asm-mips/asmmacro-32.h b/include/asm-mips/asmmacro-32.h index ac8823df2554..11daf5ceb7b4 100644 --- a/include/asm-mips/asmmacro-32.h +++ b/include/asm-mips/asmmacro-32.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #ifndef _ASM_ASMMACRO_32_H | 7 | #ifndef _ASM_ASMMACRO_32_H |
8 | #define _ASM_ASMMACRO_32_H | 8 | #define _ASM_ASMMACRO_32_H |
9 | 9 | ||
10 | #include <asm/offset.h> | 10 | #include <asm/asm-offsets.h> |
11 | #include <asm/regdef.h> | 11 | #include <asm/regdef.h> |
12 | #include <asm/fpregdef.h> | 12 | #include <asm/fpregdef.h> |
13 | #include <asm/mipsregs.h> | 13 | #include <asm/mipsregs.h> |
diff --git a/include/asm-mips/asmmacro-64.h b/include/asm-mips/asmmacro-64.h index bbed35511f5a..559c355b9b86 100644 --- a/include/asm-mips/asmmacro-64.h +++ b/include/asm-mips/asmmacro-64.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #ifndef _ASM_ASMMACRO_64_H | 8 | #ifndef _ASM_ASMMACRO_64_H |
9 | #define _ASM_ASMMACRO_64_H | 9 | #define _ASM_ASMMACRO_64_H |
10 | 10 | ||
11 | #include <asm/offset.h> | 11 | #include <asm/asm-offsets.h> |
12 | #include <asm/regdef.h> | 12 | #include <asm/regdef.h> |
13 | #include <asm/fpregdef.h> | 13 | #include <asm/fpregdef.h> |
14 | #include <asm/mipsregs.h> | 14 | #include <asm/mipsregs.h> |
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h index b90b11d0b886..3f2470e9e678 100644 --- a/include/asm-mips/irq.h +++ b/include/asm-mips/irq.h | |||
@@ -49,7 +49,4 @@ do { \ | |||
49 | 49 | ||
50 | extern void arch_init_irq(void); | 50 | extern void arch_init_irq(void); |
51 | 51 | ||
52 | struct irqaction; | ||
53 | int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); | ||
54 | |||
55 | #endif /* _ASM_IRQ_H */ | 52 | #endif /* _ASM_IRQ_H */ |
diff --git a/include/asm-mips/sim.h b/include/asm-mips/sim.h index 3ccfe09fa744..9c2af1b00e19 100644 --- a/include/asm-mips/sim.h +++ b/include/asm-mips/sim.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | 13 | ||
14 | #include <asm/offset.h> | 14 | #include <asm/asm-offsets.h> |
15 | 15 | ||
16 | #define __str2(x) #x | 16 | #define __str2(x) #x |
17 | #define __str(x) __str2(x) | 17 | #define __str(x) __str2(x) |
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h index 114d3eb98a6a..4d0135b11156 100644 --- a/include/asm-mips/spinlock.h +++ b/include/asm-mips/spinlock.h | |||
@@ -16,20 +16,10 @@ | |||
16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
17 | */ | 17 | */ |
18 | 18 | ||
19 | typedef struct { | 19 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
20 | volatile unsigned int lock; | 20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
21 | #ifdef CONFIG_PREEMPT | 21 | #define __raw_spin_unlock_wait(x) \ |
22 | unsigned int break_lock; | 22 | do { cpu_relax(); } while ((x)->lock) |
23 | #endif | ||
24 | } spinlock_t; | ||
25 | |||
26 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
27 | |||
28 | #define spin_lock_init(x) do { (x)->lock = 0; } while(0) | ||
29 | |||
30 | #define spin_is_locked(x) ((x)->lock != 0) | ||
31 | #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) | ||
32 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
33 | 23 | ||
34 | /* | 24 | /* |
35 | * Simple spin lock operations. There are two variants, one clears IRQ's | 25 | * Simple spin lock operations. There are two variants, one clears IRQ's |
@@ -38,13 +28,13 @@ typedef struct { | |||
38 | * We make no fairness assumptions. They have a cost. | 28 | * We make no fairness assumptions. They have a cost. |
39 | */ | 29 | */ |
40 | 30 | ||
41 | static inline void _raw_spin_lock(spinlock_t *lock) | 31 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
42 | { | 32 | { |
43 | unsigned int tmp; | 33 | unsigned int tmp; |
44 | 34 | ||
45 | if (R10000_LLSC_WAR) { | 35 | if (R10000_LLSC_WAR) { |
46 | __asm__ __volatile__( | 36 | __asm__ __volatile__( |
47 | " .set noreorder # _raw_spin_lock \n" | 37 | " .set noreorder # __raw_spin_lock \n" |
48 | "1: ll %1, %2 \n" | 38 | "1: ll %1, %2 \n" |
49 | " bnez %1, 1b \n" | 39 | " bnez %1, 1b \n" |
50 | " li %1, 1 \n" | 40 | " li %1, 1 \n" |
@@ -58,7 +48,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
58 | : "memory"); | 48 | : "memory"); |
59 | } else { | 49 | } else { |
60 | __asm__ __volatile__( | 50 | __asm__ __volatile__( |
61 | " .set noreorder # _raw_spin_lock \n" | 51 | " .set noreorder # __raw_spin_lock \n" |
62 | "1: ll %1, %2 \n" | 52 | "1: ll %1, %2 \n" |
63 | " bnez %1, 1b \n" | 53 | " bnez %1, 1b \n" |
64 | " li %1, 1 \n" | 54 | " li %1, 1 \n" |
@@ -72,10 +62,10 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
72 | } | 62 | } |
73 | } | 63 | } |
74 | 64 | ||
75 | static inline void _raw_spin_unlock(spinlock_t *lock) | 65 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
76 | { | 66 | { |
77 | __asm__ __volatile__( | 67 | __asm__ __volatile__( |
78 | " .set noreorder # _raw_spin_unlock \n" | 68 | " .set noreorder # __raw_spin_unlock \n" |
79 | " sync \n" | 69 | " sync \n" |
80 | " sw $0, %0 \n" | 70 | " sw $0, %0 \n" |
81 | " .set\treorder \n" | 71 | " .set\treorder \n" |
@@ -84,13 +74,13 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
84 | : "memory"); | 74 | : "memory"); |
85 | } | 75 | } |
86 | 76 | ||
87 | static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | 77 | static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) |
88 | { | 78 | { |
89 | unsigned int temp, res; | 79 | unsigned int temp, res; |
90 | 80 | ||
91 | if (R10000_LLSC_WAR) { | 81 | if (R10000_LLSC_WAR) { |
92 | __asm__ __volatile__( | 82 | __asm__ __volatile__( |
93 | " .set noreorder # _raw_spin_trylock \n" | 83 | " .set noreorder # __raw_spin_trylock \n" |
94 | "1: ll %0, %3 \n" | 84 | "1: ll %0, %3 \n" |
95 | " ori %2, %0, 1 \n" | 85 | " ori %2, %0, 1 \n" |
96 | " sc %2, %1 \n" | 86 | " sc %2, %1 \n" |
@@ -104,7 +94,7 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | |||
104 | : "memory"); | 94 | : "memory"); |
105 | } else { | 95 | } else { |
106 | __asm__ __volatile__( | 96 | __asm__ __volatile__( |
107 | " .set noreorder # _raw_spin_trylock \n" | 97 | " .set noreorder # __raw_spin_trylock \n" |
108 | "1: ll %0, %3 \n" | 98 | "1: ll %0, %3 \n" |
109 | " ori %2, %0, 1 \n" | 99 | " ori %2, %0, 1 \n" |
110 | " sc %2, %1 \n" | 100 | " sc %2, %1 \n" |
@@ -129,24 +119,13 @@ static inline unsigned int _raw_spin_trylock(spinlock_t *lock) | |||
129 | * read-locks. | 119 | * read-locks. |
130 | */ | 120 | */ |
131 | 121 | ||
132 | typedef struct { | 122 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
133 | volatile unsigned int lock; | ||
134 | #ifdef CONFIG_PREEMPT | ||
135 | unsigned int break_lock; | ||
136 | #endif | ||
137 | } rwlock_t; | ||
138 | |||
139 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
140 | |||
141 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
142 | |||
143 | static inline void _raw_read_lock(rwlock_t *rw) | ||
144 | { | 123 | { |
145 | unsigned int tmp; | 124 | unsigned int tmp; |
146 | 125 | ||
147 | if (R10000_LLSC_WAR) { | 126 | if (R10000_LLSC_WAR) { |
148 | __asm__ __volatile__( | 127 | __asm__ __volatile__( |
149 | " .set noreorder # _raw_read_lock \n" | 128 | " .set noreorder # __raw_read_lock \n" |
150 | "1: ll %1, %2 \n" | 129 | "1: ll %1, %2 \n" |
151 | " bltz %1, 1b \n" | 130 | " bltz %1, 1b \n" |
152 | " addu %1, 1 \n" | 131 | " addu %1, 1 \n" |
@@ -160,7 +139,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
160 | : "memory"); | 139 | : "memory"); |
161 | } else { | 140 | } else { |
162 | __asm__ __volatile__( | 141 | __asm__ __volatile__( |
163 | " .set noreorder # _raw_read_lock \n" | 142 | " .set noreorder # __raw_read_lock \n" |
164 | "1: ll %1, %2 \n" | 143 | "1: ll %1, %2 \n" |
165 | " bltz %1, 1b \n" | 144 | " bltz %1, 1b \n" |
166 | " addu %1, 1 \n" | 145 | " addu %1, 1 \n" |
@@ -177,13 +156,13 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
177 | /* Note the use of sub, not subu which will make the kernel die with an | 156 | /* Note the use of sub, not subu which will make the kernel die with an |
178 | overflow exception if we ever try to unlock an rwlock that is already | 157 | overflow exception if we ever try to unlock an rwlock that is already |
179 | unlocked or is being held by a writer. */ | 158 | unlocked or is being held by a writer. */ |
180 | static inline void _raw_read_unlock(rwlock_t *rw) | 159 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
181 | { | 160 | { |
182 | unsigned int tmp; | 161 | unsigned int tmp; |
183 | 162 | ||
184 | if (R10000_LLSC_WAR) { | 163 | if (R10000_LLSC_WAR) { |
185 | __asm__ __volatile__( | 164 | __asm__ __volatile__( |
186 | "1: ll %1, %2 # _raw_read_unlock \n" | 165 | "1: ll %1, %2 # __raw_read_unlock \n" |
187 | " sub %1, 1 \n" | 166 | " sub %1, 1 \n" |
188 | " sc %1, %0 \n" | 167 | " sc %1, %0 \n" |
189 | " beqzl %1, 1b \n" | 168 | " beqzl %1, 1b \n" |
@@ -193,7 +172,7 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
193 | : "memory"); | 172 | : "memory"); |
194 | } else { | 173 | } else { |
195 | __asm__ __volatile__( | 174 | __asm__ __volatile__( |
196 | " .set noreorder # _raw_read_unlock \n" | 175 | " .set noreorder # __raw_read_unlock \n" |
197 | "1: ll %1, %2 \n" | 176 | "1: ll %1, %2 \n" |
198 | " sub %1, 1 \n" | 177 | " sub %1, 1 \n" |
199 | " sc %1, %0 \n" | 178 | " sc %1, %0 \n" |
@@ -206,13 +185,13 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
206 | } | 185 | } |
207 | } | 186 | } |
208 | 187 | ||
209 | static inline void _raw_write_lock(rwlock_t *rw) | 188 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
210 | { | 189 | { |
211 | unsigned int tmp; | 190 | unsigned int tmp; |
212 | 191 | ||
213 | if (R10000_LLSC_WAR) { | 192 | if (R10000_LLSC_WAR) { |
214 | __asm__ __volatile__( | 193 | __asm__ __volatile__( |
215 | " .set noreorder # _raw_write_lock \n" | 194 | " .set noreorder # __raw_write_lock \n" |
216 | "1: ll %1, %2 \n" | 195 | "1: ll %1, %2 \n" |
217 | " bnez %1, 1b \n" | 196 | " bnez %1, 1b \n" |
218 | " lui %1, 0x8000 \n" | 197 | " lui %1, 0x8000 \n" |
@@ -226,7 +205,7 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
226 | : "memory"); | 205 | : "memory"); |
227 | } else { | 206 | } else { |
228 | __asm__ __volatile__( | 207 | __asm__ __volatile__( |
229 | " .set noreorder # _raw_write_lock \n" | 208 | " .set noreorder # __raw_write_lock \n" |
230 | "1: ll %1, %2 \n" | 209 | "1: ll %1, %2 \n" |
231 | " bnez %1, 1b \n" | 210 | " bnez %1, 1b \n" |
232 | " lui %1, 0x8000 \n" | 211 | " lui %1, 0x8000 \n" |
@@ -241,26 +220,26 @@ static inline void _raw_write_lock(rwlock_t *rw) | |||
241 | } | 220 | } |
242 | } | 221 | } |
243 | 222 | ||
244 | static inline void _raw_write_unlock(rwlock_t *rw) | 223 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
245 | { | 224 | { |
246 | __asm__ __volatile__( | 225 | __asm__ __volatile__( |
247 | " sync # _raw_write_unlock \n" | 226 | " sync # __raw_write_unlock \n" |
248 | " sw $0, %0 \n" | 227 | " sw $0, %0 \n" |
249 | : "=m" (rw->lock) | 228 | : "=m" (rw->lock) |
250 | : "m" (rw->lock) | 229 | : "m" (rw->lock) |
251 | : "memory"); | 230 | : "memory"); |
252 | } | 231 | } |
253 | 232 | ||
254 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 233 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
255 | 234 | ||
256 | static inline int _raw_write_trylock(rwlock_t *rw) | 235 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
257 | { | 236 | { |
258 | unsigned int tmp; | 237 | unsigned int tmp; |
259 | int ret; | 238 | int ret; |
260 | 239 | ||
261 | if (R10000_LLSC_WAR) { | 240 | if (R10000_LLSC_WAR) { |
262 | __asm__ __volatile__( | 241 | __asm__ __volatile__( |
263 | " .set noreorder # _raw_write_trylock \n" | 242 | " .set noreorder # __raw_write_trylock \n" |
264 | " li %2, 0 \n" | 243 | " li %2, 0 \n" |
265 | "1: ll %1, %3 \n" | 244 | "1: ll %1, %3 \n" |
266 | " bnez %1, 2f \n" | 245 | " bnez %1, 2f \n" |
@@ -277,7 +256,7 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
277 | : "memory"); | 256 | : "memory"); |
278 | } else { | 257 | } else { |
279 | __asm__ __volatile__( | 258 | __asm__ __volatile__( |
280 | " .set noreorder # _raw_write_trylock \n" | 259 | " .set noreorder # __raw_write_trylock \n" |
281 | " li %2, 0 \n" | 260 | " li %2, 0 \n" |
282 | "1: ll %1, %3 \n" | 261 | "1: ll %1, %3 \n" |
283 | " bnez %1, 2f \n" | 262 | " bnez %1, 2f \n" |
diff --git a/include/asm-mips/spinlock_types.h b/include/asm-mips/spinlock_types.h new file mode 100644 index 000000000000..ce26c5048b15 --- /dev/null +++ b/include/asm-mips/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _ASM_SPINLOCK_TYPES_H | ||
2 | #define _ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
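
The per-architecture spinlock_types.h headers introduced by this merge exist so that the generic <linux/spinlock_types.h> can layer the visible lock types on top of the arch-provided raw types. A simplified sketch of that layering (the real generic header also carries debug fields, and the exact #ifdef conditions may differ):

	typedef struct {
		raw_spinlock_t	raw_lock;	/* the arch type defined above */
	#ifdef CONFIG_PREEMPT
		unsigned int	break_lock;	/* moved out of the arch headers */
	#endif
	} spinlock_t;

	typedef struct {
		raw_rwlock_t	raw_lock;
	#ifdef CONFIG_PREEMPT
		unsigned int	break_lock;
	#endif
	} rwlock_t;

This is why the break_lock member disappears from the arch-specific definitions in the hunks above: it now lives in the generic wrapper rather than in each architecture's lock type.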
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index fb42f99f8527..7b5e64600bc8 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h | |||
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | #include <asm/asm.h> | 16 | #include <asm/asm.h> |
17 | #include <asm/mipsregs.h> | 17 | #include <asm/mipsregs.h> |
18 | #include <asm/offset.h> | 18 | #include <asm/asm-offsets.h> |
19 | 19 | ||
20 | .macro SAVE_AT | 20 | .macro SAVE_AT |
21 | .set push | 21 | .set push |
diff --git a/include/asm-mips/vr41xx/tb0287.h b/include/asm-mips/vr41xx/tb0287.h new file mode 100644 index 000000000000..dd9832313afe --- /dev/null +++ b/include/asm-mips/vr41xx/tb0287.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * tb0287.h, Include file for TANBAC TB0287 mini-ITX board. | ||
3 | * | ||
4 | * Copyright (C) 2005 Media Lab Inc. <ito@mlb.co.jp> | ||
5 | * | ||
6 | * This code is largely based on tb0219.h. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | */ | ||
22 | #ifndef __TANBAC_TB0287_H | ||
23 | #define __TANBAC_TB0287_H | ||
24 | |||
25 | #include <asm/vr41xx/vr41xx.h> | ||
26 | |||
27 | /* | ||
28 | * General-Purpose I/O Pin Number | ||
29 | */ | ||
30 | #define TB0287_PCI_SLOT_PIN 2 | ||
31 | #define TB0287_SM501_PIN 3 | ||
32 | #define TB0287_SIL680A_PIN 8 | ||
33 | #define TB0287_RTL8110_PIN 13 | ||
34 | |||
35 | /* | ||
36 | * Interrupt Number | ||
37 | */ | ||
38 | #define TB0287_PCI_SLOT_IRQ GIU_IRQ(TB0287_PCI_SLOT_PIN) | ||
39 | #define TB0287_SM501_IRQ GIU_IRQ(TB0287_SM501_PIN) | ||
40 | #define TB0287_SIL680A_IRQ GIU_IRQ(TB0287_SIL680A_PIN) | ||
41 | #define TB0287_RTL8110_IRQ GIU_IRQ(TB0287_RTL8110_PIN) | ||
42 | |||
43 | #endif /* __TANBAC_TB0287_H */ | ||
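
The GIU_IRQ() numbers defined above are what a TB0287 platform driver would hand to request_irq(). A hedged sketch using the 2.6-era handler signature; the handler name, flags and device cookie are illustrative only:

	#include <linux/interrupt.h>
	#include <asm/vr41xx/tb0287.h>

	static irqreturn_t tb0287_pci_intr(int irq, void *dev_id, struct pt_regs *regs)
	{
		/* acknowledge and service the PCI slot interrupt here */
		return IRQ_HANDLED;
	}

	static int __init tb0287_pci_irq_setup(void *dev)
	{
		/* shared, since several devices hang off the PCI slot line */
		return request_irq(TB0287_PCI_SLOT_IRQ, tb0287_pci_intr,
				   SA_SHIRQ, "tb0287-pci", dev);
	}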
diff --git a/include/asm-parisc/assembly.h b/include/asm-parisc/assembly.h index cbc286f49b36..30b023411fef 100644 --- a/include/asm-parisc/assembly.h +++ b/include/asm-parisc/assembly.h | |||
@@ -63,7 +63,7 @@ | |||
63 | .level 2.0w | 63 | .level 2.0w |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #include <asm/offsets.h> | 66 | #include <asm/asm-offsets.h> |
67 | #include <asm/page.h> | 67 | #include <asm/page.h> |
68 | 68 | ||
69 | #include <asm/asmregs.h> | 69 | #include <asm/asmregs.h> |
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h index e24f7579adb0..048a2c7fd0c0 100644 --- a/include/asm-parisc/atomic.h +++ b/include/asm-parisc/atomic.h | |||
@@ -24,19 +24,19 @@ | |||
24 | # define ATOMIC_HASH_SIZE 4 | 24 | # define ATOMIC_HASH_SIZE 4 |
25 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | 25 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) |
26 | 26 | ||
27 | extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | 27 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; |
28 | 28 | ||
29 | /* Can't use _raw_spin_lock_irq because of #include problems, so | 29 | /* Can't use raw_spin_lock_irq because of #include problems, so |
30 | * this is the substitute */ | 30 | * this is the substitute */ |
31 | #define _atomic_spin_lock_irqsave(l,f) do { \ | 31 | #define _atomic_spin_lock_irqsave(l,f) do { \ |
32 | spinlock_t *s = ATOMIC_HASH(l); \ | 32 | raw_spinlock_t *s = ATOMIC_HASH(l); \ |
33 | local_irq_save(f); \ | 33 | local_irq_save(f); \ |
34 | _raw_spin_lock(s); \ | 34 | __raw_spin_lock(s); \ |
35 | } while(0) | 35 | } while(0) |
36 | 36 | ||
37 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | 37 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ |
38 | spinlock_t *s = ATOMIC_HASH(l); \ | 38 | raw_spinlock_t *s = ATOMIC_HASH(l); \ |
39 | _raw_spin_unlock(s); \ | 39 | __raw_spin_unlock(s); \ |
40 | local_irq_restore(f); \ | 40 | local_irq_restore(f); \ |
41 | } while(0) | 41 | } while(0) |
42 | 42 | ||
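
Since PA-RISC has no atomic read-modify-write instruction, these hashed raw spinlocks are what serialise every atomic_t update; each atomic_t hashes by address onto one of the ATOMIC_HASH_SIZE locks. A sketch in the spirit of the operations defined later in this file, not a verbatim excerpt:

	static __inline__ void atomic_set(atomic_t *v, int i)
	{
		unsigned long flags;

		/* take the per-bucket raw lock selected by ATOMIC_HASH(v) */
		_atomic_spin_lock_irqsave(v, flags);
		v->counter = i;
		_atomic_spin_unlock_irqrestore(v, flags);
	}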
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h index 928e5ef850bd..af7db694b22d 100644 --- a/include/asm-parisc/bitops.h +++ b/include/asm-parisc/bitops.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _PARISC_BITOPS_H | 2 | #define _PARISC_BITOPS_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <asm/system.h> | 5 | #include <asm/spinlock.h> |
6 | #include <asm/byteorder.h> | 6 | #include <asm/byteorder.h> |
7 | #include <asm/atomic.h> | 7 | #include <asm/atomic.h> |
8 | 8 | ||
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h index 06732719d927..aa592d8c0e39 100644 --- a/include/asm-parisc/cacheflush.h +++ b/include/asm-parisc/cacheflush.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <asm/cache.h> /* for flush_user_dcache_range_asm() proto */ | ||
6 | 7 | ||
7 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". | 8 | /* The usual comment is "Caches aren't brain-dead on the <architecture>". |
8 | * Unfortunately, that doesn't apply to PA-RISC. */ | 9 | * Unfortunately, that doesn't apply to PA-RISC. */ |
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h index 0b61f51d8467..a9dfadd05658 100644 --- a/include/asm-parisc/processor.h +++ b/include/asm-parisc/processor.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/threads.h> | 13 | #include <linux/threads.h> |
14 | #include <linux/spinlock_types.h> | ||
14 | 15 | ||
15 | #include <asm/hardware.h> | 16 | #include <asm/hardware.h> |
16 | #include <asm/page.h> | 17 | #include <asm/page.h> |
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h index 679ea1c651ef..43eaa6e742e0 100644 --- a/include/asm-parisc/spinlock.h +++ b/include/asm-parisc/spinlock.h | |||
@@ -2,30 +2,25 @@ | |||
2 | #define __ASM_SPINLOCK_H | 2 | #define __ASM_SPINLOCK_H |
3 | 3 | ||
4 | #include <asm/system.h> | 4 | #include <asm/system.h> |
5 | #include <asm/processor.h> | ||
6 | #include <asm/spinlock_types.h> | ||
5 | 7 | ||
6 | /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked | 8 | /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked |
7 | * since it only has load-and-zero. Moreover, at least on some PA processors, | 9 | * since it only has load-and-zero. Moreover, at least on some PA processors, |
8 | * the semaphore address has to be 16-byte aligned. | 10 | * the semaphore address has to be 16-byte aligned. |
9 | */ | 11 | */ |
10 | 12 | ||
11 | #ifndef CONFIG_DEBUG_SPINLOCK | 13 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) |
12 | |||
13 | #define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
14 | #undef SPIN_LOCK_UNLOCKED | ||
15 | #define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED | ||
16 | |||
17 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
18 | |||
19 | static inline int spin_is_locked(spinlock_t *x) | ||
20 | { | 14 | { |
21 | volatile unsigned int *a = __ldcw_align(x); | 15 | volatile unsigned int *a = __ldcw_align(x); |
22 | return *a == 0; | 16 | return *a == 0; |
23 | } | 17 | } |
24 | 18 | ||
25 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 19 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
26 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 20 | #define __raw_spin_unlock_wait(x) \ |
21 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | ||
27 | 22 | ||
28 | static inline void _raw_spin_lock(spinlock_t *x) | 23 | static inline void __raw_spin_lock(raw_spinlock_t *x) |
29 | { | 24 | { |
30 | volatile unsigned int *a; | 25 | volatile unsigned int *a; |
31 | 26 | ||
@@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x) | |||
36 | mb(); | 31 | mb(); |
37 | } | 32 | } |
38 | 33 | ||
39 | static inline void _raw_spin_unlock(spinlock_t *x) | 34 | static inline void __raw_spin_unlock(raw_spinlock_t *x) |
40 | { | 35 | { |
41 | volatile unsigned int *a; | 36 | volatile unsigned int *a; |
42 | mb(); | 37 | mb(); |
@@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x) | |||
45 | mb(); | 40 | mb(); |
46 | } | 41 | } |
47 | 42 | ||
48 | static inline int _raw_spin_trylock(spinlock_t *x) | 43 | static inline int __raw_spin_trylock(raw_spinlock_t *x) |
49 | { | 44 | { |
50 | volatile unsigned int *a; | 45 | volatile unsigned int *a; |
51 | int ret; | 46 | int ret; |
@@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x) | |||
57 | 52 | ||
58 | return ret; | 53 | return ret; |
59 | } | 54 | } |
60 | |||
61 | #define spin_lock_own(LOCK, LOCATION) ((void)0) | ||
62 | |||
63 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
64 | |||
65 | #define SPINLOCK_MAGIC 0x1D244B3C | ||
66 | |||
67 | #define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL } | ||
68 | #undef SPIN_LOCK_UNLOCKED | ||
69 | #define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED | ||
70 | |||
71 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
72 | |||
73 | #define CHECK_LOCK(x) \ | ||
74 | do { \ | ||
75 | if (unlikely((x)->magic != SPINLOCK_MAGIC)) { \ | ||
76 | printk(KERN_ERR "%s:%d: spin_is_locked" \ | ||
77 | " on uninitialized spinlock %p.\n", \ | ||
78 | __FILE__, __LINE__, (x)); \ | ||
79 | } \ | ||
80 | } while(0) | ||
81 | |||
82 | #define spin_is_locked(x) \ | ||
83 | ({ \ | ||
84 | CHECK_LOCK(x); \ | ||
85 | volatile unsigned int *a = __ldcw_align(x); \ | ||
86 | if (unlikely((*a == 0) && (x)->babble)) { \ | ||
87 | (x)->babble--; \ | ||
88 | printk("KERN_WARNING \ | ||
89 | %s:%d: spin_is_locked(%s/%p) already" \ | ||
90 | " locked by %s:%d in %s at %p(%d)\n", \ | ||
91 | __FILE__,__LINE__, (x)->module, (x), \ | ||
92 | (x)->bfile, (x)->bline, (x)->task->comm,\ | ||
93 | (x)->previous, (x)->oncpu); \ | ||
94 | } \ | ||
95 | *a == 0; \ | ||
96 | }) | ||
97 | |||
98 | #define spin_unlock_wait(x) \ | ||
99 | do { \ | ||
100 | CHECK_LOCK(x); \ | ||
101 | volatile unsigned int *a = __ldcw_align(x); \ | ||
102 | if (unlikely((*a == 0) && (x)->babble)) { \ | ||
103 | (x)->babble--; \ | ||
104 | printk("KERN_WARNING \ | ||
105 | %s:%d: spin_unlock_wait(%s/%p)" \ | ||
106 | " owned by %s:%d in %s at %p(%d)\n", \ | ||
107 | __FILE__,__LINE__, (x)->module, (x), \ | ||
108 | (x)->bfile, (x)->bline, (x)->task->comm,\ | ||
109 | (x)->previous, (x)->oncpu); \ | ||
110 | } \ | ||
111 | barrier(); \ | ||
112 | } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0) | ||
113 | |||
114 | extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no); | ||
115 | extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int); | ||
116 | extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int); | ||
117 | |||
118 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
119 | |||
120 | #define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__) | ||
121 | #define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__) | ||
122 | #define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__) | ||
123 | |||
124 | /* just in case we need it */ | ||
125 | #define spin_lock_own(LOCK, LOCATION) \ | ||
126 | do { \ | ||
127 | volatile unsigned int *a = __ldcw_align(LOCK); \ | ||
128 | if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \ | ||
129 | printk("KERN_WARNING \ | ||
130 | %s: called on %d from %p but lock %s on %d\n", \ | ||
131 | LOCATION, smp_processor_id(), \ | ||
132 | __builtin_return_address(0), \ | ||
133 | (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \ | ||
134 | } while (0) | ||
135 | |||
136 | #endif /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
137 | 55 | ||
138 | /* | 56 | /* |
139 | * Read-write spinlocks, allowing multiple readers | 57 | * Read-write spinlocks, allowing multiple readers |
140 | * but only one writer. | 58 | * but only one writer. |
141 | */ | 59 | */ |
142 | typedef struct { | ||
143 | spinlock_t lock; | ||
144 | volatile int counter; | ||
145 | #ifdef CONFIG_PREEMPT | ||
146 | unsigned int break_lock; | ||
147 | #endif | ||
148 | } rwlock_t; | ||
149 | |||
150 | #define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 } | ||
151 | |||
152 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0) | ||
153 | 60 | ||
154 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 61 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
155 | 62 | ||
156 | /* read_lock, read_unlock are pretty straightforward. Of course it somehow | 63 | /* read_lock, read_unlock are pretty straightforward. Of course it somehow |
157 | * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */ | 64 | * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */ |
158 | 65 | ||
159 | #ifdef CONFIG_DEBUG_RWLOCK | 66 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) |
160 | extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline); | ||
161 | #define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__) | ||
162 | #else | ||
163 | static __inline__ void _raw_read_lock(rwlock_t *rw) | ||
164 | { | 67 | { |
165 | unsigned long flags; | 68 | unsigned long flags; |
166 | local_irq_save(flags); | 69 | local_irq_save(flags); |
167 | _raw_spin_lock(&rw->lock); | 70 | __raw_spin_lock(&rw->lock); |
168 | 71 | ||
169 | rw->counter++; | 72 | rw->counter++; |
170 | 73 | ||
171 | _raw_spin_unlock(&rw->lock); | 74 | __raw_spin_unlock(&rw->lock); |
172 | local_irq_restore(flags); | 75 | local_irq_restore(flags); |
173 | } | 76 | } |
174 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
175 | 77 | ||
176 | static __inline__ void _raw_read_unlock(rwlock_t *rw) | 78 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) |
177 | { | 79 | { |
178 | unsigned long flags; | 80 | unsigned long flags; |
179 | local_irq_save(flags); | 81 | local_irq_save(flags); |
180 | _raw_spin_lock(&rw->lock); | 82 | __raw_spin_lock(&rw->lock); |
181 | 83 | ||
182 | rw->counter--; | 84 | rw->counter--; |
183 | 85 | ||
184 | _raw_spin_unlock(&rw->lock); | 86 | __raw_spin_unlock(&rw->lock); |
185 | local_irq_restore(flags); | 87 | local_irq_restore(flags); |
186 | } | 88 | } |
187 | 89 | ||
@@ -194,20 +96,17 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw) | |||
194 | * writers) in interrupt handlers someone fucked up and we'd dead-lock | 96 | * writers) in interrupt handlers someone fucked up and we'd dead-lock |
195 | * sooner or later anyway. prumpf */ | 97 | * sooner or later anyway. prumpf */ |
196 | 98 | ||
197 | #ifdef CONFIG_DEBUG_RWLOCK | 99 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
198 | extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline); | ||
199 | #define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__) | ||
200 | #else | ||
201 | static __inline__ void _raw_write_lock(rwlock_t *rw) | ||
202 | { | 100 | { |
203 | retry: | 101 | retry: |
204 | _raw_spin_lock(&rw->lock); | 102 | __raw_spin_lock(&rw->lock); |
205 | 103 | ||
206 | if(rw->counter != 0) { | 104 | if(rw->counter != 0) { |
207 | /* this basically never happens */ | 105 | /* this basically never happens */ |
208 | _raw_spin_unlock(&rw->lock); | 106 | __raw_spin_unlock(&rw->lock); |
209 | 107 | ||
210 | while(rw->counter != 0); | 108 | while (rw->counter != 0) |
109 | cpu_relax(); | ||
211 | 110 | ||
212 | goto retry; | 111 | goto retry; |
213 | } | 112 | } |
@@ -215,26 +114,21 @@ retry: | |||
215 | /* got it. now leave without unlocking */ | 114 | /* got it. now leave without unlocking */ |
216 | rw->counter = -1; /* remember we are locked */ | 115 | rw->counter = -1; /* remember we are locked */ |
217 | } | 116 | } |
218 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
219 | 117 | ||
220 | /* write_unlock is absolutely trivial - we don't have to wait for anything */ | 118 | /* write_unlock is absolutely trivial - we don't have to wait for anything */ |
221 | 119 | ||
222 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | 120 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
223 | { | 121 | { |
224 | rw->counter = 0; | 122 | rw->counter = 0; |
225 | _raw_spin_unlock(&rw->lock); | 123 | __raw_spin_unlock(&rw->lock); |
226 | } | 124 | } |
227 | 125 | ||
228 | #ifdef CONFIG_DEBUG_RWLOCK | 126 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) |
229 | extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline); | ||
230 | #define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__) | ||
231 | #else | ||
232 | static __inline__ int _raw_write_trylock(rwlock_t *rw) | ||
233 | { | 127 | { |
234 | _raw_spin_lock(&rw->lock); | 128 | __raw_spin_lock(&rw->lock); |
235 | if (rw->counter != 0) { | 129 | if (rw->counter != 0) { |
236 | /* this basically never happens */ | 130 | /* this basically never happens */ |
237 | _raw_spin_unlock(&rw->lock); | 131 | __raw_spin_unlock(&rw->lock); |
238 | 132 | ||
239 | return 0; | 133 | return 0; |
240 | } | 134 | } |
@@ -243,14 +137,13 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw) | |||
243 | rw->counter = -1; /* remember we are locked */ | 137 | rw->counter = -1; /* remember we are locked */ |
244 | return 1; | 138 | return 1; |
245 | } | 139 | } |
246 | #endif /* CONFIG_DEBUG_RWLOCK */ | ||
247 | 140 | ||
248 | static __inline__ int is_read_locked(rwlock_t *rw) | 141 | static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw) |
249 | { | 142 | { |
250 | return rw->counter > 0; | 143 | return rw->counter > 0; |
251 | } | 144 | } |
252 | 145 | ||
253 | static __inline__ int is_write_locked(rwlock_t *rw) | 146 | static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw) |
254 | { | 147 | { |
255 | return rw->counter < 0; | 148 | return rw->counter < 0; |
256 | } | 149 | } |
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h new file mode 100644 index 000000000000..785bba822fbf --- /dev/null +++ b/include/asm-parisc/spinlock_types.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock[4]; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | ||
13 | |||
14 | typedef struct { | ||
15 | raw_spinlock_t lock; | ||
16 | volatile int counter; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } | ||
20 | |||
21 | #endif | ||
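
The lock word is an array of four because the PA-RISC ldcw (load-and-clear-word) instruction used by the lock routines needs a 16-byte aligned operand on at least some processors, as the comment in spinlock.h above notes. The __ldcw_align() helper used by those routines simply picks the suitably aligned word inside the array; roughly (the real macro lives in the PA-RISC system header and its exact spelling may differ):

	#define __PA_LDCW_ALIGNMENT	16
	#define __ldcw_align(a) ({						\
		unsigned long __ret = (unsigned long) &(a)->lock[0];		\
		__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)			\
			& ~(__PA_LDCW_ALIGNMENT - 1);				\
		(volatile unsigned int *) __ret;				\
	})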
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h index 81c543339036..26ff844a21c1 100644 --- a/include/asm-parisc/system.h +++ b/include/asm-parisc/system.h | |||
@@ -160,29 +160,7 @@ static inline void set_eiem(unsigned long val) | |||
160 | }) | 160 | }) |
161 | 161 | ||
162 | #ifdef CONFIG_SMP | 162 | #ifdef CONFIG_SMP |
163 | /* | 163 | # define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) |
164 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | ||
165 | */ | ||
166 | |||
167 | typedef struct { | ||
168 | volatile unsigned int lock[4]; | ||
169 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
170 | unsigned long magic; | ||
171 | volatile unsigned int babble; | ||
172 | const char *module; | ||
173 | char *bfile; | ||
174 | int bline; | ||
175 | int oncpu; | ||
176 | void *previous; | ||
177 | struct task_struct * task; | ||
178 | #endif | ||
179 | #ifdef CONFIG_PREEMPT | ||
180 | unsigned int break_lock; | ||
181 | #endif | ||
182 | } spinlock_t; | ||
183 | |||
184 | #define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) | ||
185 | |||
186 | #endif | 164 | #endif |
187 | 165 | ||
188 | #define KERNEL_START (0x10100000 - 0x1000) | 166 | #define KERNEL_START (0x10100000 - 0x1000) |
diff --git a/include/asm-powerpc/siginfo.h b/include/asm-powerpc/siginfo.h index 538ea8ef509b..12f1bce037be 100644 --- a/include/asm-powerpc/siginfo.h +++ b/include/asm-powerpc/siginfo.h | |||
@@ -15,4 +15,12 @@ | |||
15 | 15 | ||
16 | #include <asm-generic/siginfo.h> | 16 | #include <asm-generic/siginfo.h> |
17 | 17 | ||
18 | /* | ||
19 | * SIGTRAP si_codes | ||
20 | */ | ||
21 | #define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */ | ||
22 | #define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */ | ||
23 | #undef NSIGTRAP | ||
24 | #define NSIGTRAP 4 | ||
25 | |||
18 | #endif /* _ASM_POWERPC_SIGINFO_H */ | 26 | #endif /* _ASM_POWERPC_SIGINFO_H */ |
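
TRAP_HWBKPT is the si_code a SIGTRAP carries when the DABR-based breakpoint support added elsewhere in this merge fires. A hedged userspace sketch of inspecting it, assuming the constant is visible to the program (for example via the kernel headers):

	#include <signal.h>

	static volatile sig_atomic_t hit_hw_breakpoint;

	static void trap_handler(int sig, siginfo_t *si, void *uctx)
	{
		if (si->si_code == TRAP_HWBKPT)	/* hardware breakpoint or watchpoint */
			hit_hw_breakpoint = 1;
	}

	/* installed with SA_SIGINFO so si_code and si_addr are delivered:
	 *	struct sigaction sa = { .sa_sigaction = trap_handler,
	 *				.sa_flags = SA_SIGINFO };
	 *	sigaction(SIGTRAP, &sa, NULL);
	 */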
diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h index b4b270457edd..55752474d0d9 100644 --- a/include/asm-ppc/irq.h +++ b/include/asm-ppc/irq.h | |||
@@ -404,9 +404,5 @@ extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; | |||
404 | extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; | 404 | extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; |
405 | extern atomic_t ppc_n_lost_interrupts; | 405 | extern atomic_t ppc_n_lost_interrupts; |
406 | 406 | ||
407 | struct irqaction; | ||
408 | struct pt_regs; | ||
409 | int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); | ||
410 | |||
411 | #endif /* _ASM_IRQ_H */ | 407 | #endif /* _ASM_IRQ_H */ |
412 | #endif /* __KERNEL__ */ | 408 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-ppc/ptrace.h b/include/asm-ppc/ptrace.h index 9d4e4ea530c3..7043c164b537 100644 --- a/include/asm-ppc/ptrace.h +++ b/include/asm-ppc/ptrace.h | |||
@@ -142,4 +142,11 @@ do { \ | |||
142 | #define PTRACE_GETEVRREGS 20 | 142 | #define PTRACE_GETEVRREGS 20 |
143 | #define PTRACE_SETEVRREGS 21 | 143 | #define PTRACE_SETEVRREGS 21 |
144 | 144 | ||
145 | /* | ||
146 | * Get or set a debug register. The first 16 are DABR registers and the | ||
147 | * second 16 are IABR registers. | ||
148 | */ | ||
149 | #define PTRACE_GET_DEBUGREG 25 | ||
150 | #define PTRACE_SET_DEBUGREG 26 | ||
151 | |||
145 | #endif | 152 | #endif |
diff --git a/include/asm-ppc/reg.h b/include/asm-ppc/reg.h index 88b4222154d4..73c33e3ef9c6 100644 --- a/include/asm-ppc/reg.h +++ b/include/asm-ppc/reg.h | |||
@@ -366,12 +366,6 @@ | |||
366 | #define PVR_STB03XXX 0x40310000 | 366 | #define PVR_STB03XXX 0x40310000 |
367 | #define PVR_NP405H 0x41410000 | 367 | #define PVR_NP405H 0x41410000 |
368 | #define PVR_NP405L 0x41610000 | 368 | #define PVR_NP405L 0x41610000 |
369 | #define PVR_440GP_RB 0x40120440 | ||
370 | #define PVR_440GP_RC1 0x40120481 | ||
371 | #define PVR_440GP_RC2 0x40200481 | ||
372 | #define PVR_440GX_RA 0x51b21850 | ||
373 | #define PVR_440GX_RB 0x51b21851 | ||
374 | #define PVR_440GX_RC 0x51b21892 | ||
375 | #define PVR_601 0x00010000 | 369 | #define PVR_601 0x00010000 |
376 | #define PVR_602 0x00050000 | 370 | #define PVR_602 0x00050000 |
377 | #define PVR_603 0x00030000 | 371 | #define PVR_603 0x00030000 |
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h index 17530c232c76..829481c0a9dc 100644 --- a/include/asm-ppc/smp.h +++ b/include/asm-ppc/smp.h | |||
@@ -41,6 +41,10 @@ extern void smp_send_xmon_break(int cpu); | |||
41 | struct pt_regs; | 41 | struct pt_regs; |
42 | extern void smp_message_recv(int, struct pt_regs *); | 42 | extern void smp_message_recv(int, struct pt_regs *); |
43 | 43 | ||
44 | extern int __cpu_disable(void); | ||
45 | extern void __cpu_die(unsigned int cpu); | ||
46 | extern void cpu_die(void) __attribute__((noreturn)); | ||
47 | |||
44 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | 48 | #define NO_PROC_ID 0xFF /* No processor magic marker */ |
45 | #define PROC_CHANGE_PENALTY 20 | 49 | #define PROC_CHANGE_PENALTY 20 |
46 | 50 | ||
@@ -64,6 +68,8 @@ extern struct klock_info_struct klock_info; | |||
64 | 68 | ||
65 | #else /* !(CONFIG_SMP) */ | 69 | #else /* !(CONFIG_SMP) */ |
66 | 70 | ||
71 | static inline void cpu_die(void) { } | ||
72 | |||
67 | #endif /* !(CONFIG_SMP) */ | 73 | #endif /* !(CONFIG_SMP) */ |
68 | 74 | ||
69 | #endif /* !(_PPC_SMP_H) */ | 75 | #endif /* !(_PPC_SMP_H) */ |
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h index 909199aae104..20edcf2a6e0c 100644 --- a/include/asm-ppc/spinlock.h +++ b/include/asm-ppc/spinlock.h | |||
@@ -5,41 +5,21 @@ | |||
5 | 5 | ||
6 | /* | 6 | /* |
7 | * Simple spin lock operations. | 7 | * Simple spin lock operations. |
8 | * | ||
9 | * (the type definitions are in asm/spinlock_types.h) | ||
8 | */ | 10 | */ |
9 | 11 | ||
10 | typedef struct { | 12 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
11 | volatile unsigned long lock; | 13 | #define __raw_spin_unlock_wait(lock) \ |
12 | #ifdef CONFIG_DEBUG_SPINLOCK | 14 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
13 | volatile unsigned long owner_pc; | 15 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
14 | volatile unsigned long owner_cpu; | 16 | |
15 | #endif | 17 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
16 | #ifdef CONFIG_PREEMPT | ||
17 | unsigned int break_lock; | ||
18 | #endif | ||
19 | } spinlock_t; | ||
20 | |||
21 | #ifdef __KERNEL__ | ||
22 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
23 | #define SPINLOCK_DEBUG_INIT , 0, 0 | ||
24 | #else | ||
25 | #define SPINLOCK_DEBUG_INIT /* */ | ||
26 | #endif | ||
27 | |||
28 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 SPINLOCK_DEBUG_INIT } | ||
29 | |||
30 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
31 | #define spin_is_locked(x) ((x)->lock != 0) | ||
32 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | ||
33 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
34 | |||
35 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
36 | |||
37 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
38 | { | 18 | { |
39 | unsigned long tmp; | 19 | unsigned long tmp; |
40 | 20 | ||
41 | __asm__ __volatile__( | 21 | __asm__ __volatile__( |
42 | "b 1f # spin_lock\n\ | 22 | "b 1f # __raw_spin_lock\n\ |
43 | 2: lwzx %0,0,%1\n\ | 23 | 2: lwzx %0,0,%1\n\ |
44 | cmpwi 0,%0,0\n\ | 24 | cmpwi 0,%0,0\n\ |
45 | bne+ 2b\n\ | 25 | bne+ 2b\n\ |
@@ -55,21 +35,13 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
55 | : "cr0", "memory"); | 35 | : "cr0", "memory"); |
56 | } | 36 | } |
57 | 37 | ||
58 | static inline void _raw_spin_unlock(spinlock_t *lock) | 38 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
59 | { | 39 | { |
60 | __asm__ __volatile__("eieio # spin_unlock": : :"memory"); | 40 | __asm__ __volatile__("eieio # __raw_spin_unlock": : :"memory"); |
61 | lock->lock = 0; | 41 | lock->lock = 0; |
62 | } | 42 | } |
63 | 43 | ||
64 | #define _raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) | 44 | #define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock)) |
65 | |||
66 | #else | ||
67 | |||
68 | extern void _raw_spin_lock(spinlock_t *lock); | ||
69 | extern void _raw_spin_unlock(spinlock_t *lock); | ||
70 | extern int _raw_spin_trylock(spinlock_t *lock); | ||
71 | |||
72 | #endif | ||
73 | 45 | ||
74 | /* | 46 | /* |
75 | * Read-write spinlocks, allowing multiple readers | 47 | * Read-write spinlocks, allowing multiple readers |
@@ -81,22 +53,11 @@ extern int _raw_spin_trylock(spinlock_t *lock); | |||
81 | * irq-safe write-lock, but readers can get non-irqsafe | 53 | * irq-safe write-lock, but readers can get non-irqsafe |
82 | * read-locks. | 54 | * read-locks. |
83 | */ | 55 | */ |
84 | typedef struct { | ||
85 | volatile signed int lock; | ||
86 | #ifdef CONFIG_PREEMPT | ||
87 | unsigned int break_lock; | ||
88 | #endif | ||
89 | } rwlock_t; | ||
90 | 56 | ||
91 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | 57 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) |
92 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | 58 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
93 | 59 | ||
94 | #define read_can_lock(rw) ((rw)->lock >= 0) | 60 | static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) |
95 | #define write_can_lock(rw) (!(rw)->lock) | ||
96 | |||
97 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
98 | |||
99 | static __inline__ int _raw_read_trylock(rwlock_t *rw) | ||
100 | { | 61 | { |
101 | signed int tmp; | 62 | signed int tmp; |
102 | 63 | ||
@@ -116,7 +77,7 @@ static __inline__ int _raw_read_trylock(rwlock_t *rw) | |||
116 | return tmp > 0; | 77 | return tmp > 0; |
117 | } | 78 | } |
118 | 79 | ||
119 | static __inline__ void _raw_read_lock(rwlock_t *rw) | 80 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) |
120 | { | 81 | { |
121 | signed int tmp; | 82 | signed int tmp; |
122 | 83 | ||
@@ -137,7 +98,7 @@ static __inline__ void _raw_read_lock(rwlock_t *rw) | |||
137 | : "cr0", "memory"); | 98 | : "cr0", "memory"); |
138 | } | 99 | } |
139 | 100 | ||
140 | static __inline__ void _raw_read_unlock(rwlock_t *rw) | 101 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) |
141 | { | 102 | { |
142 | signed int tmp; | 103 | signed int tmp; |
143 | 104 | ||
@@ -153,7 +114,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw) | |||
153 | : "cr0", "memory"); | 114 | : "cr0", "memory"); |
154 | } | 115 | } |
155 | 116 | ||
156 | static __inline__ int _raw_write_trylock(rwlock_t *rw) | 117 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) |
157 | { | 118 | { |
158 | signed int tmp; | 119 | signed int tmp; |
159 | 120 | ||
@@ -173,7 +134,7 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw) | |||
173 | return tmp == 0; | 134 | return tmp == 0; |
174 | } | 135 | } |
175 | 136 | ||
176 | static __inline__ void _raw_write_lock(rwlock_t *rw) | 137 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
177 | { | 138 | { |
178 | signed int tmp; | 139 | signed int tmp; |
179 | 140 | ||
@@ -194,22 +155,10 @@ static __inline__ void _raw_write_lock(rwlock_t *rw) | |||
194 | : "cr0", "memory"); | 155 | : "cr0", "memory"); |
195 | } | 156 | } |
196 | 157 | ||
197 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | 158 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) |
198 | { | 159 | { |
199 | __asm__ __volatile__("eieio # write_unlock": : :"memory"); | 160 | __asm__ __volatile__("eieio # write_unlock": : :"memory"); |
200 | rw->lock = 0; | 161 | rw->lock = 0; |
201 | } | 162 | } |
202 | 163 | ||
203 | #else | ||
204 | |||
205 | extern void _raw_read_lock(rwlock_t *rw); | ||
206 | extern void _raw_read_unlock(rwlock_t *rw); | ||
207 | extern void _raw_write_lock(rwlock_t *rw); | ||
208 | extern void _raw_write_unlock(rwlock_t *rw); | ||
209 | extern int _raw_read_trylock(rwlock_t *rw); | ||
210 | extern int _raw_write_trylock(rwlock_t *rw); | ||
211 | |||
212 | #endif | ||
213 | |||
214 | #endif /* __ASM_SPINLOCK_H */ | 164 | #endif /* __ASM_SPINLOCK_H */ |
215 | #endif /* __KERNEL__ */ | ||
diff --git a/include/asm-ppc/spinlock_types.h b/include/asm-ppc/spinlock_types.h new file mode 100644 index 000000000000..7919ccc75b8a --- /dev/null +++ b/include/asm-ppc/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned long lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile signed int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h index 513a334c5810..d754ab570fe0 100644 --- a/include/asm-ppc/system.h +++ b/include/asm-ppc/system.h | |||
@@ -88,6 +88,7 @@ extern void *cacheable_memcpy(void *, const void *, unsigned int); | |||
88 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); | 88 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); |
89 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); | 89 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); |
90 | extern void die(const char *, struct pt_regs *, long); | 90 | extern void die(const char *, struct pt_regs *, long); |
91 | extern void _exception(int, struct pt_regs *, int, unsigned long); | ||
91 | #ifdef CONFIG_BOOKE_WDT | 92 | #ifdef CONFIG_BOOKE_WDT |
92 | extern u32 booke_wdt_enabled; | 93 | extern u32 booke_wdt_enabled; |
93 | extern u32 booke_wdt_period; | 94 | extern u32 booke_wdt_period; |
diff --git a/include/asm-ppc64/hvcall.h b/include/asm-ppc64/hvcall.h index 4f668a4baff0..ab7c3cf24888 100644 --- a/include/asm-ppc64/hvcall.h +++ b/include/asm-ppc64/hvcall.h | |||
@@ -56,6 +56,11 @@ | |||
56 | #define H_PP1 (1UL<<(63-62)) | 56 | #define H_PP1 (1UL<<(63-62)) |
57 | #define H_PP2 (1UL<<(63-63)) | 57 | #define H_PP2 (1UL<<(63-63)) |
58 | 58 | ||
59 | /* DABRX flags */ | ||
60 | #define H_DABRX_HYPERVISOR (1UL<<(63-61)) | ||
61 | #define H_DABRX_KERNEL (1UL<<(63-62)) | ||
62 | #define H_DABRX_USER (1UL<<(63-63)) | ||
63 | |||
59 | /* pSeries hypervisor opcodes */ | 64 | /* pSeries hypervisor opcodes */ |
60 | #define H_REMOVE 0x04 | 65 | #define H_REMOVE 0x04 |
61 | #define H_ENTER 0x08 | 66 | #define H_ENTER 0x08 |
@@ -101,6 +106,7 @@ | |||
101 | #define H_VIO_SIGNAL 0x104 | 106 | #define H_VIO_SIGNAL 0x104 |
102 | #define H_SEND_CRQ 0x108 | 107 | #define H_SEND_CRQ 0x108 |
103 | #define H_COPY_RDMA 0x110 | 108 | #define H_COPY_RDMA 0x110 |
109 | #define H_SET_XDABR 0x134 | ||
104 | #define H_STUFF_TCE 0x138 | 110 | #define H_STUFF_TCE 0x138 |
105 | #define H_PUT_TCE_INDIRECT 0x13C | 111 | #define H_PUT_TCE_INDIRECT 0x13C |
106 | #define H_VTERM_PARTNER_INFO 0x150 | 112 | #define H_VTERM_PARTNER_INFO 0x150 |
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h index 9a1ef4427ed2..8027160ec96d 100644 --- a/include/asm-ppc64/machdep.h +++ b/include/asm-ppc64/machdep.h | |||
@@ -88,6 +88,7 @@ struct machdep_calls { | |||
88 | 88 | ||
89 | /* PCI stuff */ | 89 | /* PCI stuff */ |
90 | void (*pcibios_fixup)(void); | 90 | void (*pcibios_fixup)(void); |
91 | int (*pci_probe_mode)(struct pci_bus *); | ||
91 | 92 | ||
92 | void (*restart)(char *cmd); | 93 | void (*restart)(char *cmd); |
93 | void (*power_off)(void); | 94 | void (*power_off)(void); |
@@ -173,10 +174,6 @@ extern sys_ctrler_t sys_ctrler; | |||
173 | void ppc64_boot_msg(unsigned int src, const char *msg); | 174 | void ppc64_boot_msg(unsigned int src, const char *msg); |
174 | /* Print a termination message (print only -- does not stop the kernel) */ | 175 | /* Print a termination message (print only -- does not stop the kernel) */ |
175 | void ppc64_terminate_msg(unsigned int src, const char *msg); | 176 | void ppc64_terminate_msg(unsigned int src, const char *msg); |
176 | /* Print something that needs attention (device error, etc) */ | ||
177 | void ppc64_attention_msg(unsigned int src, const char *msg); | ||
178 | /* Print a dump progress message. */ | ||
179 | void ppc64_dump_msg(unsigned int src, const char *msg); | ||
180 | 177 | ||
181 | static inline void log_error(char *buf, unsigned int err_type, int fatal) | 178 | static inline void log_error(char *buf, unsigned int err_type, int fatal) |
182 | { | 179 | { |
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h index 6b4a5b1f695e..d8991389ab39 100644 --- a/include/asm-ppc64/pci-bridge.h +++ b/include/asm-ppc64/pci-bridge.h | |||
@@ -119,5 +119,10 @@ static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) | |||
119 | return PCI_DN(busdn)->phb; | 119 | return PCI_DN(busdn)->phb; |
120 | } | 120 | } |
121 | 121 | ||
122 | /* Return values for ppc_md.pci_probe_mode function */ | ||
123 | #define PCI_PROBE_NONE -1 /* Don't look at this bus at all */ | ||
124 | #define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */ | ||
125 | #define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */ | ||
126 | |||
122 | #endif | 127 | #endif |
123 | #endif /* __KERNEL__ */ | 128 | #endif /* __KERNEL__ */ |
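
These constants are the return values of the new pci_probe_mode hook added to machdep_calls above. A platform would wire it up roughly as below; the function name is hypothetical and the real pSeries logic also inspects the device-tree node of the bus:

	static int example_pci_probe_mode(struct pci_bus *bus)
	{
		/* enumerate root buses from the device tree in this sketch,
		 * probe everything else the usual way */
		if (bus->self == NULL)
			return PCI_PROBE_DEVTREE;
		return PCI_PROBE_NORMAL;
	}

	/* in the platform's machdep_calls initialiser:
	 *	.pci_probe_mode = example_pci_probe_mode,
	 */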
diff --git a/include/asm-ppc64/plpar_wrappers.h b/include/asm-ppc64/plpar_wrappers.h index f4a5fb7d67c7..72dd2449ee76 100644 --- a/include/asm-ppc64/plpar_wrappers.h +++ b/include/asm-ppc64/plpar_wrappers.h | |||
@@ -107,5 +107,14 @@ static inline long plpar_put_term_char(unsigned long termno, | |||
107 | lbuf[1]); | 107 | lbuf[1]); |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline long plpar_set_xdabr(unsigned long address, unsigned long flags) | ||
111 | { | ||
112 | return plpar_hcall_norets(H_SET_XDABR, address, flags); | ||
113 | } | ||
114 | |||
115 | static inline long plpar_set_dabr(unsigned long val) | ||
116 | { | ||
117 | return plpar_hcall_norets(H_SET_DABR, val); | ||
118 | } | ||
110 | 119 | ||
111 | #endif /* _PPC64_PLPAR_WRAPPERS_H */ | 120 | #endif /* _PPC64_PLPAR_WRAPPERS_H */ |
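
Together with the H_DABRX_* flags added to hvcall.h above, these wrappers give the hypervisor path for setting a data address breakpoint on a logical partition. A sketch of how a platform might combine them; the helper name is hypothetical and real firmware-feature detection is more involved:

	static long pseries_set_dabr(unsigned long dabr)
	{
		/* try the extended call first, matching both kernel and user accesses */
		long rc = plpar_set_xdabr(dabr, H_DABRX_KERNEL | H_DABRX_USER);

		if (rc != 0)		/* fall back to the original H_SET_DABR hcall */
			rc = plpar_set_dabr(dabr);
		return rc;
	}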
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h index 8bd7aa959385..4146189006e3 100644 --- a/include/asm-ppc64/processor.h +++ b/include/asm-ppc64/processor.h | |||
@@ -433,6 +433,7 @@ struct thread_struct { | |||
433 | unsigned long start_tb; /* Start purr when proc switched in */ | 433 | unsigned long start_tb; /* Start purr when proc switched in */ |
434 | unsigned long accum_tb; /* Total accumulated purr for process */ | 434 | unsigned long accum_tb; /* Total accumulated purr for process */ |
435 | unsigned long vdso_base; /* base of the vDSO library */ | 435 | unsigned long vdso_base; /* base of the vDSO library */ |
436 | unsigned long dabr; /* Data address breakpoint register */ | ||
436 | #ifdef CONFIG_ALTIVEC | 437 | #ifdef CONFIG_ALTIVEC |
437 | /* Complete AltiVec register set */ | 438 | /* Complete AltiVec register set */ |
438 | vector128 vr[32] __attribute((aligned(16))); | 439 | vector128 vr[32] __attribute((aligned(16))); |
diff --git a/include/asm-ppc64/ptrace-common.h b/include/asm-ppc64/ptrace-common.h index af03547f9c7e..b1babb729673 100644 --- a/include/asm-ppc64/ptrace-common.h +++ b/include/asm-ppc64/ptrace-common.h | |||
@@ -11,6 +11,10 @@ | |||
11 | 11 | ||
12 | #ifndef _PPC64_PTRACE_COMMON_H | 12 | #ifndef _PPC64_PTRACE_COMMON_H |
13 | #define _PPC64_PTRACE_COMMON_H | 13 | #define _PPC64_PTRACE_COMMON_H |
14 | |||
15 | #include <linux/config.h> | ||
16 | #include <asm/system.h> | ||
17 | |||
14 | /* | 18 | /* |
15 | * Set of msr bits that gdb can change on behalf of a process. | 19 | * Set of msr bits that gdb can change on behalf of a process. |
16 | */ | 20 | */ |
@@ -69,4 +73,92 @@ static inline void clear_single_step(struct task_struct *task) | |||
69 | clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP); | 73 | clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP); |
70 | } | 74 | } |
71 | 75 | ||
76 | #ifdef CONFIG_ALTIVEC | ||
77 | /* | ||
78 | * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. | ||
79 | * The transfer totals 34 quadword. Quadwords 0-31 contain the | ||
80 | * corresponding vector registers. Quadword 32 contains the vscr as the | ||
81 | * last word (offset 12) within that quadword. Quadword 33 contains the | ||
82 | * vrsave as the first word (offset 0) within the quadword. | ||
83 | * | ||
84 | * This definition of the VMX state is compatible with the current PPC32 | ||
85 | * ptrace interface. This allows signal handling and ptrace to use the | ||
86 | * same structures. This also simplifies the implementation of a bi-arch | ||
87 | * (combined (32- and 64-bit) gdb. | ||
88 | * (combined 32- and 64-bit) gdb. | ||
89 | |||
90 | /* | ||
91 | * Get contents of AltiVec register state in task TASK | ||
92 | */ | ||
93 | static inline int get_vrregs(unsigned long __user *data, | ||
94 | struct task_struct *task) | ||
95 | { | ||
96 | unsigned long regsize; | ||
97 | |||
98 | /* copy AltiVec registers VR[0] .. VR[31] */ | ||
99 | regsize = 32 * sizeof(vector128); | ||
100 | if (copy_to_user(data, task->thread.vr, regsize)) | ||
101 | return -EFAULT; | ||
102 | data += (regsize / sizeof(unsigned long)); | ||
103 | |||
104 | /* copy VSCR */ | ||
105 | regsize = 1 * sizeof(vector128); | ||
106 | if (copy_to_user(data, &task->thread.vscr, regsize)) | ||
107 | return -EFAULT; | ||
108 | data += (regsize / sizeof(unsigned long)); | ||
109 | |||
110 | /* copy VRSAVE */ | ||
111 | if (put_user(task->thread.vrsave, (u32 __user *)data)) | ||
112 | return -EFAULT; | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Write contents of AltiVec register state into task TASK. | ||
119 | */ | ||
120 | static inline int set_vrregs(struct task_struct *task, | ||
121 | unsigned long __user *data) | ||
122 | { | ||
123 | unsigned long regsize; | ||
124 | |||
125 | /* copy AltiVec registers VR[0] .. VR[31] */ | ||
126 | regsize = 32 * sizeof(vector128); | ||
127 | if (copy_from_user(task->thread.vr, data, regsize)) | ||
128 | return -EFAULT; | ||
129 | data += (regsize / sizeof(unsigned long)); | ||
130 | |||
131 | /* copy VSCR */ | ||
132 | regsize = 1 * sizeof(vector128); | ||
133 | if (copy_from_user(&task->thread.vscr, data, regsize)) | ||
134 | return -EFAULT; | ||
135 | data += (regsize / sizeof(unsigned long)); | ||
136 | |||
137 | /* copy VRSAVE */ | ||
138 | if (get_user(task->thread.vrsave, (u32 __user *)data)) | ||
139 | return -EFAULT; | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | #endif | ||
144 | |||
145 | static inline int ptrace_set_debugreg(struct task_struct *task, | ||
146 | unsigned long addr, unsigned long data) | ||
147 | { | ||
148 | /* We only support one DABR and no IABRS at the moment */ | ||
149 | if (addr > 0) | ||
150 | return -EINVAL; | ||
151 | |||
152 | /* The bottom 3 bits are flags */ | ||
153 | if ((data & ~0x7UL) >= TASK_SIZE) | ||
154 | return -EIO; | ||
155 | |||
156 | /* Ensure translation is on */ | ||
157 | if (data && !(data & DABR_TRANSLATION)) | ||
158 | return -EIO; | ||
159 | |||
160 | task->thread.dabr = data; | ||
161 | return 0; | ||
162 | } | ||
163 | |||
72 | #endif /* _PPC64_PTRACE_COMMON_H */ | 164 | #endif /* _PPC64_PTRACE_COMMON_H */ |
diff --git a/include/asm-ppc64/ptrace.h b/include/asm-ppc64/ptrace.h index c96aad28fc08..3a55377f1fd3 100644 --- a/include/asm-ppc64/ptrace.h +++ b/include/asm-ppc64/ptrace.h | |||
@@ -25,56 +25,49 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #ifndef __ASSEMBLY__ | 27 | #ifndef __ASSEMBLY__ |
28 | #define PPC_REG unsigned long | 28 | |
29 | struct pt_regs { | 29 | struct pt_regs { |
30 | PPC_REG gpr[32]; | 30 | unsigned long gpr[32]; |
31 | PPC_REG nip; | 31 | unsigned long nip; |
32 | PPC_REG msr; | 32 | unsigned long msr; |
33 | PPC_REG orig_gpr3; /* Used for restarting system calls */ | 33 | unsigned long orig_gpr3; /* Used for restarting system calls */ |
34 | PPC_REG ctr; | 34 | unsigned long ctr; |
35 | PPC_REG link; | 35 | unsigned long link; |
36 | PPC_REG xer; | 36 | unsigned long xer; |
37 | PPC_REG ccr; | 37 | unsigned long ccr; |
38 | PPC_REG softe; /* Soft enabled/disabled */ | 38 | unsigned long softe; /* Soft enabled/disabled */ |
39 | PPC_REG trap; /* Reason for being here */ | 39 | unsigned long trap; /* Reason for being here */ |
40 | PPC_REG dar; /* Fault registers */ | 40 | unsigned long dar; /* Fault registers */ |
41 | PPC_REG dsisr; | 41 | unsigned long dsisr; |
42 | PPC_REG result; /* Result of a system call */ | 42 | unsigned long result; /* Result of a system call */ |
43 | }; | 43 | }; |
44 | 44 | ||
45 | #define PPC_REG_32 unsigned int | ||
46 | struct pt_regs32 { | 45 | struct pt_regs32 { |
47 | PPC_REG_32 gpr[32]; | 46 | unsigned int gpr[32]; |
48 | PPC_REG_32 nip; | 47 | unsigned int nip; |
49 | PPC_REG_32 msr; | 48 | unsigned int msr; |
50 | PPC_REG_32 orig_gpr3; /* Used for restarting system calls */ | 49 | unsigned int orig_gpr3; /* Used for restarting system calls */ |
51 | PPC_REG_32 ctr; | 50 | unsigned int ctr; |
52 | PPC_REG_32 link; | 51 | unsigned int link; |
53 | PPC_REG_32 xer; | 52 | unsigned int xer; |
54 | PPC_REG_32 ccr; | 53 | unsigned int ccr; |
55 | PPC_REG_32 mq; /* 601 only (not used at present) */ | 54 | unsigned int mq; /* 601 only (not used at present) */ |
56 | /* Used on APUS to hold IPL value. */ | 55 | unsigned int trap; /* Reason for being here */ |
57 | PPC_REG_32 trap; /* Reason for being here */ | 56 | unsigned int dar; /* Fault registers */ |
58 | PPC_REG_32 dar; /* Fault registers */ | 57 | unsigned int dsisr; |
59 | PPC_REG_32 dsisr; | 58 | unsigned int result; /* Result of a system call */ |
60 | PPC_REG_32 result; /* Result of a system call */ | ||
61 | }; | 59 | }; |
62 | 60 | ||
61 | #ifdef __KERNEL__ | ||
62 | |||
63 | #define instruction_pointer(regs) ((regs)->nip) | 63 | #define instruction_pointer(regs) ((regs)->nip) |
64 | |||
64 | #ifdef CONFIG_SMP | 65 | #ifdef CONFIG_SMP |
65 | extern unsigned long profile_pc(struct pt_regs *regs); | 66 | extern unsigned long profile_pc(struct pt_regs *regs); |
66 | #else | 67 | #else |
67 | #define profile_pc(regs) instruction_pointer(regs) | 68 | #define profile_pc(regs) instruction_pointer(regs) |
68 | #endif | 69 | #endif |
69 | 70 | ||
70 | #endif /* __ASSEMBLY__ */ | ||
71 | |||
72 | #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ | ||
73 | |||
74 | /* Size of dummy stack frame allocated when calling signal handler. */ | ||
75 | #define __SIGNAL_FRAMESIZE 128 | ||
76 | #define __SIGNAL_FRAMESIZE32 64 | ||
77 | |||
78 | #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1) | 71 | #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1) |
79 | 72 | ||
80 | #define force_successful_syscall_return() \ | 73 | #define force_successful_syscall_return() \ |
@@ -89,6 +82,16 @@ extern unsigned long profile_pc(struct pt_regs *regs); | |||
89 | #define TRAP(regs) ((regs)->trap & ~0xF) | 82 | #define TRAP(regs) ((regs)->trap & ~0xF) |
90 | #define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1) | 83 | #define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1) |
91 | 84 | ||
85 | #endif /* __KERNEL__ */ | ||
86 | |||
87 | #endif /* __ASSEMBLY__ */ | ||
88 | |||
89 | #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ | ||
90 | |||
91 | /* Size of dummy stack frame allocated when calling signal handler. */ | ||
92 | #define __SIGNAL_FRAMESIZE 128 | ||
93 | #define __SIGNAL_FRAMESIZE32 64 | ||
94 | |||
92 | /* | 95 | /* |
93 | * Offsets used by 'ptrace' system call interface. | 96 | * Offsets used by 'ptrace' system call interface. |
94 | */ | 97 | */ |
@@ -135,17 +138,21 @@ extern unsigned long profile_pc(struct pt_regs *regs); | |||
135 | #define PT_XER 37 | 138 | #define PT_XER 37 |
136 | #define PT_CCR 38 | 139 | #define PT_CCR 38 |
137 | #define PT_SOFTE 39 | 140 | #define PT_SOFTE 39 |
141 | #define PT_TRAP 40 | ||
142 | #define PT_DAR 41 | ||
143 | #define PT_DSISR 42 | ||
138 | #define PT_RESULT 43 | 144 | #define PT_RESULT 43 |
139 | 145 | ||
140 | #define PT_FPR0 48 | 146 | #define PT_FPR0 48 |
141 | 147 | ||
142 | /* Kernel and userspace will both use this PT_FPSCR value. 32-bit apps will have | 148 | /* |
143 | * visibility to the asm-ppc/ptrace.h header instead of this one. | 149 | * Kernel and userspace will both use this PT_FPSCR value. 32-bit apps will |
150 | * have visibility to the asm-ppc/ptrace.h header instead of this one. | ||
144 | */ | 151 | */ |
145 | #define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */ | 152 | #define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */ |
146 | 153 | ||
147 | #ifdef __KERNEL__ | 154 | #ifdef __KERNEL__ |
148 | #define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */ | 155 | #define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */ |
149 | #endif | 156 | #endif |
150 | 157 | ||
151 | #define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */ | 158 | #define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */ |
@@ -173,17 +180,34 @@ extern unsigned long profile_pc(struct pt_regs *regs); | |||
173 | #define PTRACE_GETVRREGS 18 | 180 | #define PTRACE_GETVRREGS 18 |
174 | #define PTRACE_SETVRREGS 19 | 181 | #define PTRACE_SETVRREGS 19 |
175 | 182 | ||
176 | /* Additional PTRACE requests implemented on PowerPC. */ | 183 | /* |
177 | #define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */ | 184 | * While we dont have 64bit book E processors, we need to reserve the |
178 | #define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */ | 185 | * relevant ptrace calls for 32bit compatibility. |
179 | #define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */ | 186 | */ |
180 | #define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */ | 187 | #if 0 |
181 | #define PPC_PTRACE_PEEKTEXT_3264 0x95 /* Read word at location ADDR on a 64-bit process from a 32-bit process. */ | 188 | #define PTRACE_GETEVRREGS 20 |
182 | #define PPC_PTRACE_PEEKDATA_3264 0x94 /* Read word at location ADDR on a 64-bit process from a 32-bit process. */ | 189 | #define PTRACE_SETEVRREGS 21 |
183 | #define PPC_PTRACE_POKETEXT_3264 0x93 /* Write word at location ADDR on a 64-bit process from a 32-bit process. */ | 190 | #endif |
184 | #define PPC_PTRACE_POKEDATA_3264 0x92 /* Write word at location ADDR on a 64-bit process from a 32-bit process. */ | ||
185 | #define PPC_PTRACE_PEEKUSR_3264 0x91 /* Read a register (specified by ADDR) out of the "user area" on a 64-bit process from a 32-bit process. */ | ||
186 | #define PPC_PTRACE_POKEUSR_3264 0x90 /* Write DATA into location ADDR within the "user area" on a 64-bit process from a 32-bit process. */ | ||
187 | 191 | ||
192 | /* | ||
193 | * Get or set a debug register. The first 16 are DABR registers and the | ||
194 | * second 16 are IABR registers. | ||
195 | */ | ||
196 | #define PTRACE_GET_DEBUGREG 25 | ||
197 | #define PTRACE_SET_DEBUGREG 26 | ||
198 | |||
199 | /* Additional PTRACE requests implemented on PowerPC. */ | ||
200 | #define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */ | ||
201 | #define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */ | ||
202 | #define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */ | ||
203 | #define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */ | ||
204 | |||
205 | /* Calls to trace a 64bit program from a 32bit program */ | ||
206 | #define PPC_PTRACE_PEEKTEXT_3264 0x95 | ||
207 | #define PPC_PTRACE_PEEKDATA_3264 0x94 | ||
208 | #define PPC_PTRACE_POKETEXT_3264 0x93 | ||
209 | #define PPC_PTRACE_POKEDATA_3264 0x92 | ||
210 | #define PPC_PTRACE_PEEKUSR_3264 0x91 | ||
211 | #define PPC_PTRACE_POKEUSR_3264 0x90 | ||
188 | 212 | ||
189 | #endif /* _PPC64_PTRACE_H */ | 213 | #endif /* _PPC64_PTRACE_H */ |
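The hunk above adds PTRACE_GET_DEBUGREG/PTRACE_SET_DEBUGREG (25/26) alongside the existing PPC-specific requests. Below is a minimal userspace sketch of driving the new request; it assumes the glibc ptrace(2) wrapper, a tracee that is already attached and stopped, and the ppc64 convention that the register value travels in the data argument. The helper name and the DABR bit choices are illustrative, not taken from this patch.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SET_DEBUGREG
#define PTRACE_SET_DEBUGREG 26          /* value defined in this header */
#endif

/* Ask the kernel to program the tracee's DABR so that accesses to
 * 'watch_addr' raise a debug exception.  Illustrative helper only. */
static int set_data_breakpoint(pid_t pid, unsigned long watch_addr)
{
        /* The low bits of the DABR are enable bits; 0x7 is commonly used
         * to match both loads and stores on a translated address. */
        unsigned long dabr = (watch_addr & ~7UL) | 0x7;

        if (ptrace((enum __ptrace_request) PTRACE_SET_DEBUGREG,
                   pid, (void *) 0, (void *) dabr) == -1) {
                perror("PTRACE_SET_DEBUGREG");
                return -1;
        }
        return 0;
}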
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h index acd11564dd75..14cb895bb607 100644 --- a/include/asm-ppc64/spinlock.h +++ b/include/asm-ppc64/spinlock.h | |||
@@ -15,36 +15,42 @@ | |||
15 | * modify it under the terms of the GNU General Public License | 15 | * modify it under the terms of the GNU General Public License |
16 | * as published by the Free Software Foundation; either version | 16 | * as published by the Free Software Foundation; either version |
17 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
18 | * | ||
19 | * (the type definitions are in asm/spinlock_types.h) | ||
18 | */ | 20 | */ |
19 | #include <linux/config.h> | 21 | #include <linux/config.h> |
20 | #include <asm/paca.h> | 22 | #include <asm/paca.h> |
21 | #include <asm/hvcall.h> | 23 | #include <asm/hvcall.h> |
22 | #include <asm/iSeries/HvCall.h> | 24 | #include <asm/iSeries/HvCall.h> |
23 | 25 | ||
24 | typedef struct { | 26 | #define __raw_spin_is_locked(x) ((x)->slock != 0) |
25 | volatile unsigned int lock; | ||
26 | #ifdef CONFIG_PREEMPT | ||
27 | unsigned int break_lock; | ||
28 | #endif | ||
29 | } spinlock_t; | ||
30 | 27 | ||
31 | typedef struct { | 28 | /* |
32 | volatile signed int lock; | 29 | * This returns the old value in the lock, so we succeeded |
33 | #ifdef CONFIG_PREEMPT | 30 | * in getting the lock if the return value is 0. |
34 | unsigned int break_lock; | 31 | */ |
35 | #endif | 32 | static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) |
36 | } rwlock_t; | 33 | { |
34 | unsigned long tmp, tmp2; | ||
37 | 35 | ||
38 | #ifdef __KERNEL__ | 36 | __asm__ __volatile__( |
39 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 37 | " lwz %1,%3(13) # __spin_trylock\n\ |
38 | 1: lwarx %0,0,%2\n\ | ||
39 | cmpwi 0,%0,0\n\ | ||
40 | bne- 2f\n\ | ||
41 | stwcx. %1,0,%2\n\ | ||
42 | bne- 1b\n\ | ||
43 | isync\n\ | ||
44 | 2:" : "=&r" (tmp), "=&r" (tmp2) | ||
45 | : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token)) | ||
46 | : "cr0", "memory"); | ||
40 | 47 | ||
41 | #define spin_is_locked(x) ((x)->lock != 0) | 48 | return tmp; |
42 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | 49 | } |
43 | 50 | ||
44 | static __inline__ void _raw_spin_unlock(spinlock_t *lock) | 51 | static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) |
45 | { | 52 | { |
46 | __asm__ __volatile__("lwsync # spin_unlock": : :"memory"); | 53 | return __spin_trylock(lock) == 0; |
47 | lock->lock = 0; | ||
48 | } | 54 | } |
49 | 55 | ||
50 | /* | 56 | /* |
@@ -64,44 +70,15 @@ static __inline__ void _raw_spin_unlock(spinlock_t *lock) | |||
64 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) | 70 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) |
65 | /* We only yield to the hypervisor if we are in shared processor mode */ | 71 | /* We only yield to the hypervisor if we are in shared processor mode */ |
66 | #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) | 72 | #define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) |
67 | extern void __spin_yield(spinlock_t *lock); | 73 | extern void __spin_yield(raw_spinlock_t *lock); |
68 | extern void __rw_yield(rwlock_t *lock); | 74 | extern void __rw_yield(raw_rwlock_t *lock); |
69 | #else /* SPLPAR || ISERIES */ | 75 | #else /* SPLPAR || ISERIES */ |
70 | #define __spin_yield(x) barrier() | 76 | #define __spin_yield(x) barrier() |
71 | #define __rw_yield(x) barrier() | 77 | #define __rw_yield(x) barrier() |
72 | #define SHARED_PROCESSOR 0 | 78 | #define SHARED_PROCESSOR 0 |
73 | #endif | 79 | #endif |
74 | extern void spin_unlock_wait(spinlock_t *lock); | ||
75 | |||
76 | /* | ||
77 | * This returns the old value in the lock, so we succeeded | ||
78 | * in getting the lock if the return value is 0. | ||
79 | */ | ||
80 | static __inline__ unsigned long __spin_trylock(spinlock_t *lock) | ||
81 | { | ||
82 | unsigned long tmp, tmp2; | ||
83 | |||
84 | __asm__ __volatile__( | ||
85 | " lwz %1,%3(13) # __spin_trylock\n\ | ||
86 | 1: lwarx %0,0,%2\n\ | ||
87 | cmpwi 0,%0,0\n\ | ||
88 | bne- 2f\n\ | ||
89 | stwcx. %1,0,%2\n\ | ||
90 | bne- 1b\n\ | ||
91 | isync\n\ | ||
92 | 2:" : "=&r" (tmp), "=&r" (tmp2) | ||
93 | : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token)) | ||
94 | : "cr0", "memory"); | ||
95 | |||
96 | return tmp; | ||
97 | } | ||
98 | |||
99 | static int __inline__ _raw_spin_trylock(spinlock_t *lock) | ||
100 | { | ||
101 | return __spin_trylock(lock) == 0; | ||
102 | } | ||
103 | 80 | ||
104 | static void __inline__ _raw_spin_lock(spinlock_t *lock) | 81 | static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) |
105 | { | 82 | { |
106 | while (1) { | 83 | while (1) { |
107 | if (likely(__spin_trylock(lock) == 0)) | 84 | if (likely(__spin_trylock(lock) == 0)) |
@@ -110,12 +87,12 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock) | |||
110 | HMT_low(); | 87 | HMT_low(); |
111 | if (SHARED_PROCESSOR) | 88 | if (SHARED_PROCESSOR) |
112 | __spin_yield(lock); | 89 | __spin_yield(lock); |
113 | } while (unlikely(lock->lock != 0)); | 90 | } while (unlikely(lock->slock != 0)); |
114 | HMT_medium(); | 91 | HMT_medium(); |
115 | } | 92 | } |
116 | } | 93 | } |
117 | 94 | ||
118 | static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | 95 | static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
119 | { | 96 | { |
120 | unsigned long flags_dis; | 97 | unsigned long flags_dis; |
121 | 98 | ||
@@ -128,12 +105,20 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag | |||
128 | HMT_low(); | 105 | HMT_low(); |
129 | if (SHARED_PROCESSOR) | 106 | if (SHARED_PROCESSOR) |
130 | __spin_yield(lock); | 107 | __spin_yield(lock); |
131 | } while (unlikely(lock->lock != 0)); | 108 | } while (unlikely(lock->slock != 0)); |
132 | HMT_medium(); | 109 | HMT_medium(); |
133 | local_irq_restore(flags_dis); | 110 | local_irq_restore(flags_dis); |
134 | } | 111 | } |
135 | } | 112 | } |
136 | 113 | ||
114 | static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) | ||
115 | { | ||
116 | __asm__ __volatile__("lwsync # __raw_spin_unlock": : :"memory"); | ||
117 | lock->slock = 0; | ||
118 | } | ||
119 | |||
120 | extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | ||
121 | |||
137 | /* | 122 | /* |
138 | * Read-write spinlocks, allowing multiple readers | 123 | * Read-write spinlocks, allowing multiple readers |
139 | * but only one writer. | 124 | * but only one writer. |
@@ -144,24 +129,15 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag | |||
144 | * irq-safe write-lock, but readers can get non-irqsafe | 129 | * irq-safe write-lock, but readers can get non-irqsafe |
145 | * read-locks. | 130 | * read-locks. |
146 | */ | 131 | */ |
147 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
148 | 132 | ||
149 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | 133 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) |
150 | 134 | #define __raw_write_can_lock(rw) (!(rw)->lock) | |
151 | #define read_can_lock(rw) ((rw)->lock >= 0) | ||
152 | #define write_can_lock(rw) (!(rw)->lock) | ||
153 | |||
154 | static __inline__ void _raw_write_unlock(rwlock_t *rw) | ||
155 | { | ||
156 | __asm__ __volatile__("lwsync # write_unlock": : :"memory"); | ||
157 | rw->lock = 0; | ||
158 | } | ||
159 | 135 | ||
160 | /* | 136 | /* |
161 | * This returns the old value in the lock + 1, | 137 | * This returns the old value in the lock + 1, |
162 | * so we got a read lock if the return value is > 0. | 138 | * so we got a read lock if the return value is > 0. |
163 | */ | 139 | */ |
164 | static long __inline__ __read_trylock(rwlock_t *rw) | 140 | static long __inline__ __read_trylock(raw_rwlock_t *rw) |
165 | { | 141 | { |
166 | long tmp; | 142 | long tmp; |
167 | 143 | ||
@@ -180,45 +156,11 @@ static long __inline__ __read_trylock(rwlock_t *rw) | |||
180 | return tmp; | 156 | return tmp; |
181 | } | 157 | } |
182 | 158 | ||
183 | static int __inline__ _raw_read_trylock(rwlock_t *rw) | ||
184 | { | ||
185 | return __read_trylock(rw) > 0; | ||
186 | } | ||
187 | |||
188 | static void __inline__ _raw_read_lock(rwlock_t *rw) | ||
189 | { | ||
190 | while (1) { | ||
191 | if (likely(__read_trylock(rw) > 0)) | ||
192 | break; | ||
193 | do { | ||
194 | HMT_low(); | ||
195 | if (SHARED_PROCESSOR) | ||
196 | __rw_yield(rw); | ||
197 | } while (unlikely(rw->lock < 0)); | ||
198 | HMT_medium(); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static void __inline__ _raw_read_unlock(rwlock_t *rw) | ||
203 | { | ||
204 | long tmp; | ||
205 | |||
206 | __asm__ __volatile__( | ||
207 | "eieio # read_unlock\n\ | ||
208 | 1: lwarx %0,0,%1\n\ | ||
209 | addic %0,%0,-1\n\ | ||
210 | stwcx. %0,0,%1\n\ | ||
211 | bne- 1b" | ||
212 | : "=&r"(tmp) | ||
213 | : "r"(&rw->lock) | ||
214 | : "cr0", "memory"); | ||
215 | } | ||
216 | |||
217 | /* | 159 | /* |
218 | * This returns the old value in the lock, | 160 | * This returns the old value in the lock, |
219 | * so we got the write lock if the return value is 0. | 161 | * so we got the write lock if the return value is 0. |
220 | */ | 162 | */ |
221 | static __inline__ long __write_trylock(rwlock_t *rw) | 163 | static __inline__ long __write_trylock(raw_rwlock_t *rw) |
222 | { | 164 | { |
223 | long tmp, tmp2; | 165 | long tmp, tmp2; |
224 | 166 | ||
@@ -237,12 +179,21 @@ static __inline__ long __write_trylock(rwlock_t *rw) | |||
237 | return tmp; | 179 | return tmp; |
238 | } | 180 | } |
239 | 181 | ||
240 | static int __inline__ _raw_write_trylock(rwlock_t *rw) | 182 | static void __inline__ __raw_read_lock(raw_rwlock_t *rw) |
241 | { | 183 | { |
242 | return __write_trylock(rw) == 0; | 184 | while (1) { |
185 | if (likely(__read_trylock(rw) > 0)) | ||
186 | break; | ||
187 | do { | ||
188 | HMT_low(); | ||
189 | if (SHARED_PROCESSOR) | ||
190 | __rw_yield(rw); | ||
191 | } while (unlikely(rw->lock < 0)); | ||
192 | HMT_medium(); | ||
193 | } | ||
243 | } | 194 | } |
244 | 195 | ||
245 | static void __inline__ _raw_write_lock(rwlock_t *rw) | 196 | static void __inline__ __raw_write_lock(raw_rwlock_t *rw) |
246 | { | 197 | { |
247 | while (1) { | 198 | while (1) { |
248 | if (likely(__write_trylock(rw) == 0)) | 199 | if (likely(__write_trylock(rw) == 0)) |
@@ -256,5 +207,35 @@ static void __inline__ _raw_write_lock(rwlock_t *rw) | |||
256 | } | 207 | } |
257 | } | 208 | } |
258 | 209 | ||
259 | #endif /* __KERNEL__ */ | 210 | static int __inline__ __raw_read_trylock(raw_rwlock_t *rw) |
211 | { | ||
212 | return __read_trylock(rw) > 0; | ||
213 | } | ||
214 | |||
215 | static int __inline__ __raw_write_trylock(raw_rwlock_t *rw) | ||
216 | { | ||
217 | return __write_trylock(rw) == 0; | ||
218 | } | ||
219 | |||
220 | static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) | ||
221 | { | ||
222 | long tmp; | ||
223 | |||
224 | __asm__ __volatile__( | ||
225 | "eieio # read_unlock\n\ | ||
226 | 1: lwarx %0,0,%1\n\ | ||
227 | addic %0,%0,-1\n\ | ||
228 | stwcx. %0,0,%1\n\ | ||
229 | bne- 1b" | ||
230 | : "=&r"(tmp) | ||
231 | : "r"(&rw->lock) | ||
232 | : "cr0", "memory"); | ||
233 | } | ||
234 | |||
235 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | ||
236 | { | ||
237 | __asm__ __volatile__("lwsync # write_unlock": : :"memory"); | ||
238 | rw->lock = 0; | ||
239 | } | ||
240 | |||
260 | #endif /* __ASM_SPINLOCK_H */ | 241 | #endif /* __ASM_SPINLOCK_H */ |
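The rewritten ppc64 lock path above splits cleanly into a trylock that returns the old lock word (0 means the caller won) and a lock loop that spins on plain loads until the word reads 0 again, yielding to the hypervisor when running on a shared processor. A rough, arch-neutral sketch of that shape, using GCC atomic builtins instead of lwarx/stwcx. and omitting the HMT_low()/SHARED_PROCESSOR yield, is:

typedef struct { volatile unsigned int slock; } demo_spinlock_t;

/* Returns the previous value of the lock word: 0 on success, non-zero
 * if somebody else already holds the lock (same convention as
 * __spin_trylock() above). */
static inline unsigned int demo_trylock(demo_spinlock_t *lock)
{
        return __sync_val_compare_and_swap(&lock->slock, 0u, 1u);
}

static inline void demo_lock(demo_spinlock_t *lock)
{
        while (1) {
                if (demo_trylock(lock) == 0)
                        return;
                /* Spin with plain loads, like the lock->slock != 0 loop,
                 * so the cache line is not bounced by failed stores. */
                while (lock->slock != 0)
                        __asm__ __volatile__("" ::: "memory");
        }
}

static inline void demo_unlock(demo_spinlock_t *lock)
{
        __sync_lock_release(&lock->slock);      /* release-ordered store of 0 */
}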
diff --git a/include/asm-ppc64/spinlock_types.h b/include/asm-ppc64/spinlock_types.h new file mode 100644 index 000000000000..a37c8eabb9f2 --- /dev/null +++ b/include/asm-ppc64/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile signed int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
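The new spinlock_types.h carries only the raw types and their unlocked initializers; everything else is meant to reach them through <linux/spinlock_types.h>, which is what the #error guard enforces. A short sketch of the intended consumption (the variable names are made up):

#include <linux/spinlock_types.h>       /* pulls in asm/spinlock_types.h */

static raw_spinlock_t demo_lock   = __RAW_SPIN_LOCK_UNLOCKED;
static raw_rwlock_t   demo_rwlock = __RAW_RW_LOCK_UNLOCKED;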
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h index c0396428cc3c..375015c62f20 100644 --- a/include/asm-ppc64/system.h +++ b/include/asm-ppc64/system.h | |||
@@ -101,6 +101,9 @@ static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } | |||
101 | static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } | 101 | static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } |
102 | #endif | 102 | #endif |
103 | 103 | ||
104 | extern int set_dabr(unsigned long dabr); | ||
105 | extern void _exception(int signr, struct pt_regs *regs, int code, | ||
106 | unsigned long addr); | ||
104 | extern int fix_alignment(struct pt_regs *regs); | 107 | extern int fix_alignment(struct pt_regs *regs); |
105 | extern void bad_page_fault(struct pt_regs *regs, unsigned long address, | 108 | extern void bad_page_fault(struct pt_regs *regs, unsigned long address, |
106 | int sig); | 109 | int sig); |
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h index 321b23bba1ec..273dbecf8ace 100644 --- a/include/asm-s390/spinlock.h +++ b/include/asm-s390/spinlock.h | |||
@@ -27,25 +27,19 @@ _raw_compare_and_swap(volatile unsigned int *lock, | |||
27 | * on the local processor, one does not. | 27 | * on the local processor, one does not. |
28 | * | 28 | * |
29 | * We make no fairness assumptions. They have a cost. | 29 | * We make no fairness assumptions. They have a cost. |
30 | * | ||
31 | * (the type definitions are in asm/spinlock_types.h) | ||
30 | */ | 32 | */ |
31 | 33 | ||
32 | typedef struct { | 34 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
33 | volatile unsigned int lock; | 35 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
34 | #ifdef CONFIG_PREEMPT | 36 | #define __raw_spin_unlock_wait(lock) \ |
35 | unsigned int break_lock; | 37 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
36 | #endif | ||
37 | } __attribute__ ((aligned (4))) spinlock_t; | ||
38 | |||
39 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
40 | #define spin_lock_init(lp) do { (lp)->lock = 0; } while(0) | ||
41 | #define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock) | ||
42 | #define spin_is_locked(x) ((x)->lock != 0) | ||
43 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
44 | 38 | ||
45 | extern void _raw_spin_lock_wait(spinlock_t *lp, unsigned int pc); | 39 | extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc); |
46 | extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc); | 40 | extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc); |
47 | 41 | ||
48 | static inline void _raw_spin_lock(spinlock_t *lp) | 42 | static inline void __raw_spin_lock(raw_spinlock_t *lp) |
49 | { | 43 | { |
50 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); | 44 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); |
51 | 45 | ||
@@ -53,7 +47,7 @@ static inline void _raw_spin_lock(spinlock_t *lp) | |||
53 | _raw_spin_lock_wait(lp, pc); | 47 | _raw_spin_lock_wait(lp, pc); |
54 | } | 48 | } |
55 | 49 | ||
56 | static inline int _raw_spin_trylock(spinlock_t *lp) | 50 | static inline int __raw_spin_trylock(raw_spinlock_t *lp) |
57 | { | 51 | { |
58 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); | 52 | unsigned long pc = 1 | (unsigned long) __builtin_return_address(0); |
59 | 53 | ||
@@ -62,7 +56,7 @@ static inline int _raw_spin_trylock(spinlock_t *lp) | |||
62 | return _raw_spin_trylock_retry(lp, pc); | 56 | return _raw_spin_trylock_retry(lp, pc); |
63 | } | 57 | } |
64 | 58 | ||
65 | static inline void _raw_spin_unlock(spinlock_t *lp) | 59 | static inline void __raw_spin_unlock(raw_spinlock_t *lp) |
66 | { | 60 | { |
67 | _raw_compare_and_swap(&lp->lock, lp->lock, 0); | 61 | _raw_compare_and_swap(&lp->lock, lp->lock, 0); |
68 | } | 62 | } |
@@ -77,36 +71,25 @@ static inline void _raw_spin_unlock(spinlock_t *lp) | |||
77 | * irq-safe write-lock, but readers can get non-irqsafe | 71 | * irq-safe write-lock, but readers can get non-irqsafe |
78 | * read-locks. | 72 | * read-locks. |
79 | */ | 73 | */ |
80 | typedef struct { | ||
81 | volatile unsigned int lock; | ||
82 | volatile unsigned long owner_pc; | ||
83 | #ifdef CONFIG_PREEMPT | ||
84 | unsigned int break_lock; | ||
85 | #endif | ||
86 | } rwlock_t; | ||
87 | |||
88 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } | ||
89 | |||
90 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
91 | 74 | ||
92 | /** | 75 | /** |
93 | * read_can_lock - would read_trylock() succeed? | 76 | * read_can_lock - would read_trylock() succeed? |
94 | * @lock: the rwlock in question. | 77 | * @lock: the rwlock in question. |
95 | */ | 78 | */ |
96 | #define read_can_lock(x) ((int)(x)->lock >= 0) | 79 | #define __raw_read_can_lock(x) ((int)(x)->lock >= 0) |
97 | 80 | ||
98 | /** | 81 | /** |
99 | * write_can_lock - would write_trylock() succeed? | 82 | * write_can_lock - would write_trylock() succeed? |
100 | * @lock: the rwlock in question. | 83 | * @lock: the rwlock in question. |
101 | */ | 84 | */ |
102 | #define write_can_lock(x) ((x)->lock == 0) | 85 | #define __raw_write_can_lock(x) ((x)->lock == 0) |
103 | 86 | ||
104 | extern void _raw_read_lock_wait(rwlock_t *lp); | 87 | extern void _raw_read_lock_wait(raw_rwlock_t *lp); |
105 | extern int _raw_read_trylock_retry(rwlock_t *lp); | 88 | extern int _raw_read_trylock_retry(raw_rwlock_t *lp); |
106 | extern void _raw_write_lock_wait(rwlock_t *lp); | 89 | extern void _raw_write_lock_wait(raw_rwlock_t *lp); |
107 | extern int _raw_write_trylock_retry(rwlock_t *lp); | 90 | extern int _raw_write_trylock_retry(raw_rwlock_t *lp); |
108 | 91 | ||
109 | static inline void _raw_read_lock(rwlock_t *rw) | 92 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
110 | { | 93 | { |
111 | unsigned int old; | 94 | unsigned int old; |
112 | old = rw->lock & 0x7fffffffU; | 95 | old = rw->lock & 0x7fffffffU; |
@@ -114,7 +97,7 @@ static inline void _raw_read_lock(rwlock_t *rw) | |||
114 | _raw_read_lock_wait(rw); | 97 | _raw_read_lock_wait(rw); |
115 | } | 98 | } |
116 | 99 | ||
117 | static inline void _raw_read_unlock(rwlock_t *rw) | 100 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
118 | { | 101 | { |
119 | unsigned int old, cmp; | 102 | unsigned int old, cmp; |
120 | 103 | ||
@@ -125,18 +108,18 @@ static inline void _raw_read_unlock(rwlock_t *rw) | |||
125 | } while (cmp != old); | 108 | } while (cmp != old); |
126 | } | 109 | } |
127 | 110 | ||
128 | static inline void _raw_write_lock(rwlock_t *rw) | 111 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
129 | { | 112 | { |
130 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) | 113 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) |
131 | _raw_write_lock_wait(rw); | 114 | _raw_write_lock_wait(rw); |
132 | } | 115 | } |
133 | 116 | ||
134 | static inline void _raw_write_unlock(rwlock_t *rw) | 117 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
135 | { | 118 | { |
136 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); | 119 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); |
137 | } | 120 | } |
138 | 121 | ||
139 | static inline int _raw_read_trylock(rwlock_t *rw) | 122 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
140 | { | 123 | { |
141 | unsigned int old; | 124 | unsigned int old; |
142 | old = rw->lock & 0x7fffffffU; | 125 | old = rw->lock & 0x7fffffffU; |
@@ -145,7 +128,7 @@ static inline int _raw_read_trylock(rwlock_t *rw) | |||
145 | return _raw_read_trylock_retry(rw); | 128 | return _raw_read_trylock_retry(rw); |
146 | } | 129 | } |
147 | 130 | ||
148 | static inline int _raw_write_trylock(rwlock_t *rw) | 131 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
149 | { | 132 | { |
150 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) | 133 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) |
151 | return 1; | 134 | return 1; |
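The s390 read/write lock above keeps its whole state in one word: bit 31 marks a writer and the low 31 bits count readers, with every transition done by a single compare-and-swap (falling back to the out-of-line *_wait/*_retry helpers on contention). A hedged sketch of the same scheme on top of GCC builtins, without the retry helpers:

typedef struct { volatile unsigned int lock; } demo_rwlock_t;

static inline int demo_read_trylock(demo_rwlock_t *rw)
{
        unsigned int old = rw->lock & 0x7fffffffU;      /* mask the write bit */

        /* Only succeeds if no writer holds the lock at this instant. */
        return __sync_bool_compare_and_swap(&rw->lock, old, old + 1);
}

static inline void demo_read_unlock(demo_rwlock_t *rw)
{
        unsigned int old, cmp;

        old = rw->lock;
        do {
                cmp = old;
                old = __sync_val_compare_and_swap(&rw->lock, cmp, cmp - 1);
        } while (old != cmp);           /* another CPU raced us, retry */
}

static inline int demo_write_trylock(demo_rwlock_t *rw)
{
        /* A writer may only enter when the whole word is 0. */
        return __sync_bool_compare_and_swap(&rw->lock, 0, 0x80000000U);
}

static inline void demo_write_unlock(demo_rwlock_t *rw)
{
        (void) __sync_bool_compare_and_swap(&rw->lock, 0x80000000U, 0);
}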
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h new file mode 100644 index 000000000000..f79a2216204f --- /dev/null +++ b/include/asm-s390/spinlock_types.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } __attribute__ ((aligned (4))) raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | volatile unsigned int owner_pc; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | ||
20 | |||
21 | #endif | ||
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h index 831e52ee45b5..614a8c13b721 100644 --- a/include/asm-sh/irq.h +++ b/include/asm-sh/irq.h | |||
@@ -587,10 +587,6 @@ static inline int generic_irq_demux(int irq) | |||
587 | #define irq_canonicalize(irq) (irq) | 587 | #define irq_canonicalize(irq) (irq) |
588 | #define irq_demux(irq) __irq_demux(sh_mv.mv_irq_demux(irq)) | 588 | #define irq_demux(irq) __irq_demux(sh_mv.mv_irq_demux(irq)) |
589 | 589 | ||
590 | struct irqaction; | ||
591 | struct pt_regs; | ||
592 | int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); | ||
593 | |||
594 | #if defined(CONFIG_CPU_SUBTYPE_SH73180) | 590 | #if defined(CONFIG_CPU_SUBTYPE_SH73180) |
595 | #include <asm/irq-sh73180.h> | 591 | #include <asm/irq-sh73180.h> |
596 | #endif | 592 | #endif |
diff --git a/include/asm-sh/spinlock.h b/include/asm-sh/spinlock.h index e770b55649eb..846322d4c35d 100644 --- a/include/asm-sh/spinlock.h +++ b/include/asm-sh/spinlock.h | |||
@@ -15,20 +15,11 @@ | |||
15 | /* | 15 | /* |
16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 16 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
17 | */ | 17 | */ |
18 | typedef struct { | ||
19 | volatile unsigned long lock; | ||
20 | #ifdef CONFIG_PREEMPT | ||
21 | unsigned int break_lock; | ||
22 | #endif | ||
23 | } spinlock_t; | ||
24 | 18 | ||
25 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | 19 | #define __raw_spin_is_locked(x) ((x)->lock != 0) |
26 | 20 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | |
27 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | 21 | #define __raw_spin_unlock_wait(x) \ |
28 | 22 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | |
29 | #define spin_is_locked(x) ((x)->lock != 0) | ||
30 | #define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x)) | ||
31 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
32 | 23 | ||
33 | /* | 24 | /* |
34 | * Simple spin lock operations. There are two variants, one clears IRQ's | 25 | * Simple spin lock operations. There are two variants, one clears IRQ's |
@@ -36,7 +27,7 @@ typedef struct { | |||
36 | * | 27 | * |
37 | * We make no fairness assumptions. They have a cost. | 28 | * We make no fairness assumptions. They have a cost. |
38 | */ | 29 | */ |
39 | static inline void _raw_spin_lock(spinlock_t *lock) | 30 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
40 | { | 31 | { |
41 | __asm__ __volatile__ ( | 32 | __asm__ __volatile__ ( |
42 | "1:\n\t" | 33 | "1:\n\t" |
@@ -49,14 +40,14 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
49 | ); | 40 | ); |
50 | } | 41 | } |
51 | 42 | ||
52 | static inline void _raw_spin_unlock(spinlock_t *lock) | 43 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
53 | { | 44 | { |
54 | assert_spin_locked(lock); | 45 | assert_spin_locked(lock); |
55 | 46 | ||
56 | lock->lock = 0; | 47 | lock->lock = 0; |
57 | } | 48 | } |
58 | 49 | ||
59 | #define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock)) | 50 | #define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock)) |
60 | 51 | ||
61 | /* | 52 | /* |
62 | * Read-write spinlocks, allowing multiple readers but only one writer. | 53 | * Read-write spinlocks, allowing multiple readers but only one writer. |
@@ -66,51 +57,40 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
66 | * needs to get a irq-safe write-lock, but readers can get non-irqsafe | 57 | * needs to get a irq-safe write-lock, but readers can get non-irqsafe |
67 | * read-locks. | 58 | * read-locks. |
68 | */ | 59 | */ |
69 | typedef struct { | 60 | |
70 | spinlock_t lock; | 61 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
71 | atomic_t counter; | ||
72 | #ifdef CONFIG_PREEMPT | ||
73 | unsigned int break_lock; | ||
74 | #endif | ||
75 | } rwlock_t; | ||
76 | |||
77 | #define RW_LOCK_BIAS 0x01000000 | ||
78 | #define RW_LOCK_UNLOCKED (rwlock_t) { { 0 }, { RW_LOCK_BIAS } } | ||
79 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0) | ||
80 | |||
81 | static inline void _raw_read_lock(rwlock_t *rw) | ||
82 | { | 62 | { |
83 | _raw_spin_lock(&rw->lock); | 63 | __raw_spin_lock(&rw->lock); |
84 | 64 | ||
85 | atomic_inc(&rw->counter); | 65 | atomic_inc(&rw->counter); |
86 | 66 | ||
87 | _raw_spin_unlock(&rw->lock); | 67 | __raw_spin_unlock(&rw->lock); |
88 | } | 68 | } |
89 | 69 | ||
90 | static inline void _raw_read_unlock(rwlock_t *rw) | 70 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
91 | { | 71 | { |
92 | _raw_spin_lock(&rw->lock); | 72 | __raw_spin_lock(&rw->lock); |
93 | 73 | ||
94 | atomic_dec(&rw->counter); | 74 | atomic_dec(&rw->counter); |
95 | 75 | ||
96 | _raw_spin_unlock(&rw->lock); | 76 | __raw_spin_unlock(&rw->lock); |
97 | } | 77 | } |
98 | 78 | ||
99 | static inline void _raw_write_lock(rwlock_t *rw) | 79 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
100 | { | 80 | { |
101 | _raw_spin_lock(&rw->lock); | 81 | __raw_spin_lock(&rw->lock); |
102 | atomic_set(&rw->counter, -1); | 82 | atomic_set(&rw->counter, -1); |
103 | } | 83 | } |
104 | 84 | ||
105 | static inline void _raw_write_unlock(rwlock_t *rw) | 85 | static inline void __raw_write_unlock(raw_rwlock_t *rw) |
106 | { | 86 | { |
107 | atomic_set(&rw->counter, 0); | 87 | atomic_set(&rw->counter, 0); |
108 | _raw_spin_unlock(&rw->lock); | 88 | __raw_spin_unlock(&rw->lock); |
109 | } | 89 | } |
110 | 90 | ||
111 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | 91 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
112 | 92 | ||
113 | static inline int _raw_write_trylock(rwlock_t *rw) | 93 | static inline int __raw_write_trylock(raw_rwlock_t *rw) |
114 | { | 94 | { |
115 | if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter)) | 95 | if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter)) |
116 | return 1; | 96 | return 1; |
@@ -121,4 +101,3 @@ static inline int _raw_write_trylock(rwlock_t *rw) | |||
121 | } | 101 | } |
122 | 102 | ||
123 | #endif /* __ASM_SH_SPINLOCK_H */ | 103 | #endif /* __ASM_SH_SPINLOCK_H */ |
124 | |||
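The SH variant above builds its read/write lock out of an ordinary spinlock plus a counter: readers take the inner lock just long enough to adjust the counter, while a writer keeps the inner lock held for its whole critical section. The pthread sketch below mirrors that structure (the helper names are invented); like the code above, it does not make the writer wait for readers that have already dropped the inner lock.

#include <pthread.h>

struct demo_rwlock {
        pthread_spinlock_t lock;        /* plays the role of rw->lock        */
        int counter;                    /* >= 0: reader count, -1: writer in */
};

static int demo_rwlock_init(struct demo_rwlock *rw)
{
        rw->counter = 0;
        return pthread_spin_init(&rw->lock, PTHREAD_PROCESS_PRIVATE);
}

static void demo_read_lock(struct demo_rwlock *rw)
{
        pthread_spin_lock(&rw->lock);
        rw->counter++;                  /* counter only touched under lock */
        pthread_spin_unlock(&rw->lock);
}

static void demo_read_unlock(struct demo_rwlock *rw)
{
        pthread_spin_lock(&rw->lock);
        rw->counter--;
        pthread_spin_unlock(&rw->lock);
}

static void demo_write_lock(struct demo_rwlock *rw)
{
        /* The writer holds the inner lock until demo_write_unlock(). */
        pthread_spin_lock(&rw->lock);
        rw->counter = -1;
}

static void demo_write_unlock(struct demo_rwlock *rw)
{
        rw->counter = 0;
        pthread_spin_unlock(&rw->lock);
}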
diff --git a/include/asm-sh/spinlock_types.h b/include/asm-sh/spinlock_types.h new file mode 100644 index 000000000000..8c41b6c3aac8 --- /dev/null +++ b/include/asm-sh/spinlock_types.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef __ASM_SH_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SH_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned long lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | raw_spinlock_t lock; | ||
16 | atomic_t counter; | ||
17 | } raw_rwlock_t; | ||
18 | |||
19 | #define RW_LOCK_BIAS 0x01000000 | ||
20 | #define __RAW_RW_LOCK_UNLOCKED { { 0 }, { RW_LOCK_BIAS } } | ||
21 | |||
22 | #endif | ||
diff --git a/include/asm-sparc/ptrace.h b/include/asm-sparc/ptrace.h index dd9d94d7e0ae..a8ecb2d6977a 100644 --- a/include/asm-sparc/ptrace.h +++ b/include/asm-sparc/ptrace.h | |||
@@ -73,11 +73,11 @@ extern void show_regs(struct pt_regs *); | |||
73 | #endif | 73 | #endif |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * The asm_offsets.h is a generated file, so we cannot include it. | 76 | * The asm-offsets.h is a generated file, so we cannot include it. |
77 | * It may be OK for glibc headers, but it's utterly pointless for C code. | 77 | * It may be OK for glibc headers, but it's utterly pointless for C code. |
78 | * The assembly code using those offsets has to include it explicitly. | 78 | * The assembly code using those offsets has to include it explicitly. |
79 | */ | 79 | */ |
80 | /* #include <asm/asm_offsets.h> */ | 80 | /* #include <asm/asm-offsets.h> */ |
81 | 81 | ||
82 | /* These are for pt_regs. */ | 82 | /* These are for pt_regs. */ |
83 | #define PT_PSR 0x0 | 83 | #define PT_PSR 0x0 |
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h index 0cbd87ad4912..111727a2bb4e 100644 --- a/include/asm-sparc/spinlock.h +++ b/include/asm-sparc/spinlock.h | |||
@@ -12,96 +12,12 @@ | |||
12 | 12 | ||
13 | #include <asm/psr.h> | 13 | #include <asm/psr.h> |
14 | 14 | ||
15 | #ifdef CONFIG_DEBUG_SPINLOCK | 15 | #define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) |
16 | struct _spinlock_debug { | ||
17 | unsigned char lock; | ||
18 | unsigned long owner_pc; | ||
19 | #ifdef CONFIG_PREEMPT | ||
20 | unsigned int break_lock; | ||
21 | #endif | ||
22 | }; | ||
23 | typedef struct _spinlock_debug spinlock_t; | ||
24 | |||
25 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0 } | ||
26 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | ||
27 | #define spin_is_locked(lp) (*((volatile unsigned char *)(&((lp)->lock))) != 0) | ||
28 | #define spin_unlock_wait(lp) do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock)) | ||
29 | |||
30 | extern void _do_spin_lock(spinlock_t *lock, char *str); | ||
31 | extern int _spin_trylock(spinlock_t *lock); | ||
32 | extern void _do_spin_unlock(spinlock_t *lock); | ||
33 | |||
34 | #define _raw_spin_trylock(lp) _spin_trylock(lp) | ||
35 | #define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") | ||
36 | #define _raw_spin_unlock(lock) _do_spin_unlock(lock) | ||
37 | |||
38 | struct _rwlock_debug { | ||
39 | volatile unsigned int lock; | ||
40 | unsigned long owner_pc; | ||
41 | unsigned long reader_pc[NR_CPUS]; | ||
42 | #ifdef CONFIG_PREEMPT | ||
43 | unsigned int break_lock; | ||
44 | #endif | ||
45 | }; | ||
46 | typedef struct _rwlock_debug rwlock_t; | ||
47 | |||
48 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} } | ||
49 | |||
50 | #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0) | ||
51 | |||
52 | extern void _do_read_lock(rwlock_t *rw, char *str); | ||
53 | extern void _do_read_unlock(rwlock_t *rw, char *str); | ||
54 | extern void _do_write_lock(rwlock_t *rw, char *str); | ||
55 | extern void _do_write_unlock(rwlock_t *rw); | ||
56 | |||
57 | #define _raw_read_lock(lock) \ | ||
58 | do { unsigned long flags; \ | ||
59 | local_irq_save(flags); \ | ||
60 | _do_read_lock(lock, "read_lock"); \ | ||
61 | local_irq_restore(flags); \ | ||
62 | } while(0) | ||
63 | |||
64 | #define _raw_read_unlock(lock) \ | ||
65 | do { unsigned long flags; \ | ||
66 | local_irq_save(flags); \ | ||
67 | _do_read_unlock(lock, "read_unlock"); \ | ||
68 | local_irq_restore(flags); \ | ||
69 | } while(0) | ||
70 | |||
71 | #define _raw_write_lock(lock) \ | ||
72 | do { unsigned long flags; \ | ||
73 | local_irq_save(flags); \ | ||
74 | _do_write_lock(lock, "write_lock"); \ | ||
75 | local_irq_restore(flags); \ | ||
76 | } while(0) | ||
77 | |||
78 | #define _raw_write_unlock(lock) \ | ||
79 | do { unsigned long flags; \ | ||
80 | local_irq_save(flags); \ | ||
81 | _do_write_unlock(lock); \ | ||
82 | local_irq_restore(flags); \ | ||
83 | } while(0) | ||
84 | |||
85 | #else /* !CONFIG_DEBUG_SPINLOCK */ | ||
86 | |||
87 | typedef struct { | ||
88 | unsigned char lock; | ||
89 | #ifdef CONFIG_PREEMPT | ||
90 | unsigned int break_lock; | ||
91 | #endif | ||
92 | } spinlock_t; | ||
93 | |||
94 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
95 | |||
96 | #define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0) | ||
97 | #define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) | ||
98 | 16 | ||
99 | #define spin_unlock_wait(lock) \ | 17 | #define __raw_spin_unlock_wait(lock) \ |
100 | do { \ | 18 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) |
101 | barrier(); \ | ||
102 | } while(*((volatile unsigned char *)lock)) | ||
103 | 19 | ||
104 | extern __inline__ void _raw_spin_lock(spinlock_t *lock) | 20 | extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) |
105 | { | 21 | { |
106 | __asm__ __volatile__( | 22 | __asm__ __volatile__( |
107 | "\n1:\n\t" | 23 | "\n1:\n\t" |
@@ -121,7 +37,7 @@ extern __inline__ void _raw_spin_lock(spinlock_t *lock) | |||
121 | : "g2", "memory", "cc"); | 37 | : "g2", "memory", "cc"); |
122 | } | 38 | } |
123 | 39 | ||
124 | extern __inline__ int _raw_spin_trylock(spinlock_t *lock) | 40 | extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) |
125 | { | 41 | { |
126 | unsigned int result; | 42 | unsigned int result; |
127 | __asm__ __volatile__("ldstub [%1], %0" | 43 | __asm__ __volatile__("ldstub [%1], %0" |
@@ -131,7 +47,7 @@ extern __inline__ int _raw_spin_trylock(spinlock_t *lock) | |||
131 | return (result == 0); | 47 | return (result == 0); |
132 | } | 48 | } |
133 | 49 | ||
134 | extern __inline__ void _raw_spin_unlock(spinlock_t *lock) | 50 | extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) |
135 | { | 51 | { |
136 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); | 52 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); |
137 | } | 53 | } |
@@ -147,23 +63,11 @@ extern __inline__ void _raw_spin_unlock(spinlock_t *lock) | |||
147 | * | 63 | * |
148 | * XXX This might create some problems with my dual spinlock | 64 | * XXX This might create some problems with my dual spinlock |
149 | * XXX scheme, deadlocks etc. -DaveM | 65 | * XXX scheme, deadlocks etc. -DaveM |
150 | */ | 66 | * |
151 | typedef struct { | 67 | * Sort of like atomic_t's on Sparc, but even more clever. |
152 | volatile unsigned int lock; | ||
153 | #ifdef CONFIG_PREEMPT | ||
154 | unsigned int break_lock; | ||
155 | #endif | ||
156 | } rwlock_t; | ||
157 | |||
158 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
159 | |||
160 | #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0) | ||
161 | |||
162 | |||
163 | /* Sort of like atomic_t's on Sparc, but even more clever. | ||
164 | * | 68 | * |
165 | * ------------------------------------ | 69 | * ------------------------------------ |
166 | * | 24-bit counter | wlock | rwlock_t | 70 | * | 24-bit counter | wlock | raw_rwlock_t |
167 | * ------------------------------------ | 71 | * ------------------------------------ |
168 | * 31 8 7 0 | 72 | * 31 8 7 0 |
169 | * | 73 | * |
@@ -174,9 +78,9 @@ typedef struct { | |||
174 | * | 78 | * |
175 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 79 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
176 | */ | 80 | */ |
177 | extern __inline__ void _read_lock(rwlock_t *rw) | 81 | extern __inline__ void __read_lock(raw_rwlock_t *rw) |
178 | { | 82 | { |
179 | register rwlock_t *lp asm("g1"); | 83 | register raw_rwlock_t *lp asm("g1"); |
180 | lp = rw; | 84 | lp = rw; |
181 | __asm__ __volatile__( | 85 | __asm__ __volatile__( |
182 | "mov %%o7, %%g4\n\t" | 86 | "mov %%o7, %%g4\n\t" |
@@ -187,16 +91,16 @@ extern __inline__ void _read_lock(rwlock_t *rw) | |||
187 | : "g2", "g4", "memory", "cc"); | 91 | : "g2", "g4", "memory", "cc"); |
188 | } | 92 | } |
189 | 93 | ||
190 | #define _raw_read_lock(lock) \ | 94 | #define __raw_read_lock(lock) \ |
191 | do { unsigned long flags; \ | 95 | do { unsigned long flags; \ |
192 | local_irq_save(flags); \ | 96 | local_irq_save(flags); \ |
193 | _read_lock(lock); \ | 97 | __raw_read_lock(lock); \ |
194 | local_irq_restore(flags); \ | 98 | local_irq_restore(flags); \ |
195 | } while(0) | 99 | } while(0) |
196 | 100 | ||
197 | extern __inline__ void _read_unlock(rwlock_t *rw) | 101 | extern __inline__ void __read_unlock(raw_rwlock_t *rw) |
198 | { | 102 | { |
199 | register rwlock_t *lp asm("g1"); | 103 | register raw_rwlock_t *lp asm("g1"); |
200 | lp = rw; | 104 | lp = rw; |
201 | __asm__ __volatile__( | 105 | __asm__ __volatile__( |
202 | "mov %%o7, %%g4\n\t" | 106 | "mov %%o7, %%g4\n\t" |
@@ -207,16 +111,16 @@ extern __inline__ void _read_unlock(rwlock_t *rw) | |||
207 | : "g2", "g4", "memory", "cc"); | 111 | : "g2", "g4", "memory", "cc"); |
208 | } | 112 | } |
209 | 113 | ||
210 | #define _raw_read_unlock(lock) \ | 114 | #define __raw_read_unlock(lock) \ |
211 | do { unsigned long flags; \ | 115 | do { unsigned long flags; \ |
212 | local_irq_save(flags); \ | 116 | local_irq_save(flags); \ |
213 | _read_unlock(lock); \ | 117 | __raw_read_unlock(lock); \ |
214 | local_irq_restore(flags); \ | 118 | local_irq_restore(flags); \ |
215 | } while(0) | 119 | } while(0) |
216 | 120 | ||
217 | extern __inline__ void _raw_write_lock(rwlock_t *rw) | 121 | extern __inline__ void __raw_write_lock(raw_rwlock_t *rw) |
218 | { | 122 | { |
219 | register rwlock_t *lp asm("g1"); | 123 | register raw_rwlock_t *lp asm("g1"); |
220 | lp = rw; | 124 | lp = rw; |
221 | __asm__ __volatile__( | 125 | __asm__ __volatile__( |
222 | "mov %%o7, %%g4\n\t" | 126 | "mov %%o7, %%g4\n\t" |
@@ -227,11 +131,9 @@ extern __inline__ void _raw_write_lock(rwlock_t *rw) | |||
227 | : "g2", "g4", "memory", "cc"); | 131 | : "g2", "g4", "memory", "cc"); |
228 | } | 132 | } |
229 | 133 | ||
230 | #define _raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) | 134 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) |
231 | |||
232 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
233 | 135 | ||
234 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 136 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
235 | 137 | ||
236 | #endif /* !(__ASSEMBLY__) */ | 138 | #endif /* !(__ASSEMBLY__) */ |
237 | 139 | ||
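The sparc32 comment above documents the packed rwlock word: bits 31..8 hold a 24-bit reader count and bits 7..0 hold the write-lock byte that the out-of-line %g1 helpers hit with ldstub. Two small decode helpers, assuming only that layout, make the split explicit:

/* Decode helpers for the "| 24-bit counter | wlock |" word described
 * above; they are illustrative and do not take or release the lock. */
static inline unsigned int demo_rw_readers(unsigned int lockword)
{
        return lockword >> 8;                   /* 24-bit reader count */
}

static inline int demo_rw_write_locked(unsigned int lockword)
{
        return (lockword & 0xffU) != 0;         /* low byte is the wlock */
}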
diff --git a/include/asm-sparc/spinlock_types.h b/include/asm-sparc/spinlock_types.h new file mode 100644 index 000000000000..0a0fb116c4ec --- /dev/null +++ b/include/asm-sparc/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __SPARC_SPINLOCK_TYPES_H | ||
2 | #define __SPARC_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | unsigned char lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index a02c4370eb42..ec85d12d73b9 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h | |||
@@ -29,24 +29,13 @@ | |||
29 | * must be pre-V9 branches. | 29 | * must be pre-V9 branches. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #ifndef CONFIG_DEBUG_SPINLOCK | 32 | #define __raw_spin_is_locked(lp) ((lp)->lock != 0) |
33 | 33 | ||
34 | typedef struct { | 34 | #define __raw_spin_unlock_wait(lp) \ |
35 | volatile unsigned char lock; | 35 | do { rmb(); \ |
36 | #ifdef CONFIG_PREEMPT | 36 | } while((lp)->lock) |
37 | unsigned int break_lock; | ||
38 | #endif | ||
39 | } spinlock_t; | ||
40 | #define SPIN_LOCK_UNLOCKED (spinlock_t) {0,} | ||
41 | 37 | ||
42 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | 38 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
43 | #define spin_is_locked(lp) ((lp)->lock != 0) | ||
44 | |||
45 | #define spin_unlock_wait(lp) \ | ||
46 | do { rmb(); \ | ||
47 | } while((lp)->lock) | ||
48 | |||
49 | static inline void _raw_spin_lock(spinlock_t *lock) | ||
50 | { | 39 | { |
51 | unsigned long tmp; | 40 | unsigned long tmp; |
52 | 41 | ||
@@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
67 | : "memory"); | 56 | : "memory"); |
68 | } | 57 | } |
69 | 58 | ||
70 | static inline int _raw_spin_trylock(spinlock_t *lock) | 59 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
71 | { | 60 | { |
72 | unsigned long result; | 61 | unsigned long result; |
73 | 62 | ||
@@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock) | |||
81 | return (result == 0UL); | 70 | return (result == 0UL); |
82 | } | 71 | } |
83 | 72 | ||
84 | static inline void _raw_spin_unlock(spinlock_t *lock) | 73 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
85 | { | 74 | { |
86 | __asm__ __volatile__( | 75 | __asm__ __volatile__( |
87 | " membar #StoreStore | #LoadStore\n" | 76 | " membar #StoreStore | #LoadStore\n" |
@@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock) | |||
91 | : "memory"); | 80 | : "memory"); |
92 | } | 81 | } |
93 | 82 | ||
94 | static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | 83 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
95 | { | 84 | { |
96 | unsigned long tmp1, tmp2; | 85 | unsigned long tmp1, tmp2; |
97 | 86 | ||
@@ -115,51 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) | |||
115 | : "memory"); | 104 | : "memory"); |
116 | } | 105 | } |
117 | 106 | ||
118 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | ||
119 | |||
120 | typedef struct { | ||
121 | volatile unsigned char lock; | ||
122 | unsigned int owner_pc, owner_cpu; | ||
123 | #ifdef CONFIG_PREEMPT | ||
124 | unsigned int break_lock; | ||
125 | #endif | ||
126 | } spinlock_t; | ||
127 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff } | ||
128 | #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0) | ||
129 | #define spin_is_locked(__lock) ((__lock)->lock != 0) | ||
130 | #define spin_unlock_wait(__lock) \ | ||
131 | do { \ | ||
132 | rmb(); \ | ||
133 | } while((__lock)->lock) | ||
134 | |||
135 | extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller); | ||
136 | extern void _do_spin_unlock(spinlock_t *lock); | ||
137 | extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller); | ||
138 | |||
139 | #define _raw_spin_trylock(lp) \ | ||
140 | _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0)) | ||
141 | #define _raw_spin_lock(lock) \ | ||
142 | _do_spin_lock(lock, "spin_lock", \ | ||
143 | (unsigned long) __builtin_return_address(0)) | ||
144 | #define _raw_spin_unlock(lock) _do_spin_unlock(lock) | ||
145 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
146 | |||
147 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
148 | |||
149 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 107 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
150 | 108 | ||
151 | #ifndef CONFIG_DEBUG_SPINLOCK | 109 | static void inline __read_lock(raw_rwlock_t *lock) |
152 | |||
153 | typedef struct { | ||
154 | volatile unsigned int lock; | ||
155 | #ifdef CONFIG_PREEMPT | ||
156 | unsigned int break_lock; | ||
157 | #endif | ||
158 | } rwlock_t; | ||
159 | #define RW_LOCK_UNLOCKED (rwlock_t) {0,} | ||
160 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | ||
161 | |||
162 | static void inline __read_lock(rwlock_t *lock) | ||
163 | { | 110 | { |
164 | unsigned long tmp1, tmp2; | 111 | unsigned long tmp1, tmp2; |
165 | 112 | ||
@@ -184,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock) | |||
184 | : "memory"); | 131 | : "memory"); |
185 | } | 132 | } |
186 | 133 | ||
187 | static void inline __read_unlock(rwlock_t *lock) | 134 | static void inline __read_unlock(raw_rwlock_t *lock) |
188 | { | 135 | { |
189 | unsigned long tmp1, tmp2; | 136 | unsigned long tmp1, tmp2; |
190 | 137 | ||
@@ -201,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock) | |||
201 | : "memory"); | 148 | : "memory"); |
202 | } | 149 | } |
203 | 150 | ||
204 | static void inline __write_lock(rwlock_t *lock) | 151 | static void inline __write_lock(raw_rwlock_t *lock) |
205 | { | 152 | { |
206 | unsigned long mask, tmp1, tmp2; | 153 | unsigned long mask, tmp1, tmp2; |
207 | 154 | ||
@@ -228,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock) | |||
228 | : "memory"); | 175 | : "memory"); |
229 | } | 176 | } |
230 | 177 | ||
231 | static void inline __write_unlock(rwlock_t *lock) | 178 | static void inline __write_unlock(raw_rwlock_t *lock) |
232 | { | 179 | { |
233 | __asm__ __volatile__( | 180 | __asm__ __volatile__( |
234 | " membar #LoadStore | #StoreStore\n" | 181 | " membar #LoadStore | #StoreStore\n" |
@@ -238,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock) | |||
238 | : "memory"); | 185 | : "memory"); |
239 | } | 186 | } |
240 | 187 | ||
241 | static int inline __write_trylock(rwlock_t *lock) | 188 | static int inline __write_trylock(raw_rwlock_t *lock) |
242 | { | 189 | { |
243 | unsigned long mask, tmp1, tmp2, result; | 190 | unsigned long mask, tmp1, tmp2, result; |
244 | 191 | ||
@@ -263,78 +210,15 @@ static int inline __write_trylock(rwlock_t *lock) | |||
263 | return result; | 210 | return result; |
264 | } | 211 | } |
265 | 212 | ||
266 | #define _raw_read_lock(p) __read_lock(p) | 213 | #define __raw_read_lock(p) __read_lock(p) |
267 | #define _raw_read_unlock(p) __read_unlock(p) | 214 | #define __raw_read_unlock(p) __read_unlock(p) |
268 | #define _raw_write_lock(p) __write_lock(p) | 215 | #define __raw_write_lock(p) __write_lock(p) |
269 | #define _raw_write_unlock(p) __write_unlock(p) | 216 | #define __raw_write_unlock(p) __write_unlock(p) |
270 | #define _raw_write_trylock(p) __write_trylock(p) | 217 | #define __raw_write_trylock(p) __write_trylock(p) |
271 | 218 | ||
272 | #else /* !(CONFIG_DEBUG_SPINLOCK) */ | 219 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
273 | 220 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | |
274 | typedef struct { | 221 | #define __raw_write_can_lock(rw) (!(rw)->lock) |
275 | volatile unsigned long lock; | ||
276 | unsigned int writer_pc, writer_cpu; | ||
277 | unsigned int reader_pc[NR_CPUS]; | ||
278 | #ifdef CONFIG_PREEMPT | ||
279 | unsigned int break_lock; | ||
280 | #endif | ||
281 | } rwlock_t; | ||
282 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } } | ||
283 | #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) | ||
284 | |||
285 | extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller); | ||
286 | extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller); | ||
287 | extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller); | ||
288 | extern void _do_write_unlock(rwlock_t *rw, unsigned long caller); | ||
289 | extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller); | ||
290 | |||
291 | #define _raw_read_lock(lock) \ | ||
292 | do { unsigned long flags; \ | ||
293 | local_irq_save(flags); \ | ||
294 | _do_read_lock(lock, "read_lock", \ | ||
295 | (unsigned long) __builtin_return_address(0)); \ | ||
296 | local_irq_restore(flags); \ | ||
297 | } while(0) | ||
298 | |||
299 | #define _raw_read_unlock(lock) \ | ||
300 | do { unsigned long flags; \ | ||
301 | local_irq_save(flags); \ | ||
302 | _do_read_unlock(lock, "read_unlock", \ | ||
303 | (unsigned long) __builtin_return_address(0)); \ | ||
304 | local_irq_restore(flags); \ | ||
305 | } while(0) | ||
306 | |||
307 | #define _raw_write_lock(lock) \ | ||
308 | do { unsigned long flags; \ | ||
309 | local_irq_save(flags); \ | ||
310 | _do_write_lock(lock, "write_lock", \ | ||
311 | (unsigned long) __builtin_return_address(0)); \ | ||
312 | local_irq_restore(flags); \ | ||
313 | } while(0) | ||
314 | |||
315 | #define _raw_write_unlock(lock) \ | ||
316 | do { unsigned long flags; \ | ||
317 | local_irq_save(flags); \ | ||
318 | _do_write_unlock(lock, \ | ||
319 | (unsigned long) __builtin_return_address(0)); \ | ||
320 | local_irq_restore(flags); \ | ||
321 | } while(0) | ||
322 | |||
323 | #define _raw_write_trylock(lock) \ | ||
324 | ({ unsigned long flags; \ | ||
325 | int val; \ | ||
326 | local_irq_save(flags); \ | ||
327 | val = _do_write_trylock(lock, "write_trylock", \ | ||
328 | (unsigned long) __builtin_return_address(0)); \ | ||
329 | local_irq_restore(flags); \ | ||
330 | val; \ | ||
331 | }) | ||
332 | |||
333 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
334 | |||
335 | #define _raw_read_trylock(lock) generic_raw_read_trylock(lock) | ||
336 | #define read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | ||
337 | #define write_can_lock(rw) (!(rw)->lock) | ||
338 | 222 | ||
339 | #endif /* !(__ASSEMBLY__) */ | 223 | #endif /* !(__ASSEMBLY__) */ |
340 | 224 | ||
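The sparc64 spin lock above is a single byte driven by ldstub, which stores 0xff and hands back the old value. GCC's __sync_lock_test_and_set() is the portable analogue of that primitive, so a hedged sketch of the trylock/unlock pair looks like this (the kernel keeps the real membar-annotated assembly, of course):

typedef struct { volatile unsigned char lock; } demo_byte_lock_t;

static inline int demo_byte_trylock(demo_byte_lock_t *lp)
{
        /* Old value 0 means the byte was clear and the lock is now ours;
         * the real code stores 0xff via ldstub, the builtin stores 1. */
        return __sync_lock_test_and_set(&lp->lock, 1) == 0;
}

static inline void demo_byte_unlock(demo_byte_lock_t *lp)
{
        __sync_lock_release(&lp->lock);         /* release-ordered store of 0 */
}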
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h new file mode 100644 index 000000000000..e128112a0d7c --- /dev/null +++ b/include/asm-sparc64/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __SPARC64_SPINLOCK_TYPES_H | ||
2 | #define __SPARC64_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned char lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-um/page.h b/include/asm-um/page.h index bd850a249183..2c192abe9aeb 100644 --- a/include/asm-um/page.h +++ b/include/asm-um/page.h | |||
@@ -96,8 +96,7 @@ extern unsigned long uml_physmem; | |||
96 | 96 | ||
97 | #define __va_space (8*1024*1024) | 97 | #define __va_space (8*1024*1024) |
98 | 98 | ||
99 | extern unsigned long to_phys(void *virt); | 99 | #include "mem.h" |
100 | extern void *to_virt(unsigned long phys); | ||
101 | 100 | ||
102 | /* Cast to unsigned long before casting to void * to avoid a warning from | 101 | /* Cast to unsigned long before casting to void * to avoid a warning from |
103 | * mmap_kmem about cutting a long long down to a void *. Not sure that | 102 | * mmap_kmem about cutting a long long down to a void *. Not sure that |
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h index b48e0966ecd7..ed06170e0edd 100644 --- a/include/asm-um/pgtable.h +++ b/include/asm-um/pgtable.h | |||
@@ -326,14 +326,22 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval) | |||
326 | } | 326 | } |
327 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | 327 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
328 | 328 | ||
329 | extern phys_t page_to_phys(struct page *page); | ||
330 | |||
331 | /* | 329 | /* |
332 | * Conversion functions: convert a page and protection to a page entry, | 330 | * Conversion functions: convert a page and protection to a page entry, |
333 | * and a page entry and page directory to the page they refer to. | 331 | * and a page entry and page directory to the page they refer to. |
334 | */ | 332 | */ |
335 | 333 | ||
336 | extern pte_t mk_pte(struct page *page, pgprot_t pgprot); | 334 | #define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys)) |
335 | #define __virt_to_page(virt) phys_to_page(__pa(virt)) | ||
336 | #define page_to_phys(page) pfn_to_phys(page_to_pfn(page)) | ||
337 | |||
338 | #define mk_pte(page, pgprot) \ | ||
339 | ({ pte_t pte; \ | ||
340 | \ | ||
341 | pte_set_val(pte, page_to_phys(page), (pgprot)); \ | ||
342 | if (pte_present(pte)) \ | ||
343 | pte_mknewprot(pte_mknewpage(pte)); \ | ||
344 | pte;}) | ||
337 | 345 | ||
338 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 346 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
339 | { | 347 | { |
@@ -410,8 +418,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
410 | #endif | 418 | #endif |
411 | #endif | 419 | #endif |
412 | 420 | ||
413 | extern struct page *phys_to_page(const unsigned long phys); | ||
414 | extern struct page *__virt_to_page(const unsigned long virt); | ||
415 | #define virt_to_page(addr) __virt_to_page((const unsigned long) addr) | 421 | #define virt_to_page(addr) __virt_to_page((const unsigned long) addr) |
416 | 422 | ||
417 | /* | 423 | /* |
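With page_to_phys(), phys_to_page() and mk_pte() now expressed as macro chains over the pfn helpers, building a pte for a kernel-virtual address is just a composition of those steps. A hedged kernel-context sketch (the helper name and its arguments are invented for illustration):

/* virt -> phys -> pfn -> struct page, then pack address + protection
 * into a pte exactly as the mk_pte() macro above does. */
static pte_t demo_make_kernel_pte(void *vaddr, pgprot_t prot)
{
        struct page *pg = __virt_to_page((unsigned long) vaddr);

        return mk_pte(pg, prot);
}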
diff --git a/include/asm-um/spinlock_types.h b/include/asm-um/spinlock_types.h new file mode 100644 index 000000000000..e5a94294bf82 --- /dev/null +++ b/include/asm-um/spinlock_types.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __UM_SPINLOCK_TYPES_H | ||
2 | #define __UM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #include "asm/arch/spinlock_types.h" | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index 16ec82e16b21..6c5d5ca8383a 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h | |||
@@ -109,9 +109,10 @@ extern unsigned int nmi_watchdog; | |||
109 | #define NMI_LOCAL_APIC 2 | 109 | #define NMI_LOCAL_APIC 2 |
110 | #define NMI_INVALID 3 | 110 | #define NMI_INVALID 3 |
111 | 111 | ||
112 | extern int disable_timer_pin_1; | ||
113 | |||
112 | #endif /* CONFIG_X86_LOCAL_APIC */ | 114 | #endif /* CONFIG_X86_LOCAL_APIC */ |
113 | 115 | ||
114 | #define esr_disable 0 | ||
115 | extern unsigned boot_cpu_id; | 116 | extern unsigned boot_cpu_id; |
116 | 117 | ||
117 | #endif /* __ASM_APIC_H */ | 118 | #endif /* __ASM_APIC_H */ |
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h index 9388062c4f6e..fb1c99ac669f 100644 --- a/include/asm-x86_64/apicdef.h +++ b/include/asm-x86_64/apicdef.h | |||
@@ -113,6 +113,7 @@ | |||
113 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) | 113 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) |
114 | 114 | ||
115 | #define MAX_IO_APICS 128 | 115 | #define MAX_IO_APICS 128 |
116 | #define MAX_LOCAL_APIC 256 | ||
116 | 117 | ||
117 | /* | 118 | /* |
118 | * All x86-64 systems are xAPIC compatible. | 119 | * All x86-64 systems are xAPIC compatible. |
diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h index eed785667289..80ac1fe966ac 100644 --- a/include/asm-x86_64/bug.h +++ b/include/asm-x86_64/bug.h | |||
@@ -9,10 +9,8 @@ | |||
9 | */ | 9 | */ |
10 | struct bug_frame { | 10 | struct bug_frame { |
11 | unsigned char ud2[2]; | 11 | unsigned char ud2[2]; |
12 | unsigned char mov; | 12 | unsigned char push; |
13 | /* should use 32bit offset instead, but the assembler doesn't | 13 | signed int filename; |
14 | like it */ | ||
15 | char *filename; | ||
16 | unsigned char ret; | 14 | unsigned char ret; |
17 | unsigned short line; | 15 | unsigned short line; |
18 | } __attribute__((packed)); | 16 | } __attribute__((packed)); |
@@ -25,8 +23,8 @@ struct bug_frame { | |||
25 | The magic numbers generate mov $64bitimm,%eax ; ret $offset. */ | 23 | The magic numbers generate mov $64bitimm,%eax ; ret $offset. */ |
26 | #define BUG() \ | 24 | #define BUG() \ |
27 | asm volatile( \ | 25 | asm volatile( \ |
28 | "ud2 ; .byte 0xa3 ; .quad %c1 ; .byte 0xc2 ; .short %c0" :: \ | 26 | "ud2 ; pushq $%c1 ; ret $%c0" :: \ |
29 | "i"(__LINE__), "i" (__stringify(__FILE__))) | 27 | "i"(__LINE__), "i" (__FILE__)) |
30 | void out_of_line_bug(void); | 28 | void out_of_line_bug(void); |
31 | #else | 29 | #else |
32 | static inline void out_of_line_bug(void) { } | 30 | static inline void out_of_line_bug(void) { } |
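The bug.h rework above swaps the 8-byte absolute filename pointer (emitted via a mov with a 64-bit immediate) for a sign-extended 32-bit pushq immediate, shrinking every inline BUG() record. A quick check of the two packed layouts copied from the diff (the program is just size arithmetic, not kernel code):

#include <stdio.h>

struct bug_frame_old {
        unsigned char ud2[2];
        unsigned char mov;
        char *filename;          /* 64-bit absolute pointer */
        unsigned char ret;
        unsigned short line;
} __attribute__((packed));

struct bug_frame_new {
        unsigned char ud2[2];
        unsigned char push;
        signed int filename;     /* 32-bit sign-extended offset */
        unsigned char ret;
        unsigned short line;
} __attribute__((packed));

int main(void)
{
        printf("old: %zu bytes, new: %zu bytes per BUG() site\n",
               sizeof(struct bug_frame_old), sizeof(struct bug_frame_new));
        return 0;     /* old: 14 bytes, new: 10 bytes on x86-64 */
}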
diff --git a/include/asm-x86_64/calling.h b/include/asm-x86_64/calling.h index 0bc12655fa5b..fc2c5a6c262a 100644 --- a/include/asm-x86_64/calling.h +++ b/include/asm-x86_64/calling.h | |||
@@ -65,27 +65,36 @@ | |||
65 | .if \skipr11 | 65 | .if \skipr11 |
66 | .else | 66 | .else |
67 | movq (%rsp),%r11 | 67 | movq (%rsp),%r11 |
68 | CFI_RESTORE r11 | ||
68 | .endif | 69 | .endif |
69 | .if \skipr8910 | 70 | .if \skipr8910 |
70 | .else | 71 | .else |
71 | movq 1*8(%rsp),%r10 | 72 | movq 1*8(%rsp),%r10 |
73 | CFI_RESTORE r10 | ||
72 | movq 2*8(%rsp),%r9 | 74 | movq 2*8(%rsp),%r9 |
75 | CFI_RESTORE r9 | ||
73 | movq 3*8(%rsp),%r8 | 76 | movq 3*8(%rsp),%r8 |
77 | CFI_RESTORE r8 | ||
74 | .endif | 78 | .endif |
75 | .if \skiprax | 79 | .if \skiprax |
76 | .else | 80 | .else |
77 | movq 4*8(%rsp),%rax | 81 | movq 4*8(%rsp),%rax |
82 | CFI_RESTORE rax | ||
78 | .endif | 83 | .endif |
79 | .if \skiprcx | 84 | .if \skiprcx |
80 | .else | 85 | .else |
81 | movq 5*8(%rsp),%rcx | 86 | movq 5*8(%rsp),%rcx |
87 | CFI_RESTORE rcx | ||
82 | .endif | 88 | .endif |
83 | .if \skiprdx | 89 | .if \skiprdx |
84 | .else | 90 | .else |
85 | movq 6*8(%rsp),%rdx | 91 | movq 6*8(%rsp),%rdx |
92 | CFI_RESTORE rdx | ||
86 | .endif | 93 | .endif |
87 | movq 7*8(%rsp),%rsi | 94 | movq 7*8(%rsp),%rsi |
95 | CFI_RESTORE rsi | ||
88 | movq 8*8(%rsp),%rdi | 96 | movq 8*8(%rsp),%rdi |
97 | CFI_RESTORE rdi | ||
89 | .if ARG_SKIP+\addskip > 0 | 98 | .if ARG_SKIP+\addskip > 0 |
90 | addq $ARG_SKIP+\addskip,%rsp | 99 | addq $ARG_SKIP+\addskip,%rsp |
91 | CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) | 100 | CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) |
@@ -124,11 +133,17 @@ | |||
124 | 133 | ||
125 | .macro RESTORE_REST | 134 | .macro RESTORE_REST |
126 | movq (%rsp),%r15 | 135 | movq (%rsp),%r15 |
136 | CFI_RESTORE r15 | ||
127 | movq 1*8(%rsp),%r14 | 137 | movq 1*8(%rsp),%r14 |
138 | CFI_RESTORE r14 | ||
128 | movq 2*8(%rsp),%r13 | 139 | movq 2*8(%rsp),%r13 |
140 | CFI_RESTORE r13 | ||
129 | movq 3*8(%rsp),%r12 | 141 | movq 3*8(%rsp),%r12 |
142 | CFI_RESTORE r12 | ||
130 | movq 4*8(%rsp),%rbp | 143 | movq 4*8(%rsp),%rbp |
144 | CFI_RESTORE rbp | ||
131 | movq 5*8(%rsp),%rbx | 145 | movq 5*8(%rsp),%rbx |
146 | CFI_RESTORE rbx | ||
132 | addq $REST_SKIP,%rsp | 147 | addq $REST_SKIP,%rsp |
133 | CFI_ADJUST_CFA_OFFSET -(REST_SKIP) | 148 | CFI_ADJUST_CFA_OFFSET -(REST_SKIP) |
134 | .endm | 149 | .endm |
@@ -146,11 +161,3 @@ | |||
146 | .macro icebp | 161 | .macro icebp |
147 | .byte 0xf1 | 162 | .byte 0xf1 |
148 | .endm | 163 | .endm |
149 | |||
150 | #ifdef CONFIG_FRAME_POINTER | ||
151 | #define ENTER enter | ||
152 | #define LEAVE leave | ||
153 | #else | ||
154 | #define ENTER | ||
155 | #define LEAVE | ||
156 | #endif | ||
diff --git a/include/asm-x86_64/current.h b/include/asm-x86_64/current.h index 7db560ee6f70..bc8adecee66d 100644 --- a/include/asm-x86_64/current.h +++ b/include/asm-x86_64/current.h | |||
@@ -17,7 +17,7 @@ static inline struct task_struct *get_current(void) | |||
17 | #else | 17 | #else |
18 | 18 | ||
19 | #ifndef ASM_OFFSET_H | 19 | #ifndef ASM_OFFSET_H |
20 | #include <asm/offset.h> | 20 | #include <asm/asm-offsets.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg | 23 | #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg |
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h index c89b58bebee2..594e610f4a1e 100644 --- a/include/asm-x86_64/desc.h +++ b/include/asm-x86_64/desc.h | |||
@@ -191,7 +191,7 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu) | |||
191 | /* | 191 | /* |
192 | * load one particular LDT into the current CPU | 192 | * load one particular LDT into the current CPU |
193 | */ | 193 | */ |
194 | extern inline void load_LDT_nolock (mm_context_t *pc, int cpu) | 194 | static inline void load_LDT_nolock (mm_context_t *pc, int cpu) |
195 | { | 195 | { |
196 | int count = pc->size; | 196 | int count = pc->size; |
197 | 197 | ||
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h index a416dc31634a..e784fdc524f1 100644 --- a/include/asm-x86_64/dma-mapping.h +++ b/include/asm-x86_64/dma-mapping.h | |||
@@ -85,6 +85,11 @@ static inline void dma_sync_single_for_device(struct device *hwdev, | |||
85 | flush_write_buffers(); | 85 | flush_write_buffers(); |
86 | } | 86 | } |
87 | 87 | ||
88 | #define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \ | ||
89 | dma_sync_single_for_cpu(dev, dma_handle, size, dir) | ||
90 | #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ | ||
91 | dma_sync_single_for_device(dev, dma_handle, size, dir) | ||
92 | |||
88 | static inline void dma_sync_sg_for_cpu(struct device *hwdev, | 93 | static inline void dma_sync_sg_for_cpu(struct device *hwdev, |
89 | struct scatterlist *sg, | 94 | struct scatterlist *sg, |
90 | int nelems, int direction) | 95 | int nelems, int direction) |
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h index afd4212e860b..582757fc0365 100644 --- a/include/asm-x86_64/dwarf2.h +++ b/include/asm-x86_64/dwarf2.h | |||
@@ -24,6 +24,10 @@ | |||
24 | #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset | 24 | #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset |
25 | #define CFI_OFFSET .cfi_offset | 25 | #define CFI_OFFSET .cfi_offset |
26 | #define CFI_REL_OFFSET .cfi_rel_offset | 26 | #define CFI_REL_OFFSET .cfi_rel_offset |
27 | #define CFI_REGISTER .cfi_register | ||
28 | #define CFI_RESTORE .cfi_restore | ||
29 | #define CFI_REMEMBER_STATE .cfi_remember_state | ||
30 | #define CFI_RESTORE_STATE .cfi_restore_state | ||
27 | 31 | ||
28 | #else | 32 | #else |
29 | 33 | ||
@@ -36,6 +40,10 @@ | |||
36 | #define CFI_ADJUST_CFA_OFFSET # | 40 | #define CFI_ADJUST_CFA_OFFSET # |
37 | #define CFI_OFFSET # | 41 | #define CFI_OFFSET # |
38 | #define CFI_REL_OFFSET # | 42 | #define CFI_REL_OFFSET # |
43 | #define CFI_REGISTER # | ||
44 | #define CFI_RESTORE # | ||
45 | #define CFI_REMEMBER_STATE # | ||
46 | #define CFI_RESTORE_STATE # | ||
39 | 47 | ||
40 | #endif | 48 | #endif |
41 | 49 | ||
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h index cf8b16cbe8db..a582cfcf2231 100644 --- a/include/asm-x86_64/fixmap.h +++ b/include/asm-x86_64/fixmap.h | |||
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void); | |||
76 | * directly without translation, we catch the bug with a NULL-deference | 76 | * directly without translation, we catch the bug with a NULL-deference |
77 | * kernel oops. Illegal ranges of incoming indices are caught too. | 77 | * kernel oops. Illegal ranges of incoming indices are caught too. |
78 | */ | 78 | */ |
79 | extern inline unsigned long fix_to_virt(const unsigned int idx) | 79 | static inline unsigned long fix_to_virt(const unsigned int idx) |
80 | { | 80 | { |
81 | /* | 81 | /* |
82 | * this branch gets completely eliminated after inlining, | 82 | * this branch gets completely eliminated after inlining, |
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h index 27c381fa1c9d..8661b476fb40 100644 --- a/include/asm-x86_64/hardirq.h +++ b/include/asm-x86_64/hardirq.h | |||
@@ -9,11 +9,12 @@ | |||
9 | 9 | ||
10 | #define __ARCH_IRQ_STAT 1 | 10 | #define __ARCH_IRQ_STAT 1 |
11 | 11 | ||
12 | /* Generate a lvalue for a pda member. Should fix softirq.c instead to use | 12 | #define local_softirq_pending() read_pda(__softirq_pending) |
13 | special access macros. This would generate better code. */ | ||
14 | #define __IRQ_STAT(cpu,member) (read_pda(me)->member) | ||
15 | 13 | ||
16 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | 14 | #define __ARCH_SET_SOFTIRQ_PENDING 1 |
15 | |||
16 | #define set_softirq_pending(x) write_pda(__softirq_pending, (x)) | ||
17 | #define or_softirq_pending(x) or_pda(__softirq_pending, (x)) | ||
17 | 18 | ||
18 | /* | 19 | /* |
19 | * 'what should we do if we get a hw irq event on an illegal vector'. | 20 | * 'what should we do if we get a hw irq event on an illegal vector'. |
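hardirq.h now routes softirq-pending bookkeeping through the per-CPU PDA instead of the generic irq_cpustat_t lvalue, so set and or become single gs-relative stores. A plain-C model of the three accessors; the pda struct and helper names below are stand-ins, not the kernel's API:

#include <stdio.h>

struct pda_model { unsigned int __softirq_pending; };

/* one instance per CPU; index 0 plays the role of "this CPU" here */
static struct pda_model pda[2];

#define read_pda(field)        (pda[0].field)
#define write_pda(field, x)    (pda[0].field = (x))
#define or_pda(field, x)       (pda[0].field |= (x))

#define local_softirq_pending()   read_pda(__softirq_pending)
#define set_softirq_pending(x)    write_pda(__softirq_pending, (x))
#define or_softirq_pending(x)     or_pda(__softirq_pending, (x))

int main(void)
{
        set_softirq_pending(0);
        or_softirq_pending(1u << 3);          /* raise one softirq bit */
        printf("pending = %#x\n", local_softirq_pending());
        return 0;
}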
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h index 2b5cb2865d21..dc97668ea0f9 100644 --- a/include/asm-x86_64/hw_irq.h +++ b/include/asm-x86_64/hw_irq.h | |||
@@ -26,6 +26,7 @@ | |||
26 | struct hw_interrupt_type; | 26 | struct hw_interrupt_type; |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | #define NMI_VECTOR 0x02 | ||
29 | /* | 30 | /* |
30 | * IDT vectors usable for external interrupt sources start | 31 | * IDT vectors usable for external interrupt sources start |
31 | * at 0x20: | 32 | * at 0x20: |
@@ -50,14 +51,15 @@ struct hw_interrupt_type; | |||
50 | */ | 51 | */ |
51 | #define SPURIOUS_APIC_VECTOR 0xff | 52 | #define SPURIOUS_APIC_VECTOR 0xff |
52 | #define ERROR_APIC_VECTOR 0xfe | 53 | #define ERROR_APIC_VECTOR 0xfe |
53 | #define INVALIDATE_TLB_VECTOR 0xfd | 54 | #define RESCHEDULE_VECTOR 0xfd |
54 | #define RESCHEDULE_VECTOR 0xfc | 55 | #define CALL_FUNCTION_VECTOR 0xfc |
55 | #define TASK_MIGRATION_VECTOR 0xfb | 56 | #define KDB_VECTOR 0xfb /* reserved for KDB */ |
56 | #define CALL_FUNCTION_VECTOR 0xfa | 57 | #define THERMAL_APIC_VECTOR 0xfa |
57 | #define KDB_VECTOR 0xf9 | 58 | /* 0xf9 free */ |
58 | 59 | #define INVALIDATE_TLB_VECTOR_END 0xf8 | |
59 | #define THERMAL_APIC_VECTOR 0xf0 | 60 | #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */ |
60 | 61 | ||
62 | #define NUM_INVALIDATE_TLB_VECTORS 8 | ||
61 | 63 | ||
62 | /* | 64 | /* |
63 | * Local APIC timer IRQ vector is on a different priority level, | 65 | * Local APIC timer IRQ vector is on a different priority level, |
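The vector table above now reserves 0xf0-0xf8 for TLB-flush IPIs instead of a single INVALIDATE_TLB_VECTOR, presumably so senders can be spread across NUM_INVALIDATE_TLB_VECTORS vectors and avoid serializing on one flush path. A sketch of how such a sender-to-vector mapping could look; the `cpu % NUM_...` policy is an assumption, not something this header shows:

#include <stdio.h>

#define INVALIDATE_TLB_VECTOR_START  0xf0
#define NUM_INVALIDATE_TLB_VECTORS   8

/* Hypothetical mapping: sender CPU n uses one of the eight flush vectors. */
static int tlb_flush_vector(int sender_cpu)
{
        return INVALIDATE_TLB_VECTOR_START +
               (sender_cpu % NUM_INVALIDATE_TLB_VECTORS);
}

int main(void)
{
        for (int cpu = 0; cpu < 10; cpu++)
                printf("cpu %d -> vector 0x%x\n", cpu, tlb_flush_vector(cpu));
        return 0;
}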
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h index 37fc3f149a5a..52ff269fe054 100644 --- a/include/asm-x86_64/io.h +++ b/include/asm-x86_64/io.h | |||
@@ -48,7 +48,7 @@ | |||
48 | * Talk about misusing macros.. | 48 | * Talk about misusing macros.. |
49 | */ | 49 | */ |
50 | #define __OUT1(s,x) \ | 50 | #define __OUT1(s,x) \ |
51 | extern inline void out##s(unsigned x value, unsigned short port) { | 51 | static inline void out##s(unsigned x value, unsigned short port) { |
52 | 52 | ||
53 | #define __OUT2(s,s1,s2) \ | 53 | #define __OUT2(s,s1,s2) \ |
54 | __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" | 54 | __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1" |
@@ -58,7 +58,7 @@ __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \ | |||
58 | __OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \ | 58 | __OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \ |
59 | 59 | ||
60 | #define __IN1(s) \ | 60 | #define __IN1(s) \ |
61 | extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; | 61 | static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v; |
62 | 62 | ||
63 | #define __IN2(s,s1,s2) \ | 63 | #define __IN2(s,s1,s2) \ |
64 | __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" | 64 | __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0" |
@@ -68,12 +68,12 @@ __IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ | |||
68 | __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ | 68 | __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \ |
69 | 69 | ||
70 | #define __INS(s) \ | 70 | #define __INS(s) \ |
71 | extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \ | 71 | static inline void ins##s(unsigned short port, void * addr, unsigned long count) \ |
72 | { __asm__ __volatile__ ("rep ; ins" #s \ | 72 | { __asm__ __volatile__ ("rep ; ins" #s \ |
73 | : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } | 73 | : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } |
74 | 74 | ||
75 | #define __OUTS(s) \ | 75 | #define __OUTS(s) \ |
76 | extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ | 76 | static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \ |
77 | { __asm__ __volatile__ ("rep ; outs" #s \ | 77 | { __asm__ __volatile__ ("rep ; outs" #s \ |
78 | : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } | 78 | : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); } |
79 | 79 | ||
@@ -110,12 +110,12 @@ __OUTS(l) | |||
110 | * Change virtual addresses to physical addresses and vv. | 110 | * Change virtual addresses to physical addresses and vv. |
111 | * These are pretty trivial | 111 | * These are pretty trivial |
112 | */ | 112 | */ |
113 | extern inline unsigned long virt_to_phys(volatile void * address) | 113 | static inline unsigned long virt_to_phys(volatile void * address) |
114 | { | 114 | { |
115 | return __pa(address); | 115 | return __pa(address); |
116 | } | 116 | } |
117 | 117 | ||
118 | extern inline void * phys_to_virt(unsigned long address) | 118 | static inline void * phys_to_virt(unsigned long address) |
119 | { | 119 | { |
120 | return __va(address); | 120 | return __va(address); |
121 | } | 121 | } |
@@ -130,7 +130,7 @@ extern inline void * phys_to_virt(unsigned long address) | |||
130 | 130 | ||
131 | extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags); | 131 | extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags); |
132 | 132 | ||
133 | extern inline void __iomem * ioremap (unsigned long offset, unsigned long size) | 133 | static inline void __iomem * ioremap (unsigned long offset, unsigned long size) |
134 | { | 134 | { |
135 | return __ioremap(offset, size, 0); | 135 | return __ioremap(offset, size, 0); |
136 | } | 136 | } |
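The io.h helpers above (and several other headers in this merge) move from `extern inline` to `static inline`. Under gnu89 semantics, `extern inline` never emits an out-of-line body, so any call the compiler declines to inline becomes an unresolved symbol unless a separate external definition exists; `static inline` gives each translation unit its own fallback copy. A minimal single-file illustration (the mapping offset is made up; build with e.g. `gcc -std=gnu89 -fno-inline`):

/* With `static inline`, this links even when the compiler refuses to
 * inline; changing it to `extern inline` (gnu89) and building with
 * -fno-inline would leave phys_to_virt_demo() undefined at link time. */
#include <stdio.h>

static inline void *phys_to_virt_demo(unsigned long address)
{
        return (void *)(address + 0xffff880000000000UL); /* made-up offset */
}

int main(void)
{
        printf("%p\n", phys_to_virt_demo(0x1000));
        return 0;
}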
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h index 5e166b9d3bde..022e9d340ad7 100644 --- a/include/asm-x86_64/ipi.h +++ b/include/asm-x86_64/ipi.h | |||
@@ -31,9 +31,20 @@ | |||
31 | 31 | ||
32 | static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest) | 32 | static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest) |
33 | { | 33 | { |
34 | unsigned int icr = APIC_DM_FIXED | shortcut | vector | dest; | 34 | unsigned int icr = shortcut | dest; |
35 | if (vector == KDB_VECTOR) | 35 | |
36 | icr = (icr & (~APIC_VECTOR_MASK)) | APIC_DM_NMI; | 36 | switch (vector) { |
37 | default: | ||
38 | icr |= APIC_DM_FIXED | vector; | ||
39 | break; | ||
40 | case NMI_VECTOR: | ||
41 | /* | ||
42 | * Setup KDB IPI to be delivered as an NMI | ||
43 | */ | ||
44 | case KDB_VECTOR: | ||
45 | icr |= APIC_DM_NMI; | ||
46 | break; | ||
47 | } | ||
37 | return icr; | 48 | return icr; |
38 | } | 49 | } |
39 | 50 | ||
@@ -66,7 +77,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign | |||
66 | /* | 77 | /* |
67 | * Send the IPI. The write to APIC_ICR fires this off. | 78 | * Send the IPI. The write to APIC_ICR fires this off. |
68 | */ | 79 | */ |
69 | apic_write_around(APIC_ICR, cfg); | 80 | apic_write(APIC_ICR, cfg); |
70 | } | 81 | } |
71 | 82 | ||
72 | 83 | ||
@@ -92,7 +103,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
92 | * prepare target chip field | 103 | * prepare target chip field |
93 | */ | 104 | */ |
94 | cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]); | 105 | cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]); |
95 | apic_write_around(APIC_ICR2, cfg); | 106 | apic_write(APIC_ICR2, cfg); |
96 | 107 | ||
97 | /* | 108 | /* |
98 | * program the ICR | 109 | * program the ICR |
@@ -102,7 +113,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
102 | /* | 113 | /* |
103 | * Send the IPI. The write to APIC_ICR fires this off. | 114 | * Send the IPI. The write to APIC_ICR fires this off. |
104 | */ | 115 | */ |
105 | apic_write_around(APIC_ICR, cfg); | 116 | apic_write(APIC_ICR, cfg); |
106 | } | 117 | } |
107 | local_irq_restore(flags); | 118 | local_irq_restore(flags); |
108 | } | 119 | } |
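The reworked __prepare_ICR() above turns the KDB special case into a switch so that both NMI_VECTOR and KDB_VECTOR are delivered with NMI delivery mode rather than as fixed vectors, and apic_write_around() calls collapse to apic_write(). A user-space model of the same selection logic, with the APIC delivery-mode encodings reduced to local constants:

#include <stdio.h>

/* Local stand-ins -- the real APIC_DM_* values live in apicdef.h. */
#define APIC_DM_FIXED  0x00000u
#define APIC_DM_NMI    0x00400u
#define NMI_VECTOR     0x02
#define KDB_VECTOR     0xfb

static unsigned int prepare_icr(unsigned int shortcut, int vector,
                                unsigned int dest)
{
        unsigned int icr = shortcut | dest;

        switch (vector) {
        default:
                icr |= APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:        /* both are sent as NMIs */
        case KDB_VECTOR:
                icr |= APIC_DM_NMI;
                break;
        }
        return icr;
}

int main(void)
{
        printf("reschedule: %#x\n", prepare_icr(0, 0xfd, 0));
        printf("nmi:        %#x\n", prepare_icr(0, NMI_VECTOR, 0));
        return 0;
}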
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h index 4482657777bb..fb724ba37ae6 100644 --- a/include/asm-x86_64/irq.h +++ b/include/asm-x86_64/irq.h | |||
@@ -48,10 +48,6 @@ static __inline__ int irq_canonicalize(int irq) | |||
48 | #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ | 48 | #define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | struct irqaction; | ||
52 | struct pt_regs; | ||
53 | int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); | ||
54 | |||
55 | #ifdef CONFIG_HOTPLUG_CPU | 51 | #ifdef CONFIG_HOTPLUG_CPU |
56 | #include <linux/cpumask.h> | 52 | #include <linux/cpumask.h> |
57 | extern void fixup_irqs(cpumask_t map); | 53 | extern void fixup_irqs(cpumask_t map); |
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h index b90341994d80..f604e84c5303 100644 --- a/include/asm-x86_64/kdebug.h +++ b/include/asm-x86_64/kdebug.h | |||
@@ -46,7 +46,7 @@ extern void die(const char *,struct pt_regs *,long); | |||
46 | extern void __die(const char *,struct pt_regs *,long); | 46 | extern void __die(const char *,struct pt_regs *,long); |
47 | extern void show_registers(struct pt_regs *regs); | 47 | extern void show_registers(struct pt_regs *regs); |
48 | extern void dump_pagetable(unsigned long); | 48 | extern void dump_pagetable(unsigned long); |
49 | extern void oops_begin(void); | 49 | extern unsigned long oops_begin(void); |
50 | extern void oops_end(void); | 50 | extern void oops_end(unsigned long); |
51 | 51 | ||
52 | #endif | 52 | #endif |
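oops_begin() now returns an unsigned long that the caller hands back to oops_end(), presumably the interrupt flags saved when taking the die lock with irqsave so the right IRQ state is restored afterwards. A hedged sketch of the calling pattern only; the locking and flag handling below are illustrative stubs, not the kernel's implementation:

#include <stdio.h>

/* Stand-ins for local_irq_save()/restore() and the die lock. */
static unsigned long fake_irq_flags = 0x200;   /* pretend IF was set */
static int die_lock_held;

static unsigned long oops_begin_demo(void)
{
        unsigned long flags = fake_irq_flags;  /* save, then "disable" IRQs */
        die_lock_held = 1;
        return flags;
}

static void oops_end_demo(unsigned long flags)
{
        die_lock_held = 0;
        fake_irq_flags = flags;                /* restore saved IRQ state */
}

int main(void)
{
        unsigned long flags = oops_begin_demo();
        printf("dumping registers while holding the die lock...\n");
        oops_end_demo(flags);
        return 0;
}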
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h index c954f15c1a75..3e72c41727c5 100644 --- a/include/asm-x86_64/local.h +++ b/include/asm-x86_64/local.h | |||
@@ -29,7 +29,7 @@ static __inline__ void local_dec(local_t *v) | |||
29 | :"m" (v->counter)); | 29 | :"m" (v->counter)); |
30 | } | 30 | } |
31 | 31 | ||
32 | static __inline__ void local_add(unsigned long i, local_t *v) | 32 | static __inline__ void local_add(unsigned int i, local_t *v) |
33 | { | 33 | { |
34 | __asm__ __volatile__( | 34 | __asm__ __volatile__( |
35 | "addl %1,%0" | 35 | "addl %1,%0" |
@@ -37,7 +37,7 @@ static __inline__ void local_add(unsigned long i, local_t *v) | |||
37 | :"ir" (i), "m" (v->counter)); | 37 | :"ir" (i), "m" (v->counter)); |
38 | } | 38 | } |
39 | 39 | ||
40 | static __inline__ void local_sub(unsigned long i, local_t *v) | 40 | static __inline__ void local_sub(unsigned int i, local_t *v) |
41 | { | 41 | { |
42 | __asm__ __volatile__( | 42 | __asm__ __volatile__( |
43 | "subl %1,%0" | 43 | "subl %1,%0" |
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h index 768413751b34..b40c661f111e 100644 --- a/include/asm-x86_64/mmzone.h +++ b/include/asm-x86_64/mmzone.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <asm/smp.h> | 13 | #include <asm/smp.h> |
14 | 14 | ||
15 | #define NODEMAPSIZE 0xff | 15 | #define NODEMAPSIZE 0xfff |
16 | 16 | ||
17 | /* Simple perfect hash to map physical addresses to node numbers */ | 17 | /* Simple perfect hash to map physical addresses to node numbers */ |
18 | extern int memnode_shift; | 18 | extern int memnode_shift; |
@@ -54,7 +54,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | |||
54 | 54 | ||
55 | #define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \ | 55 | #define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \ |
56 | ({ u8 nid__ = pfn_to_nid(pfn); \ | 56 | ({ u8 nid__ = pfn_to_nid(pfn); \ |
57 | nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) <= node_end_pfn(nid__); })) | 57 | nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) < node_end_pfn(nid__); })) |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | #define local_mapnr(kvaddr) \ | 60 | #define local_mapnr(kvaddr) \ |
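The pfn_valid() fix above changes `<=` to `<` against node_end_pfn, treating a node's end pfn as exclusive; with the old test the first pfn past the node slipped through the check. A tiny demonstration of the half-open range convention:

#include <stdio.h>

/* Node spans [start, end): end is one past the last valid pfn. */
static int pfn_in_node(unsigned long pfn, unsigned long start,
                       unsigned long end)
{
        return pfn >= start && pfn < end;
}

int main(void)
{
        unsigned long start = 0x100, end = 0x200;    /* 256 pages */

        printf("last pfn 0x1ff: %d\n", pfn_in_node(0x1ff, start, end)); /* 1 */
        printf("end pfn  0x200: %d\n", pfn_in_node(0x200, start, end)); /* 0 */
        return 0;
}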
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h index ba15279a79d0..4d727f3f5550 100644 --- a/include/asm-x86_64/msr.h +++ b/include/asm-x86_64/msr.h | |||
@@ -29,22 +29,37 @@ | |||
29 | #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) | 29 | #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32) |
30 | 30 | ||
31 | /* wrmsr with exception handling */ | 31 | /* wrmsr with exception handling */ |
32 | #define wrmsr_safe(msr,a,b) ({ int ret__; \ | 32 | #define wrmsr_safe(msr,a,b) ({ int ret__; \ |
33 | asm volatile("2: wrmsr ; xorl %0,%0\n" \ | 33 | asm volatile("2: wrmsr ; xorl %0,%0\n" \ |
34 | "1:\n\t" \ | 34 | "1:\n\t" \ |
35 | ".section .fixup,\"ax\"\n\t" \ | 35 | ".section .fixup,\"ax\"\n\t" \ |
36 | "3: movl %4,%0 ; jmp 1b\n\t" \ | 36 | "3: movl %4,%0 ; jmp 1b\n\t" \ |
37 | ".previous\n\t" \ | 37 | ".previous\n\t" \ |
38 | ".section __ex_table,\"a\"\n" \ | 38 | ".section __ex_table,\"a\"\n" \ |
39 | " .align 8\n\t" \ | 39 | " .align 8\n\t" \ |
40 | " .quad 2b,3b\n\t" \ | 40 | " .quad 2b,3b\n\t" \ |
41 | ".previous" \ | 41 | ".previous" \ |
42 | : "=a" (ret__) \ | 42 | : "=a" (ret__) \ |
43 | : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\ | 43 | : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \ |
44 | ret__; }) | 44 | ret__; }) |
45 | 45 | ||
46 | #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) | 46 | #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32)) |
47 | 47 | ||
48 | #define rdmsr_safe(msr,a,b) \ | ||
49 | ({ int ret__; \ | ||
50 | asm volatile ("1: rdmsr\n" \ | ||
51 | "2:\n" \ | ||
52 | ".section .fixup,\"ax\"\n" \ | ||
53 | "3: movl %4,%0\n" \ | ||
54 | " jmp 2b\n" \ | ||
55 | ".previous\n" \ | ||
56 | ".section __ex_table,\"a\"\n" \ | ||
57 | " .align 8\n" \ | ||
58 | " .quad 1b,3b\n" \ | ||
59 | ".previous":"=&bDS" (ret__), "=a"(a), "=d"(b)\ | ||
60 | :"c"(msr), "i"(-EIO), "0"(0)); \ | ||
61 | ret__; }) | ||
62 | |||
48 | #define rdtsc(low,high) \ | 63 | #define rdtsc(low,high) \ |
49 | __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) | 64 | __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high)) |
50 | 65 | ||
@@ -64,7 +79,7 @@ | |||
64 | : "=a" (low), "=d" (high) \ | 79 | : "=a" (low), "=d" (high) \ |
65 | : "c" (counter)) | 80 | : "c" (counter)) |
66 | 81 | ||
67 | extern inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, | 82 | static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, |
68 | unsigned int *ecx, unsigned int *edx) | 83 | unsigned int *ecx, unsigned int *edx) |
69 | { | 84 | { |
70 | __asm__("cpuid" | 85 | __asm__("cpuid" |
@@ -90,7 +105,7 @@ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, | |||
90 | /* | 105 | /* |
91 | * CPUID functions returning a single datum | 106 | * CPUID functions returning a single datum |
92 | */ | 107 | */ |
93 | extern inline unsigned int cpuid_eax(unsigned int op) | 108 | static inline unsigned int cpuid_eax(unsigned int op) |
94 | { | 109 | { |
95 | unsigned int eax; | 110 | unsigned int eax; |
96 | 111 | ||
@@ -100,7 +115,7 @@ extern inline unsigned int cpuid_eax(unsigned int op) | |||
100 | : "bx", "cx", "dx"); | 115 | : "bx", "cx", "dx"); |
101 | return eax; | 116 | return eax; |
102 | } | 117 | } |
103 | extern inline unsigned int cpuid_ebx(unsigned int op) | 118 | static inline unsigned int cpuid_ebx(unsigned int op) |
104 | { | 119 | { |
105 | unsigned int eax, ebx; | 120 | unsigned int eax, ebx; |
106 | 121 | ||
@@ -110,7 +125,7 @@ extern inline unsigned int cpuid_ebx(unsigned int op) | |||
110 | : "cx", "dx" ); | 125 | : "cx", "dx" ); |
111 | return ebx; | 126 | return ebx; |
112 | } | 127 | } |
113 | extern inline unsigned int cpuid_ecx(unsigned int op) | 128 | static inline unsigned int cpuid_ecx(unsigned int op) |
114 | { | 129 | { |
115 | unsigned int eax, ecx; | 130 | unsigned int eax, ecx; |
116 | 131 | ||
@@ -120,7 +135,7 @@ extern inline unsigned int cpuid_ecx(unsigned int op) | |||
120 | : "bx", "dx" ); | 135 | : "bx", "dx" ); |
121 | return ecx; | 136 | return ecx; |
122 | } | 137 | } |
123 | extern inline unsigned int cpuid_edx(unsigned int op) | 138 | static inline unsigned int cpuid_edx(unsigned int op) |
124 | { | 139 | { |
125 | unsigned int eax, edx; | 140 | unsigned int eax, edx; |
126 | 141 | ||
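checking_wrmsrl() and the new rdmsr_safe() above move MSR values as two 32-bit halves in eax/edx: the write path splits a 64-bit value into (u32)(val) and (u32)(val >> 32), and a reader reassembles the halves. The round-trip in plain C, with no real MSR access, just the packing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t val = 0x123456789abcdef0ULL;

        /* what wrmsr_safe()/checking_wrmsrl() put in eax and edx */
        uint32_t lo = (uint32_t)val;
        uint32_t hi = (uint32_t)(val >> 32);

        /* what a caller of rdmsr_safe() does with the two halves */
        uint64_t back = ((uint64_t)hi << 32) | lo;

        printf("lo=%#x hi=%#x roundtrip=%#llx\n",
               lo, hi, (unsigned long long)back);
        return 0;
}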
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h index 5c363a1482e4..bcf55c3f7f7f 100644 --- a/include/asm-x86_64/numa.h +++ b/include/asm-x86_64/numa.h | |||
@@ -9,6 +9,7 @@ struct node { | |||
9 | }; | 9 | }; |
10 | 10 | ||
11 | extern int compute_hash_shift(struct node *nodes, int numnodes); | 11 | extern int compute_hash_shift(struct node *nodes, int numnodes); |
12 | extern int pxm_to_node(int nid); | ||
12 | 13 | ||
13 | #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) | 14 | #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) |
14 | 15 | ||
@@ -16,6 +17,8 @@ extern void numa_add_cpu(int cpu); | |||
16 | extern void numa_init_array(void); | 17 | extern void numa_init_array(void); |
17 | extern int numa_off; | 18 | extern int numa_off; |
18 | 19 | ||
20 | extern unsigned char apicid_to_node[256]; | ||
21 | |||
19 | #define NUMA_NO_NODE 0xff | 22 | #define NUMA_NO_NODE 0xff |
20 | 23 | ||
21 | #endif | 24 | #endif |
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index 135ffaa0393b..e5ab4d231f2c 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h | |||
@@ -32,6 +32,8 @@ | |||
32 | #ifdef __KERNEL__ | 32 | #ifdef __KERNEL__ |
33 | #ifndef __ASSEMBLY__ | 33 | #ifndef __ASSEMBLY__ |
34 | 34 | ||
35 | extern unsigned long end_pfn; | ||
36 | |||
35 | void clear_page(void *); | 37 | void clear_page(void *); |
36 | void copy_page(void *, void *); | 38 | void copy_page(void *, void *); |
37 | 39 | ||
@@ -111,7 +113,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
111 | #ifdef CONFIG_FLATMEM | 113 | #ifdef CONFIG_FLATMEM |
112 | #define pfn_to_page(pfn) (mem_map + (pfn)) | 114 | #define pfn_to_page(pfn) (mem_map + (pfn)) |
113 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | 115 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) |
114 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 116 | #define pfn_valid(pfn) ((pfn) < end_pfn) |
115 | #endif | 117 | #endif |
116 | 118 | ||
117 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 119 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h index eeb3088a1c9e..5a82a6762c21 100644 --- a/include/asm-x86_64/pci.h +++ b/include/asm-x86_64/pci.h | |||
@@ -50,10 +50,10 @@ extern int iommu_setup(char *opt); | |||
50 | * address space. The networking and block device layers use | 50 | * address space. The networking and block device layers use |
51 | * this boolean for bounce buffer decisions | 51 | * this boolean for bounce buffer decisions |
52 | * | 52 | * |
53 | * On AMD64 it mostly equals, but we set it to zero to tell some subsystems | 53 | * On x86-64 it mostly equals, but we set it to zero to tell some subsystems |
54 | * that an IOMMU is available. | 54 | * that an hard or soft IOMMU is available. |
55 | */ | 55 | */ |
56 | #define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0) | 56 | #define PCI_DMA_BUS_IS_PHYS 0 |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * x86-64 always supports DAC, but sometimes it is useful to force | 59 | * x86-64 always supports DAC, but sometimes it is useful to force |
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h index 36b766cfc4d5..bbf89aa8a1af 100644 --- a/include/asm-x86_64/pda.h +++ b/include/asm-x86_64/pda.h | |||
@@ -10,10 +10,8 @@ | |||
10 | struct x8664_pda { | 10 | struct x8664_pda { |
11 | struct task_struct *pcurrent; /* Current process */ | 11 | struct task_struct *pcurrent; /* Current process */ |
12 | unsigned long data_offset; /* Per cpu data offset from linker address */ | 12 | unsigned long data_offset; /* Per cpu data offset from linker address */ |
13 | struct x8664_pda *me; /* Pointer to itself */ | ||
14 | unsigned long kernelstack; /* top of kernel stack for current */ | 13 | unsigned long kernelstack; /* top of kernel stack for current */ |
15 | unsigned long oldrsp; /* user rsp for system call */ | 14 | unsigned long oldrsp; /* user rsp for system call */ |
16 | unsigned long irqrsp; /* Old rsp for interrupts. */ | ||
17 | int irqcount; /* Irq nesting counter. Starts with -1 */ | 15 | int irqcount; /* Irq nesting counter. Starts with -1 */ |
18 | int cpunumber; /* Logical CPU number */ | 16 | int cpunumber; /* Logical CPU number */ |
19 | char *irqstackptr; /* top of irqstack */ | 17 | char *irqstackptr; /* top of irqstack */ |
@@ -22,7 +20,7 @@ struct x8664_pda { | |||
22 | struct mm_struct *active_mm; | 20 | struct mm_struct *active_mm; |
23 | int mmu_state; | 21 | int mmu_state; |
24 | unsigned apic_timer_irqs; | 22 | unsigned apic_timer_irqs; |
25 | } ____cacheline_aligned; | 23 | } ____cacheline_aligned_in_smp; |
26 | 24 | ||
27 | 25 | ||
28 | #define IRQSTACK_ORDER 2 | 26 | #define IRQSTACK_ORDER 2 |
@@ -42,13 +40,14 @@ extern void __bad_pda_field(void); | |||
42 | #define pda_offset(field) offsetof(struct x8664_pda, field) | 40 | #define pda_offset(field) offsetof(struct x8664_pda, field) |
43 | 41 | ||
44 | #define pda_to_op(op,field,val) do { \ | 42 | #define pda_to_op(op,field,val) do { \ |
43 | typedef typeof_field(struct x8664_pda, field) T__; \ | ||
45 | switch (sizeof_field(struct x8664_pda, field)) { \ | 44 | switch (sizeof_field(struct x8664_pda, field)) { \ |
46 | case 2: \ | 45 | case 2: \ |
47 | asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \ | 46 | asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ |
48 | case 4: \ | 47 | case 4: \ |
49 | asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \ | 48 | asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ |
50 | case 8: \ | 49 | case 8: \ |
51 | asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \ | 50 | asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ |
52 | default: __bad_pda_field(); \ | 51 | default: __bad_pda_field(); \ |
53 | } \ | 52 | } \ |
54 | } while (0) | 53 | } while (0) |
@@ -58,7 +57,7 @@ asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); bre | |||
58 | * Unfortunately removing them causes all hell to break lose currently. | 57 | * Unfortunately removing them causes all hell to break lose currently. |
59 | */ | 58 | */ |
60 | #define pda_from_op(op,field) ({ \ | 59 | #define pda_from_op(op,field) ({ \ |
61 | typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \ | 60 | typeof_field(struct x8664_pda, field) ret__; \ |
62 | switch (sizeof_field(struct x8664_pda, field)) { \ | 61 | switch (sizeof_field(struct x8664_pda, field)) { \ |
63 | case 2: \ | 62 | case 2: \ |
64 | asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ | 63 | asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ |
@@ -75,6 +74,7 @@ asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); b | |||
75 | #define write_pda(field,val) pda_to_op("mov",field,val) | 74 | #define write_pda(field,val) pda_to_op("mov",field,val) |
76 | #define add_pda(field,val) pda_to_op("add",field,val) | 75 | #define add_pda(field,val) pda_to_op("add",field,val) |
77 | #define sub_pda(field,val) pda_to_op("sub",field,val) | 76 | #define sub_pda(field,val) pda_to_op("sub",field,val) |
77 | #define or_pda(field,val) pda_to_op("or",field,val) | ||
78 | 78 | ||
79 | #endif | 79 | #endif |
80 | 80 | ||
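The pda_to_op() change above captures the field's type with typeof_field() and casts val through it, so constants can use the "ri" constraint while always matching the operand width picked by sizeof_field(). A loose user-space analogy of the same idea, using ordinary typeof/sizeof on a struct instead of %gs-relative asm; the struct and macro names are illustrative only:

#include <stdio.h>
#include <stddef.h>

struct pda_model {
        int             cpunumber;
        unsigned long   kernelstack;
        unsigned int    __softirq_pending;
};

static struct pda_model this_pda;

/* Cast through the field's own type, as pda_to_op() now does with T__. */
#define write_pda_demo(field, val)                                      \
        do {                                                            \
                typeof(((struct pda_model *)0)->field) v__ =            \
                        (typeof(((struct pda_model *)0)->field))(val);  \
                this_pda.field = v__;                                   \
        } while (0)

#define read_pda_demo(field)  (this_pda.field)

int main(void)
{
        write_pda_demo(cpunumber, 3L);          /* long literal, int field */
        write_pda_demo(__softirq_pending, 1u << 4);
        printf("cpu=%d pending=%#x (field sizes %zu/%zu)\n",
               read_pda_demo(cpunumber), read_pda_demo(__softirq_pending),
               sizeof(this_pda.cpunumber), sizeof(this_pda.__softirq_pending));
        return 0;
}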
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h index deadd146978b..08cad2482bcb 100644 --- a/include/asm-x86_64/pgalloc.h +++ b/include/asm-x86_64/pgalloc.h | |||
@@ -18,12 +18,12 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p | |||
18 | set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT))); | 18 | set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT))); |
19 | } | 19 | } |
20 | 20 | ||
21 | extern __inline__ pmd_t *get_pmd(void) | 21 | static inline pmd_t *get_pmd(void) |
22 | { | 22 | { |
23 | return (pmd_t *)get_zeroed_page(GFP_KERNEL); | 23 | return (pmd_t *)get_zeroed_page(GFP_KERNEL); |
24 | } | 24 | } |
25 | 25 | ||
26 | extern __inline__ void pmd_free(pmd_t *pmd) | 26 | static inline void pmd_free(pmd_t *pmd) |
27 | { | 27 | { |
28 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | 28 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); |
29 | free_page((unsigned long)pmd); | 29 | free_page((unsigned long)pmd); |
@@ -86,13 +86,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add | |||
86 | /* Should really implement gc for free page table pages. This could be | 86 | /* Should really implement gc for free page table pages. This could be |
87 | done with a reference count in struct page. */ | 87 | done with a reference count in struct page. */ |
88 | 88 | ||
89 | extern __inline__ void pte_free_kernel(pte_t *pte) | 89 | static inline void pte_free_kernel(pte_t *pte) |
90 | { | 90 | { |
91 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | 91 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); |
92 | free_page((unsigned long)pte); | 92 | free_page((unsigned long)pte); |
93 | } | 93 | } |
94 | 94 | ||
95 | extern inline void pte_free(struct page *pte) | 95 | static inline void pte_free(struct page *pte) |
96 | { | 96 | { |
97 | __free_page(pte); | 97 | __free_page(pte); |
98 | } | 98 | } |
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 5e0f2fdab0d3..1dc110ba82d6 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h | |||
@@ -85,7 +85,7 @@ static inline void set_pud(pud_t *dst, pud_t val) | |||
85 | pud_val(*dst) = pud_val(val); | 85 | pud_val(*dst) = pud_val(val); |
86 | } | 86 | } |
87 | 87 | ||
88 | extern inline void pud_clear (pud_t *pud) | 88 | static inline void pud_clear (pud_t *pud) |
89 | { | 89 | { |
90 | set_pud(pud, __pud(0)); | 90 | set_pud(pud, __pud(0)); |
91 | } | 91 | } |
@@ -95,7 +95,7 @@ static inline void set_pgd(pgd_t *dst, pgd_t val) | |||
95 | pgd_val(*dst) = pgd_val(val); | 95 | pgd_val(*dst) = pgd_val(val); |
96 | } | 96 | } |
97 | 97 | ||
98 | extern inline void pgd_clear (pgd_t * pgd) | 98 | static inline void pgd_clear (pgd_t * pgd) |
99 | { | 99 | { |
100 | set_pgd(pgd, __pgd(0)); | 100 | set_pgd(pgd, __pgd(0)); |
101 | } | 101 | } |
@@ -375,7 +375,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) | |||
375 | } | 375 | } |
376 | 376 | ||
377 | /* Change flags of a PTE */ | 377 | /* Change flags of a PTE */ |
378 | extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 378 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
379 | { | 379 | { |
380 | pte_val(pte) &= _PAGE_CHG_MASK; | 380 | pte_val(pte) &= _PAGE_CHG_MASK; |
381 | pte_val(pte) |= pgprot_val(newprot); | 381 | pte_val(pte) |= pgprot_val(newprot); |
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index a8321999448f..03837d34fba0 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h | |||
@@ -254,7 +254,13 @@ struct thread_struct { | |||
254 | u64 tls_array[GDT_ENTRY_TLS_ENTRIES]; | 254 | u64 tls_array[GDT_ENTRY_TLS_ENTRIES]; |
255 | } __attribute__((aligned(16))); | 255 | } __attribute__((aligned(16))); |
256 | 256 | ||
257 | #define INIT_THREAD {} | 257 | #define INIT_THREAD { \ |
258 | .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
259 | } | ||
260 | |||
261 | #define INIT_TSS { \ | ||
262 | .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | ||
263 | } | ||
258 | 264 | ||
259 | #define INIT_MMAP \ | 265 | #define INIT_MMAP \ |
260 | { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } | 266 | { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL } |
@@ -375,13 +381,13 @@ struct extended_sigtable { | |||
375 | #define ASM_NOP_MAX 8 | 381 | #define ASM_NOP_MAX 8 |
376 | 382 | ||
377 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ | 383 | /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ |
378 | extern inline void rep_nop(void) | 384 | static inline void rep_nop(void) |
379 | { | 385 | { |
380 | __asm__ __volatile__("rep;nop": : :"memory"); | 386 | __asm__ __volatile__("rep;nop": : :"memory"); |
381 | } | 387 | } |
382 | 388 | ||
383 | /* Stop speculative execution */ | 389 | /* Stop speculative execution */ |
384 | extern inline void sync_core(void) | 390 | static inline void sync_core(void) |
385 | { | 391 | { |
386 | int tmp; | 392 | int tmp; |
387 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); | 393 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); |
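The INIT_THREAD/INIT_TSS change earlier in this file starts rsp0 at `&init_stack + sizeof(init_stack)`, one byte past the end of the init stack, since the x86-64 kernel stack grows down from its high end. The same computation on an ordinary array, purely to show where that address lands:

#include <stdio.h>

static unsigned char init_stack_demo[8192];   /* stand-in for init_stack */

int main(void)
{
        unsigned long base = (unsigned long)&init_stack_demo;
        unsigned long rsp0 = (unsigned long)&init_stack_demo
                             + sizeof(init_stack_demo);

        printf("stack occupies [%#lx, %#lx); rsp0 starts at the top: %#lx\n",
               base, rsp0, rsp0);
        return 0;
}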
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index 6c813eb521f3..dbb37b0adb43 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h | |||
@@ -8,7 +8,6 @@ | |||
8 | struct cpuinfo_x86; | 8 | struct cpuinfo_x86; |
9 | struct pt_regs; | 9 | struct pt_regs; |
10 | 10 | ||
11 | extern void get_cpu_vendor(struct cpuinfo_x86*); | ||
12 | extern void start_kernel(void); | 11 | extern void start_kernel(void); |
13 | extern void pda_init(int); | 12 | extern void pda_init(int); |
14 | 13 | ||
@@ -75,9 +74,6 @@ extern void acpi_reserve_bootmem(void); | |||
75 | 74 | ||
76 | extern void swap_low_mappings(void); | 75 | extern void swap_low_mappings(void); |
77 | 76 | ||
78 | extern void oops_begin(void); | ||
79 | extern void die(const char *,struct pt_regs *,long); | ||
80 | extern void __die(const char * str, struct pt_regs * regs, long err); | ||
81 | extern void __show_regs(struct pt_regs * regs); | 77 | extern void __show_regs(struct pt_regs * regs); |
82 | extern void show_regs(struct pt_regs * regs); | 78 | extern void show_regs(struct pt_regs * regs); |
83 | 79 | ||
@@ -94,8 +90,6 @@ extern int unhandled_signal(struct task_struct *tsk, int sig); | |||
94 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 90 | extern void select_idle_routine(const struct cpuinfo_x86 *c); |
95 | extern void swiotlb_init(void); | 91 | extern void swiotlb_init(void); |
96 | 92 | ||
97 | extern unsigned long max_mapnr; | ||
98 | extern unsigned long end_pfn; | ||
99 | extern unsigned long table_start, table_end; | 93 | extern unsigned long table_start, table_end; |
100 | 94 | ||
101 | extern int exception_trace; | 95 | extern int exception_trace; |
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h index fe9b96d94815..f8d55798535a 100644 --- a/include/asm-x86_64/signal.h +++ b/include/asm-x86_64/signal.h | |||
@@ -143,23 +143,23 @@ typedef struct sigaltstack { | |||
143 | #undef __HAVE_ARCH_SIG_BITOPS | 143 | #undef __HAVE_ARCH_SIG_BITOPS |
144 | #if 0 | 144 | #if 0 |
145 | 145 | ||
146 | extern __inline__ void sigaddset(sigset_t *set, int _sig) | 146 | static inline void sigaddset(sigset_t *set, int _sig) |
147 | { | 147 | { |
148 | __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc"); | 148 | __asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc"); |
149 | } | 149 | } |
150 | 150 | ||
151 | extern __inline__ void sigdelset(sigset_t *set, int _sig) | 151 | static inline void sigdelset(sigset_t *set, int _sig) |
152 | { | 152 | { |
153 | __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc"); | 153 | __asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc"); |
154 | } | 154 | } |
155 | 155 | ||
156 | extern __inline__ int __const_sigismember(sigset_t *set, int _sig) | 156 | static inline int __const_sigismember(sigset_t *set, int _sig) |
157 | { | 157 | { |
158 | unsigned long sig = _sig - 1; | 158 | unsigned long sig = _sig - 1; |
159 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1))); | 159 | return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1))); |
160 | } | 160 | } |
161 | 161 | ||
162 | extern __inline__ int __gen_sigismember(sigset_t *set, int _sig) | 162 | static inline int __gen_sigismember(sigset_t *set, int _sig) |
163 | { | 163 | { |
164 | int ret; | 164 | int ret; |
165 | __asm__("btq %2,%1\n\tsbbq %0,%0" | 165 | __asm__("btq %2,%1\n\tsbbq %0,%0" |
@@ -172,7 +172,7 @@ extern __inline__ int __gen_sigismember(sigset_t *set, int _sig) | |||
172 | __const_sigismember((set),(sig)) : \ | 172 | __const_sigismember((set),(sig)) : \ |
173 | __gen_sigismember((set),(sig))) | 173 | __gen_sigismember((set),(sig))) |
174 | 174 | ||
175 | extern __inline__ int sigfindinword(unsigned long word) | 175 | static inline int sigfindinword(unsigned long word) |
176 | { | 176 | { |
177 | __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc"); | 177 | __asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc"); |
178 | return word; | 178 | return word; |
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index de8b57b2b62b..24e32611f0bf 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h | |||
@@ -72,7 +72,7 @@ static inline int num_booting_cpus(void) | |||
72 | 72 | ||
73 | #define raw_smp_processor_id() read_pda(cpunumber) | 73 | #define raw_smp_processor_id() read_pda(cpunumber) |
74 | 74 | ||
75 | extern __inline int hard_smp_processor_id(void) | 75 | static inline int hard_smp_processor_id(void) |
76 | { | 76 | { |
77 | /* we don't want to mark this access volatile - bad code generation */ | 77 | /* we don't want to mark this access volatile - bad code generation */ |
78 | return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID)); | 78 | return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID)); |
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 5aeb57a3baad..69636831ad2f 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h | |||
@@ -6,47 +6,21 @@ | |||
6 | #include <asm/page.h> | 6 | #include <asm/page.h> |
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
8 | 8 | ||
9 | extern int printk(const char * fmt, ...) | ||
10 | __attribute__ ((format (printf, 1, 2))); | ||
11 | |||
12 | /* | 9 | /* |
13 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 10 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
14 | */ | 11 | * |
15 | |||
16 | typedef struct { | ||
17 | volatile unsigned int lock; | ||
18 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
19 | unsigned magic; | ||
20 | #endif | ||
21 | #ifdef CONFIG_PREEMPT | ||
22 | unsigned int break_lock; | ||
23 | #endif | ||
24 | } spinlock_t; | ||
25 | |||
26 | #define SPINLOCK_MAGIC 0xdead4ead | ||
27 | |||
28 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
29 | #define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC | ||
30 | #else | ||
31 | #define SPINLOCK_MAGIC_INIT /* */ | ||
32 | #endif | ||
33 | |||
34 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT } | ||
35 | |||
36 | #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0) | ||
37 | |||
38 | /* | ||
39 | * Simple spin lock operations. There are two variants, one clears IRQ's | 12 | * Simple spin lock operations. There are two variants, one clears IRQ's |
40 | * on the local processor, one does not. | 13 | * on the local processor, one does not. |
41 | * | 14 | * |
42 | * We make no fairness assumptions. They have a cost. | 15 | * We make no fairness assumptions. They have a cost. |
16 | * | ||
17 | * (the type definitions are in asm/spinlock_types.h) | ||
43 | */ | 18 | */ |
44 | 19 | ||
45 | #define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0) | 20 | #define __raw_spin_is_locked(x) \ |
46 | #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) | 21 | (*(volatile signed char *)(&(x)->slock) <= 0) |
47 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | ||
48 | 22 | ||
49 | #define spin_lock_string \ | 23 | #define __raw_spin_lock_string \ |
50 | "\n1:\t" \ | 24 | "\n1:\t" \ |
51 | "lock ; decb %0\n\t" \ | 25 | "lock ; decb %0\n\t" \ |
52 | "js 2f\n" \ | 26 | "js 2f\n" \ |
@@ -58,74 +32,40 @@ typedef struct { | |||
58 | "jmp 1b\n" \ | 32 | "jmp 1b\n" \ |
59 | LOCK_SECTION_END | 33 | LOCK_SECTION_END |
60 | 34 | ||
61 | /* | 35 | #define __raw_spin_unlock_string \ |
62 | * This works. Despite all the confusion. | ||
63 | * (except on PPro SMP or if we are using OOSTORE) | ||
64 | * (PPro errata 66, 92) | ||
65 | */ | ||
66 | |||
67 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | ||
68 | |||
69 | #define spin_unlock_string \ | ||
70 | "movb $1,%0" \ | 36 | "movb $1,%0" \ |
71 | :"=m" (lock->lock) : : "memory" | 37 | :"=m" (lock->slock) : : "memory" |
72 | |||
73 | |||
74 | static inline void _raw_spin_unlock(spinlock_t *lock) | ||
75 | { | ||
76 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
77 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
78 | assert_spin_locked(lock); | ||
79 | #endif | ||
80 | __asm__ __volatile__( | ||
81 | spin_unlock_string | ||
82 | ); | ||
83 | } | ||
84 | |||
85 | #else | ||
86 | |||
87 | #define spin_unlock_string \ | ||
88 | "xchgb %b0, %1" \ | ||
89 | :"=q" (oldval), "=m" (lock->lock) \ | ||
90 | :"0" (oldval) : "memory" | ||
91 | 38 | ||
92 | static inline void _raw_spin_unlock(spinlock_t *lock) | 39 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
93 | { | 40 | { |
94 | char oldval = 1; | ||
95 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
96 | BUG_ON(lock->magic != SPINLOCK_MAGIC); | ||
97 | assert_spin_locked(lock); | ||
98 | #endif | ||
99 | __asm__ __volatile__( | 41 | __asm__ __volatile__( |
100 | spin_unlock_string | 42 | __raw_spin_lock_string |
101 | ); | 43 | :"=m" (lock->slock) : : "memory"); |
102 | } | 44 | } |
103 | 45 | ||
104 | #endif | 46 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) |
105 | 47 | ||
106 | static inline int _raw_spin_trylock(spinlock_t *lock) | 48 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
107 | { | 49 | { |
108 | char oldval; | 50 | char oldval; |
51 | |||
109 | __asm__ __volatile__( | 52 | __asm__ __volatile__( |
110 | "xchgb %b0,%1" | 53 | "xchgb %b0,%1" |
111 | :"=q" (oldval), "=m" (lock->lock) | 54 | :"=q" (oldval), "=m" (lock->slock) |
112 | :"0" (0) : "memory"); | 55 | :"0" (0) : "memory"); |
56 | |||
113 | return oldval > 0; | 57 | return oldval > 0; |
114 | } | 58 | } |
115 | 59 | ||
116 | static inline void _raw_spin_lock(spinlock_t *lock) | 60 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
117 | { | 61 | { |
118 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
119 | if (lock->magic != SPINLOCK_MAGIC) { | ||
120 | printk("eip: %p\n", __builtin_return_address(0)); | ||
121 | BUG(); | ||
122 | } | ||
123 | #endif | ||
124 | __asm__ __volatile__( | 62 | __asm__ __volatile__( |
125 | spin_lock_string | 63 | __raw_spin_unlock_string |
126 | :"=m" (lock->lock) : : "memory"); | 64 | ); |
127 | } | 65 | } |
128 | 66 | ||
67 | #define __raw_spin_unlock_wait(lock) \ | ||
68 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
129 | 69 | ||
130 | /* | 70 | /* |
131 | * Read-write spinlocks, allowing multiple readers | 71 | * Read-write spinlocks, allowing multiple readers |
@@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock) | |||
136 | * can "mix" irq-safe locks - any writer needs to get a | 76 | * can "mix" irq-safe locks - any writer needs to get a |
137 | * irq-safe write-lock, but readers can get non-irqsafe | 77 | * irq-safe write-lock, but readers can get non-irqsafe |
138 | * read-locks. | 78 | * read-locks. |
139 | */ | 79 | * |
140 | typedef struct { | ||
141 | volatile unsigned int lock; | ||
142 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
143 | unsigned magic; | ||
144 | #endif | ||
145 | #ifdef CONFIG_PREEMPT | ||
146 | unsigned int break_lock; | ||
147 | #endif | ||
148 | } rwlock_t; | ||
149 | |||
150 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
151 | |||
152 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
153 | #define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC | ||
154 | #else | ||
155 | #define RWLOCK_MAGIC_INIT /* */ | ||
156 | #endif | ||
157 | |||
158 | #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT } | ||
159 | |||
160 | #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) | ||
161 | |||
162 | #define read_can_lock(x) ((int)(x)->lock > 0) | ||
163 | #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
164 | |||
165 | /* | ||
166 | * On x86, we implement read-write locks as a 32-bit counter | 80 | * On x86, we implement read-write locks as a 32-bit counter |
167 | * with the high bit (sign) being the "contended" bit. | 81 | * with the high bit (sign) being the "contended" bit. |
168 | * | 82 | * |
@@ -170,29 +84,24 @@ typedef struct { | |||
170 | * | 84 | * |
171 | * Changed to use the same technique as rw semaphores. See | 85 | * Changed to use the same technique as rw semaphores. See |
172 | * semaphore.h for details. -ben | 86 | * semaphore.h for details. -ben |
87 | * | ||
88 | * the helpers are in arch/i386/kernel/semaphore.c | ||
173 | */ | 89 | */ |
174 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | ||
175 | 90 | ||
176 | static inline void _raw_read_lock(rwlock_t *rw) | 91 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) |
92 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
93 | |||
94 | static inline void __raw_read_lock(raw_rwlock_t *rw) | ||
177 | { | 95 | { |
178 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
179 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
180 | #endif | ||
181 | __build_read_lock(rw, "__read_lock_failed"); | 96 | __build_read_lock(rw, "__read_lock_failed"); |
182 | } | 97 | } |
183 | 98 | ||
184 | static inline void _raw_write_lock(rwlock_t *rw) | 99 | static inline void __raw_write_lock(raw_rwlock_t *rw) |
185 | { | 100 | { |
186 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
187 | BUG_ON(rw->magic != RWLOCK_MAGIC); | ||
188 | #endif | ||
189 | __build_write_lock(rw, "__write_lock_failed"); | 101 | __build_write_lock(rw, "__write_lock_failed"); |
190 | } | 102 | } |
191 | 103 | ||
192 | #define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | 104 | static inline int __raw_read_trylock(raw_rwlock_t *lock) |
193 | #define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | ||
194 | |||
195 | static inline int _raw_read_trylock(rwlock_t *lock) | ||
196 | { | 105 | { |
197 | atomic_t *count = (atomic_t *)lock; | 106 | atomic_t *count = (atomic_t *)lock; |
198 | atomic_dec(count); | 107 | atomic_dec(count); |
@@ -202,7 +111,7 @@ static inline int _raw_read_trylock(rwlock_t *lock) | |||
202 | return 0; | 111 | return 0; |
203 | } | 112 | } |
204 | 113 | ||
205 | static inline int _raw_write_trylock(rwlock_t *lock) | 114 | static inline int __raw_write_trylock(raw_rwlock_t *lock) |
206 | { | 115 | { |
207 | atomic_t *count = (atomic_t *)lock; | 116 | atomic_t *count = (atomic_t *)lock; |
208 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 117 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
@@ -211,4 +120,15 @@ static inline int _raw_write_trylock(rwlock_t *lock) | |||
211 | return 0; | 120 | return 0; |
212 | } | 121 | } |
213 | 122 | ||
123 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
124 | { | ||
125 | asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); | ||
126 | } | ||
127 | |||
128 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
129 | { | ||
130 | asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0" | ||
131 | : "=m" (rw->lock) : : "memory"); | ||
132 | } | ||
133 | |||
214 | #endif /* __ASM_SPINLOCK_H */ | 134 | #endif /* __ASM_SPINLOCK_H */ |
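The spinlock rewrite above drops the debug/magic variants and leaves a single byte lock: slock starts at 1, __raw_spin_lock does `lock; decb` and spins while the byte is non-positive, __raw_spin_trylock xchg's in 0 and succeeds if the old byte was positive, and unlock stores 1. A user-space model of the same protocol using GCC __atomic builtins in place of the lock-prefixed instructions (the demo_* names are illustrative, not kernel API; build with -pthread):

#include <stdio.h>
#include <pthread.h>

typedef struct { volatile signed char slock; } demo_spinlock_t;

#define DEMO_SPIN_UNLOCKED  { 1 }

static void demo_spin_lock(demo_spinlock_t *lock)
{
        for (;;) {
                /* lock ; decb %0 ; js 2f */
                if (__atomic_sub_fetch(&lock->slock, 1, __ATOMIC_ACQUIRE) >= 0)
                        return;
                /* 2: rep;nop ; cmpb $0,%0 ; jle 2b ; jmp 1b */
                while (lock->slock <= 0)
                        ;                       /* cpu_relax() would go here */
        }
}

static int demo_spin_trylock(demo_spinlock_t *lock)
{
        /* xchgb $0,%0 -- an old value > 0 means we took the lock */
        return __atomic_exchange_n(&lock->slock, 0, __ATOMIC_ACQUIRE) > 0;
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
        /* movb $1,%0 */
        __atomic_store_n(&lock->slock, 1, __ATOMIC_RELEASE);
}

static demo_spinlock_t lock = DEMO_SPIN_UNLOCKED;
static long counter;

static void *worker(void *arg)
{
        for (int i = 0; i < 100000; i++) {
                demo_spin_lock(&lock);
                counter++;
                demo_spin_unlock(&lock);
        }
        return arg;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("counter = %ld (expect 200000)\n", counter);
        printf("trylock on free lock: %d\n", demo_spin_trylock(&lock));
        return 0;
}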
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h new file mode 100644 index 000000000000..59efe849f351 --- /dev/null +++ b/include/asm-x86_64/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int slock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
19 | |||
20 | #endif | ||
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h index 8606e170a7dc..85348e02ad2e 100644 --- a/include/asm-x86_64/system.h +++ b/include/asm-x86_64/system.h | |||
@@ -188,7 +188,7 @@ static inline void write_cr4(unsigned long val) | |||
188 | 188 | ||
189 | #define __xg(x) ((volatile long *)(x)) | 189 | #define __xg(x) ((volatile long *)(x)) |
190 | 190 | ||
191 | extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val) | 191 | static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) |
192 | { | 192 | { |
193 | *ptr = val; | 193 | *ptr = val; |
194 | } | 194 | } |
@@ -253,19 +253,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
253 | case 2: | 253 | case 2: |
254 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" | 254 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" |
255 | : "=a"(prev) | 255 | : "=a"(prev) |
256 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 256 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
257 | : "memory"); | 257 | : "memory"); |
258 | return prev; | 258 | return prev; |
259 | case 4: | 259 | case 4: |
260 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" | 260 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2" |
261 | : "=a"(prev) | 261 | : "=a"(prev) |
262 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 262 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
263 | : "memory"); | 263 | : "memory"); |
264 | return prev; | 264 | return prev; |
265 | case 8: | 265 | case 8: |
266 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" | 266 | __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2" |
267 | : "=a"(prev) | 267 | : "=a"(prev) |
268 | : "q"(new), "m"(*__xg(ptr)), "0"(old) | 268 | : "r"(new), "m"(*__xg(ptr)), "0"(old) |
269 | : "memory"); | 269 | : "memory"); |
270 | return prev; | 270 | return prev; |
271 | } | 271 | } |
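The cmpxchg constraint change above replaces "q" with "r" for the new value: "q" is the old i386 byte-register class (a/b/c/d in 32-bit code), while "r" asks for any general-purpose register, which is all the 16/32/64-bit cmpxchg variants need. A portable model of what __cmpxchg returns (the previous value), using the compiler builtin rather than inline asm:

#include <stdio.h>
#include <stdint.h>

/* Returns the value that was in *ptr before the attempt, like __cmpxchg(). */
static uint64_t cmpxchg_demo(uint64_t *ptr, uint64_t old, uint64_t newval)
{
        uint64_t expected = old;

        __atomic_compare_exchange_n(ptr, &expected, newval, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;        /* == old on success, current value on failure */
}

int main(void)
{
        uint64_t v = 5;

        printf("prev=%llu v=%llu\n",
               (unsigned long long)cmpxchg_demo(&v, 5, 9),
               (unsigned long long)v);       /* prev=5 v=9  (swap happened) */
        printf("prev=%llu v=%llu\n",
               (unsigned long long)cmpxchg_demo(&v, 5, 1),
               (unsigned long long)v);       /* prev=9 v=9  (swap refused)  */
        return 0;
}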
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h index 24ecf6a637cb..f971f45d6d78 100644 --- a/include/asm-x86_64/timex.h +++ b/include/asm-x86_64/timex.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #ifndef _ASMx8664_TIMEX_H | 6 | #ifndef _ASMx8664_TIMEX_H |
7 | #define _ASMx8664_TIMEX_H | 7 | #define _ASMx8664_TIMEX_H |
8 | 8 | ||
9 | #include <linux/config.h> | ||
10 | #include <asm/8253pit.h> | 9 | #include <asm/8253pit.h> |
11 | #include <asm/msr.h> | 10 | #include <asm/msr.h> |
12 | #include <asm/vsyscall.h> | 11 | #include <asm/vsyscall.h> |
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h index 505b0cf906de..4a9c20ea9b10 100644 --- a/include/asm-x86_64/tlbflush.h +++ b/include/asm-x86_64/tlbflush.h | |||
@@ -109,6 +109,10 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st | |||
109 | #define TLBSTATE_OK 1 | 109 | #define TLBSTATE_OK 1 |
110 | #define TLBSTATE_LAZY 2 | 110 | #define TLBSTATE_LAZY 2 |
111 | 111 | ||
112 | /* Roughly an IPI every 20MB with 4k pages for freeing page table | ||
113 | ranges. Cost is about 42k of memory for each CPU. */ | ||
114 | #define ARCH_FREE_PTE_NR 5350 | ||
115 | |||
112 | #endif | 116 | #endif |
113 | 117 | ||
114 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() | 118 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() |
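The new ARCH_FREE_PTE_NR above batches pages freed during page-table teardown so a remote TLB-flush IPI goes out only about once per 20 MB of 4 KB pages; the comment's "about 42k of memory for each CPU" is, presumably, the per-CPU batch array of page pointers. The arithmetic, assuming one pointer per batched page:

#include <stdio.h>

int main(void)
{
        unsigned long pages_per_20mb = (20UL << 20) / 4096;    /* 5120 */
        unsigned long batch = 5350;                            /* ARCH_FREE_PTE_NR */
        unsigned long per_cpu_bytes = batch * sizeof(void *);  /* pointer array */

        printf("20MB / 4KB = %lu pages; batch of %lu pointers ~ %lu bytes/CPU\n",
               pages_per_20mb, batch, per_cpu_bytes);           /* ~42800 bytes */
        return 0;
}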
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index c1bc3fad482e..1c603cd7e4d0 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h | |||
@@ -13,7 +13,6 @@ | |||
13 | extern cpumask_t cpu_online_map; | 13 | extern cpumask_t cpu_online_map; |
14 | 14 | ||
15 | extern unsigned char cpu_to_node[]; | 15 | extern unsigned char cpu_to_node[]; |
16 | extern unsigned char pci_bus_to_node[]; | ||
17 | extern cpumask_t node_to_cpumask[]; | 16 | extern cpumask_t node_to_cpumask[]; |
18 | 17 | ||
19 | #ifdef CONFIG_ACPI_NUMA | 18 | #ifdef CONFIG_ACPI_NUMA |
@@ -26,7 +25,7 @@ extern int __node_distance(int, int); | |||
26 | #define parent_node(node) (node) | 25 | #define parent_node(node) (node) |
27 | #define node_to_first_cpu(node) (__ffs(node_to_cpumask[node])) | 26 | #define node_to_first_cpu(node) (__ffs(node_to_cpumask[node])) |
28 | #define node_to_cpumask(node) (node_to_cpumask[node]) | 27 | #define node_to_cpumask(node) (node_to_cpumask[node]) |
29 | #define pcibus_to_node(bus) pci_bus_to_node[(bus)->number] | 28 | #define pcibus_to_node(bus) ((long)(bus->sysdata)) |
30 | #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); | 29 | #define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)); |
31 | 30 | ||
32 | /* sched_domains SD_NODE_INIT for x86_64 machines */ | 31 | /* sched_domains SD_NODE_INIT for x86_64 machines */ |
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h index 2872da23fc7e..438a3f52f839 100644 --- a/include/asm-x86_64/vsyscall.h +++ b/include/asm-x86_64/vsyscall.h | |||
@@ -29,7 +29,6 @@ enum vsyscall_num { | |||
29 | 29 | ||
30 | struct vxtime_data { | 30 | struct vxtime_data { |
31 | long hpet_address; /* HPET base address */ | 31 | long hpet_address; /* HPET base address */ |
32 | unsigned long hz; /* HPET clocks / sec */ | ||
33 | int last; | 32 | int last; |
34 | unsigned long last_tsc; | 33 | unsigned long last_tsc; |
35 | long quot; | 34 | long quot; |
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h index 2848a5ff8349..aa4fd7fb3ce7 100644 --- a/include/asm-xtensa/ptrace.h +++ b/include/asm-xtensa/ptrace.h | |||
@@ -127,7 +127,7 @@ extern void show_regs(struct pt_regs *); | |||
127 | #else /* __ASSEMBLY__ */ | 127 | #else /* __ASSEMBLY__ */ |
128 | 128 | ||
129 | #ifdef __KERNEL__ | 129 | #ifdef __KERNEL__ |
130 | # include <asm/offsets.h> | 130 | # include <asm/asm-offsets.h> |
131 | #define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE) | 131 | #define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE) |
132 | #endif | 132 | #endif |
133 | 133 | ||
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h index fc268ac923c0..06a22b83ba17 100644 --- a/include/asm-xtensa/uaccess.h +++ b/include/asm-xtensa/uaccess.h | |||
@@ -25,7 +25,7 @@ | |||
25 | 25 | ||
26 | #define _ASMLANGUAGE | 26 | #define _ASMLANGUAGE |
27 | #include <asm/current.h> | 27 | #include <asm/current.h> |
28 | #include <asm/offsets.h> | 28 | #include <asm/asm-offsets.h> |
29 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
30 | 30 | ||
31 | /* | 31 | /* |
diff --git a/include/linux/bfs_fs.h b/include/linux/bfs_fs.h index f7f0913cd110..c1237aa92e38 100644 --- a/include/linux/bfs_fs.h +++ b/include/linux/bfs_fs.h | |||
@@ -14,8 +14,9 @@ | |||
14 | #define BFS_INODES_PER_BLOCK 8 | 14 | #define BFS_INODES_PER_BLOCK 8 |
15 | 15 | ||
16 | /* SVR4 vnode type values (bfs_inode->i_vtype) */ | 16 | /* SVR4 vnode type values (bfs_inode->i_vtype) */ |
17 | #define BFS_VDIR 2 | 17 | #define BFS_VDIR 2L |
18 | #define BFS_VREG 1 | 18 | #define BFS_VREG 1L |
19 | |||
19 | 20 | ||
20 | /* BFS inode layout on disk */ | 21 | /* BFS inode layout on disk */ |
21 | struct bfs_inode { | 22 | struct bfs_inode { |
@@ -58,22 +59,22 @@ struct bfs_super_block { | |||
58 | __u32 s_padding[118]; | 59 | __u32 s_padding[118]; |
59 | }; | 60 | }; |
60 | 61 | ||
61 | #define BFS_NZFILESIZE(ip) \ | ||
62 | (((ip)->i_eoffset + 1) - (ip)->i_sblock * BFS_BSIZE) | ||
63 | |||
64 | #define BFS_FILESIZE(ip) \ | ||
65 | ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip)) | ||
66 | |||
67 | #define BFS_FILEBLOCKS(ip) \ | ||
68 | ((ip)->i_sblock == 0 ? 0 : ((ip)->i_eblock + 1) - (ip)->i_sblock) | ||
69 | 62 | ||
70 | #define BFS_OFF2INO(offset) \ | 63 | #define BFS_OFF2INO(offset) \ |
71 | ((((offset) - BFS_BSIZE) / sizeof(struct bfs_inode)) + BFS_ROOT_INO) | 64 | ((((offset) - BFS_BSIZE) / sizeof(struct bfs_inode)) + BFS_ROOT_INO) |
72 | 65 | ||
73 | #define BFS_INO2OFF(ino) \ | 66 | #define BFS_INO2OFF(ino) \ |
74 | ((__u32)(((ino) - BFS_ROOT_INO) * sizeof(struct bfs_inode)) + BFS_BSIZE) | 67 | ((__u32)(((ino) - BFS_ROOT_INO) * sizeof(struct bfs_inode)) + BFS_BSIZE) |
68 | #define BFS_NZFILESIZE(ip) \ | ||
69 | ((cpu_to_le32((ip)->i_eoffset) + 1) - cpu_to_le32((ip)->i_sblock) * BFS_BSIZE) | ||
70 | |||
71 | #define BFS_FILESIZE(ip) \ | ||
72 | ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip)) | ||
75 | 73 | ||
74 | #define BFS_FILEBLOCKS(ip) \ | ||
75 | ((ip)->i_sblock == 0 ? 0 : (cpu_to_le32((ip)->i_eblock) + 1) - cpu_to_le32((ip)->i_sblock)) | ||
76 | #define BFS_UNCLEAN(bfs_sb, sb) \ | 76 | #define BFS_UNCLEAN(bfs_sb, sb) \ |
77 | ((bfs_sb->s_from != -1) && (bfs_sb->s_to != -1) && !(sb->s_flags & MS_RDONLY)) | 77 | ((cpu_to_le32(bfs_sb->s_from) != -1) && (cpu_to_le32(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY)) |
78 | |||
78 | 79 | ||
79 | #endif /* _LINUX_BFS_FS_H */ | 80 | #endif /* _LINUX_BFS_FS_H */ |
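To make the reordered OFF2INO/INO2OFF pair concrete, here is a small stand-alone sketch of the round trip. The concrete values (512-byte blocks, a 64-byte on-disk inode, root inode number 2) are assumptions chosen for illustration, not taken from this header:

#include <stdio.h>

#define BSIZE     512                   /* assumed block size */
#define INO_SIZE  64                    /* assumed sizeof(struct bfs_inode) */
#define ROOT_INO  2                     /* assumed root inode number */

#define OFF2INO(off)  ((((off) - BSIZE) / INO_SIZE) + ROOT_INO)
#define INO2OFF(ino)  ((((ino) - ROOT_INO) * INO_SIZE) + BSIZE)

int main(void)
{
        unsigned long ino = 7;
        unsigned long off = INO2OFF(ino);       /* 512 + 5 * 64 = 832 */

        printf("ino %lu -> off %lu -> ino %lu\n",
               ino, off, (unsigned long)OFF2INO(off));
        return 0;
}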
diff --git a/include/linux/bio.h b/include/linux/bio.h index cdaf03a14a51..6e1c79c8b6bf 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -314,9 +314,8 @@ void zero_fill_bio(struct bio *bio); | |||
314 | * bvec_kmap_irq and bvec_kunmap_irq!! | 314 | * bvec_kmap_irq and bvec_kunmap_irq!! |
315 | * | 315 | * |
316 | * This function MUST be inlined - it plays with the CPU interrupt flags. | 316 | * This function MUST be inlined - it plays with the CPU interrupt flags. |
317 | * Hence the `extern inline'. | ||
318 | */ | 317 | */ |
319 | extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) | 318 | static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) |
320 | { | 319 | { |
321 | unsigned long addr; | 320 | unsigned long addr; |
322 | 321 | ||
@@ -332,7 +331,7 @@ extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) | |||
332 | return (char *) addr + bvec->bv_offset; | 331 | return (char *) addr + bvec->bv_offset; |
333 | } | 332 | } |
334 | 333 | ||
335 | extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | 334 | static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) |
336 | { | 335 | { |
337 | unsigned long ptr = (unsigned long) buffer & PAGE_MASK; | 336 | unsigned long ptr = (unsigned long) buffer & PAGE_MASK; |
338 | 337 | ||
@@ -345,7 +344,7 @@ extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | |||
345 | #define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) | 344 | #define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) |
346 | #endif | 345 | #endif |
347 | 346 | ||
348 | extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, | 347 | static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, |
349 | unsigned long *flags) | 348 | unsigned long *flags) |
350 | { | 349 | { |
351 | return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags); | 350 | return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags); |
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h new file mode 100644 index 000000000000..6b20af0bbb79 --- /dev/null +++ b/include/linux/bit_spinlock.h | |||
@@ -0,0 +1,77 @@ | |||
1 | #ifndef __LINUX_BIT_SPINLOCK_H | ||
2 | #define __LINUX_BIT_SPINLOCK_H | ||
3 | |||
4 | /* | ||
5 | * bit-based spin_lock() | ||
6 | * | ||
7 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | ||
8 | * are significantly faster. | ||
9 | */ | ||
10 | static inline void bit_spin_lock(int bitnum, unsigned long *addr) | ||
11 | { | ||
12 | /* | ||
13 | * Assuming the lock is uncontended, this never enters | ||
14 | * the body of the outer loop. If it is contended, then | ||
15 | * within the inner loop a non-atomic test is used to | ||
16 | * busywait with less bus contention for a good time to | ||
17 | * attempt to acquire the lock bit. | ||
18 | */ | ||
19 | preempt_disable(); | ||
20 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
21 | while (test_and_set_bit(bitnum, addr)) { | ||
22 | while (test_bit(bitnum, addr)) { | ||
23 | preempt_enable(); | ||
24 | cpu_relax(); | ||
25 | preempt_disable(); | ||
26 | } | ||
27 | } | ||
28 | #endif | ||
29 | __acquire(bitlock); | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * Return true if it was acquired | ||
34 | */ | ||
35 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | ||
36 | { | ||
37 | preempt_disable(); | ||
38 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
39 | if (test_and_set_bit(bitnum, addr)) { | ||
40 | preempt_enable(); | ||
41 | return 0; | ||
42 | } | ||
43 | #endif | ||
44 | __acquire(bitlock); | ||
45 | return 1; | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * bit-based spin_unlock() | ||
50 | */ | ||
51 | static inline void bit_spin_unlock(int bitnum, unsigned long *addr) | ||
52 | { | ||
53 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
54 | BUG_ON(!test_bit(bitnum, addr)); | ||
55 | smp_mb__before_clear_bit(); | ||
56 | clear_bit(bitnum, addr); | ||
57 | #endif | ||
58 | preempt_enable(); | ||
59 | __release(bitlock); | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Return true if the lock is held. | ||
64 | */ | ||
65 | static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) | ||
66 | { | ||
67 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
68 | return test_bit(bitnum, addr); | ||
69 | #elif defined CONFIG_PREEMPT | ||
70 | return preempt_count(); | ||
71 | #else | ||
72 | return 1; | ||
73 | #endif | ||
74 | } | ||
75 | |||
76 | #endif /* __LINUX_BIT_SPINLOCK_H */ | ||
77 | |||
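As a usage note for the new header, a hedged sketch of guarding a small structure with bit 0 of a flags word; the struct and its fields are invented for the example, and only the bit_spin_* helpers come from bit_spinlock.h:

#include <linux/bit_spinlock.h>

struct demo {
        unsigned long state;            /* bit 0 doubles as the lock bit */
        int counter;
};

static void demo_inc(struct demo *d)
{
        bit_spin_lock(0, &d->state);    /* spins; preemption stays disabled */
        d->counter++;
        bit_spin_unlock(0, &d->state);  /* clears the bit, re-enables preemption */
}

static int demo_try_inc(struct demo *d)
{
        if (!bit_spin_trylock(0, &d->state))
                return 0;               /* lock bit already held elsewhere */
        d->counter++;
        bit_spin_unlock(0, &d->state);
        return 1;
}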
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aefa26fbae8a..efdc9b5bc05c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -728,7 +728,7 @@ static inline unsigned int blksize_bits(unsigned int size) | |||
728 | return bits; | 728 | return bits; |
729 | } | 729 | } |
730 | 730 | ||
731 | extern inline unsigned int block_size(struct block_device *bdev) | 731 | static inline unsigned int block_size(struct block_device *bdev) |
732 | { | 732 | { |
733 | return bdev->bd_block_size; | 733 | return bdev->bd_block_size; |
734 | } | 734 | } |
diff --git a/include/linux/chio.h b/include/linux/chio.h index 63035ae67e63..a404c111c937 100644 --- a/include/linux/chio.h +++ b/include/linux/chio.h | |||
@@ -96,7 +96,7 @@ struct changer_position { | |||
96 | */ | 96 | */ |
97 | struct changer_element_status { | 97 | struct changer_element_status { |
98 | int ces_type; | 98 | int ces_type; |
99 | unsigned char *ces_data; | 99 | unsigned char __user *ces_data; |
100 | }; | 100 | }; |
101 | #define CESTATUS_FULL 0x01 /* full */ | 101 | #define CESTATUS_FULL 0x01 /* full */ |
102 | #define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */ | 102 | #define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */ |
diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 007c290f74d4..8bf4bacb5051 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h | |||
@@ -432,7 +432,10 @@ struct dccp_sock { | |||
432 | struct ccid *dccps_hc_rx_ccid; | 432 | struct ccid *dccps_hc_rx_ccid; |
433 | struct ccid *dccps_hc_tx_ccid; | 433 | struct ccid *dccps_hc_tx_ccid; |
434 | struct dccp_options_received dccps_options_received; | 434 | struct dccp_options_received dccps_options_received; |
435 | struct timeval dccps_epoch; | ||
435 | enum dccp_role dccps_role:2; | 436 | enum dccp_role dccps_role:2; |
437 | __u8 dccps_hc_rx_insert_options:1; | ||
438 | __u8 dccps_hc_tx_insert_options:1; | ||
436 | }; | 439 | }; |
437 | 440 | ||
438 | static inline struct dccp_sock *dccp_sk(const struct sock *sk) | 441 | static inline struct dccp_sock *dccp_sk(const struct sock *sk) |
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index e60bfdac348d..4932ee5c77f0 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h | |||
@@ -19,7 +19,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, | |||
19 | 19 | ||
20 | void dma_pool_destroy(struct dma_pool *pool); | 20 | void dma_pool_destroy(struct dma_pool *pool); |
21 | 21 | ||
22 | void *dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle); | 22 | void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags, |
23 | dma_addr_t *handle); | ||
23 | 24 | ||
24 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); | 25 | void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); |
25 | 26 | ||
diff --git a/include/linux/dmi.h b/include/linux/dmi.h index c30175e8dec6..a415f1d93e9a 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h | |||
@@ -70,7 +70,7 @@ extern struct dmi_device * dmi_find_device(int type, const char *name, | |||
70 | 70 | ||
71 | static inline int dmi_check_system(struct dmi_system_id *list) { return 0; } | 71 | static inline int dmi_check_system(struct dmi_system_id *list) { return 0; } |
72 | static inline char * dmi_get_system_info(int field) { return NULL; } | 72 | static inline char * dmi_get_system_info(int field) { return NULL; } |
73 | static struct dmi_device * dmi_find_device(int type, const char *name, | 73 | static inline struct dmi_device * dmi_find_device(int type, const char *name, |
74 | struct dmi_device *from) { return NULL; } | 74 | struct dmi_device *from) { return NULL; } |
75 | 75 | ||
76 | #endif | 76 | #endif |
diff --git a/include/linux/fb.h b/include/linux/fb.h index bc24beeed971..82e39cd0c4fb 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -107,6 +107,8 @@ | |||
107 | #define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */ | 107 | #define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */ |
108 | #define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */ | 108 | #define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */ |
109 | #define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ | 109 | #define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ |
110 | #define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ | ||
111 | #define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ | ||
110 | #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ | 112 | #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ |
111 | #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ | 113 | #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ |
112 | #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ | 114 | #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ |
@@ -495,6 +497,9 @@ struct fb_cursor_user { | |||
495 | #define FB_EVENT_BLANK 0x08 | 497 | #define FB_EVENT_BLANK 0x08 |
496 | /* Private modelist is to be replaced */ | 498 | /* Private modelist is to be replaced */ |
497 | #define FB_EVENT_NEW_MODELIST 0x09 | 499 | #define FB_EVENT_NEW_MODELIST 0x09 |
500 | /* The resolution of the passed in fb_info is about to change and | ||
501 | all vc's should be changed */ | ||
502 | #define FB_EVENT_MODE_CHANGE_ALL 0x0A | ||
498 | 503 | ||
499 | struct fb_event { | 504 | struct fb_event { |
500 | struct fb_info *info; | 505 | struct fb_info *info; |
@@ -820,13 +825,29 @@ extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, | |||
820 | u32 height, u32 shift_high, u32 shift_low, u32 mod); | 825 | u32 height, u32 shift_high, u32 shift_low, u32 mod); |
821 | extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height); | 826 | extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height); |
822 | extern void fb_set_suspend(struct fb_info *info, int state); | 827 | extern void fb_set_suspend(struct fb_info *info, int state); |
823 | extern int fb_get_color_depth(struct fb_var_screeninfo *var); | 828 | extern int fb_get_color_depth(struct fb_var_screeninfo *var, |
829 | struct fb_fix_screeninfo *fix); | ||
824 | extern int fb_get_options(char *name, char **option); | 830 | extern int fb_get_options(char *name, char **option); |
825 | extern int fb_new_modelist(struct fb_info *info); | 831 | extern int fb_new_modelist(struct fb_info *info); |
826 | 832 | ||
827 | extern struct fb_info *registered_fb[FB_MAX]; | 833 | extern struct fb_info *registered_fb[FB_MAX]; |
828 | extern int num_registered_fb; | 834 | extern int num_registered_fb; |
829 | 835 | ||
836 | static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, | ||
837 | u8 *src, u32 s_pitch, u32 height) | ||
838 | { | ||
839 | int i, j; | ||
840 | |||
841 | d_pitch -= s_pitch; | ||
842 | |||
843 | for (i = height; i--; ) { | ||
844 | /* s_pitch is a few bytes at the most, memcpy is suboptimal */ | ||
845 | for (j = 0; j < s_pitch; j++) | ||
846 | *dst++ = *src++; | ||
847 | dst += d_pitch; | ||
848 | } | ||
849 | } | ||
850 | |||
830 | /* drivers/video/fbsysfs.c */ | 851 | /* drivers/video/fbsysfs.c */ |
831 | extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); | 852 | extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); |
832 | extern void framebuffer_release(struct fb_info *info); | 853 | extern void framebuffer_release(struct fb_info *info); |
@@ -856,8 +877,11 @@ extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, | |||
856 | extern int fb_validate_mode(const struct fb_var_screeninfo *var, | 877 | extern int fb_validate_mode(const struct fb_var_screeninfo *var, |
857 | struct fb_info *info); | 878 | struct fb_info *info); |
858 | extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); | 879 | extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); |
859 | extern void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs); | 880 | extern const unsigned char *fb_firmware_edid(struct device *device); |
881 | extern void fb_edid_to_monspecs(unsigned char *edid, | ||
882 | struct fb_monspecs *specs); | ||
860 | extern void fb_destroy_modedb(struct fb_videomode *modedb); | 883 | extern void fb_destroy_modedb(struct fb_videomode *modedb); |
884 | extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); | ||
861 | 885 | ||
862 | /* drivers/video/modedb.c */ | 886 | /* drivers/video/modedb.c */ |
863 | #define VESA_MODEDB_SIZE 34 | 887 | #define VESA_MODEDB_SIZE 34 |
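The new __fb_pad_aligned_buffer() helper added above copies a tightly packed s_pitch-wide source bitmap into a d_pitch-wide destination row by row, skipping the destination padding after each row. A user-space restatement of the same loop, with an invented one-byte-per-row glyph, shows the effect:

#include <stdio.h>
#include <string.h>

static void pad_aligned(unsigned char *dst, unsigned d_pitch,
                        const unsigned char *src, unsigned s_pitch,
                        unsigned height)
{
        unsigned i, j;

        d_pitch -= s_pitch;
        for (i = height; i--; ) {
                for (j = 0; j < s_pitch; j++)   /* copy one source row */
                        *dst++ = *src++;
                dst += d_pitch;                 /* skip destination padding */
        }
}

int main(void)
{
        unsigned char glyph[4] = { 0x18, 0x3c, 0x7e, 0xff };  /* 1 byte per row */
        unsigned char fb[4 * 4];                              /* 4-byte pitch */

        memset(fb, 0, sizeof(fb));
        pad_aligned(fb, 4, glyph, 1, 4);
        printf("%02x %02x %02x %02x\n", fb[0], fb[4], fb[8], fb[12]);
        return 0;
}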
diff --git a/include/linux/file.h b/include/linux/file.h index 5206beb9a80e..f5bbd4c508b3 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/posix_types.h> | 9 | #include <linux/posix_types.h> |
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | #include <linux/rcupdate.h> | ||
12 | 13 | ||
13 | /* | 14 | /* |
14 | * The default fd array needs to be at least BITS_PER_LONG, | 15 | * The default fd array needs to be at least BITS_PER_LONG, |
@@ -16,23 +17,33 @@ | |||
16 | */ | 17 | */ |
17 | #define NR_OPEN_DEFAULT BITS_PER_LONG | 18 | #define NR_OPEN_DEFAULT BITS_PER_LONG |
18 | 19 | ||
20 | struct fdtable { | ||
21 | unsigned int max_fds; | ||
22 | int max_fdset; | ||
23 | int next_fd; | ||
24 | struct file ** fd; /* current fd array */ | ||
25 | fd_set *close_on_exec; | ||
26 | fd_set *open_fds; | ||
27 | struct rcu_head rcu; | ||
28 | struct files_struct *free_files; | ||
29 | struct fdtable *next; | ||
30 | }; | ||
31 | |||
19 | /* | 32 | /* |
20 | * Open file table structure | 33 | * Open file table structure |
21 | */ | 34 | */ |
22 | struct files_struct { | 35 | struct files_struct { |
23 | atomic_t count; | 36 | atomic_t count; |
24 | spinlock_t file_lock; /* Protects all the below members. Nests inside tsk->alloc_lock */ | 37 | spinlock_t file_lock; /* Protects all the below members. Nests inside tsk->alloc_lock */ |
25 | int max_fds; | 38 | struct fdtable *fdt; |
26 | int max_fdset; | 39 | struct fdtable fdtab; |
27 | int next_fd; | ||
28 | struct file ** fd; /* current fd array */ | ||
29 | fd_set *close_on_exec; | ||
30 | fd_set *open_fds; | ||
31 | fd_set close_on_exec_init; | 40 | fd_set close_on_exec_init; |
32 | fd_set open_fds_init; | 41 | fd_set open_fds_init; |
33 | struct file * fd_array[NR_OPEN_DEFAULT]; | 42 | struct file * fd_array[NR_OPEN_DEFAULT]; |
34 | }; | 43 | }; |
35 | 44 | ||
45 | #define files_fdtable(files) (rcu_dereference((files)->fdt)) | ||
46 | |||
36 | extern void FASTCALL(__fput(struct file *)); | 47 | extern void FASTCALL(__fput(struct file *)); |
37 | extern void FASTCALL(fput(struct file *)); | 48 | extern void FASTCALL(fput(struct file *)); |
38 | 49 | ||
@@ -59,13 +70,16 @@ extern fd_set *alloc_fdset(int); | |||
59 | extern void free_fdset(fd_set *, int); | 70 | extern void free_fdset(fd_set *, int); |
60 | 71 | ||
61 | extern int expand_files(struct files_struct *, int nr); | 72 | extern int expand_files(struct files_struct *, int nr); |
73 | extern void free_fdtable(struct fdtable *fdt); | ||
74 | extern void __init files_defer_init(void); | ||
62 | 75 | ||
63 | static inline struct file * fcheck_files(struct files_struct *files, unsigned int fd) | 76 | static inline struct file * fcheck_files(struct files_struct *files, unsigned int fd) |
64 | { | 77 | { |
65 | struct file * file = NULL; | 78 | struct file * file = NULL; |
79 | struct fdtable *fdt = files_fdtable(files); | ||
66 | 80 | ||
67 | if (fd < files->max_fds) | 81 | if (fd < fdt->max_fds) |
68 | file = files->fd[fd]; | 82 | file = rcu_dereference(fdt->fd[fd]); |
69 | return file; | 83 | return file; |
70 | } | 84 | } |
71 | 85 | ||
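To show how the reworked files_struct is meant to be read, here is a hedged sketch of an fd lookup under RCU. The wrapper function is invented, and it deliberately ignores the zero-refcount race that the real fget() path has to handle; only files_fdtable() and fcheck_files() come from this header:

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>

static struct file *demo_lookup_fd(struct files_struct *files, unsigned int fd)
{
        struct file *file;

        rcu_read_lock();                /* the fdtable may be replaced concurrently */
        file = fcheck_files(files, fd);
        if (file)
                get_file(file);         /* simplified: real code must recheck f_count */
        rcu_read_unlock();

        return file;                    /* caller drops it with fput() */
}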
diff --git a/include/linux/fs.h b/include/linux/fs.h index fd93ab7da905..e0b77c5af9a0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/config.h> | 9 | #include <linux/config.h> |
10 | #include <linux/limits.h> | 10 | #include <linux/limits.h> |
11 | #include <linux/ioctl.h> | 11 | #include <linux/ioctl.h> |
12 | #include <linux/rcuref.h> | ||
12 | 13 | ||
13 | /* | 14 | /* |
14 | * It's silly to have NR_OPEN bigger than NR_FILE, but you can change | 15 | * It's silly to have NR_OPEN bigger than NR_FILE, but you can change |
@@ -597,12 +598,13 @@ struct file { | |||
597 | spinlock_t f_ep_lock; | 598 | spinlock_t f_ep_lock; |
598 | #endif /* #ifdef CONFIG_EPOLL */ | 599 | #endif /* #ifdef CONFIG_EPOLL */ |
599 | struct address_space *f_mapping; | 600 | struct address_space *f_mapping; |
601 | struct rcu_head f_rcuhead; | ||
600 | }; | 602 | }; |
601 | extern spinlock_t files_lock; | 603 | extern spinlock_t files_lock; |
602 | #define file_list_lock() spin_lock(&files_lock); | 604 | #define file_list_lock() spin_lock(&files_lock); |
603 | #define file_list_unlock() spin_unlock(&files_lock); | 605 | #define file_list_unlock() spin_unlock(&files_lock); |
604 | 606 | ||
605 | #define get_file(x) atomic_inc(&(x)->f_count) | 607 | #define get_file(x) rcuref_inc(&(x)->f_count) |
606 | #define file_count(x) atomic_read(&(x)->f_count) | 608 | #define file_count(x) atomic_read(&(x)->f_count) |
607 | 609 | ||
608 | #define MAX_NON_LFS ((1UL<<31) - 1) | 610 | #define MAX_NON_LFS ((1UL<<31) - 1) |
@@ -1507,8 +1509,6 @@ extern void do_generic_mapping_read(struct address_space *mapping, | |||
1507 | loff_t *, read_descriptor_t *, read_actor_t); | 1509 | loff_t *, read_descriptor_t *, read_actor_t); |
1508 | extern void | 1510 | extern void |
1509 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); | 1511 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); |
1510 | extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb, | ||
1511 | const struct iovec *iov, loff_t offset, unsigned long nr_segs); | ||
1512 | extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, | 1512 | extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, |
1513 | unsigned long nr_segs, loff_t *ppos); | 1513 | unsigned long nr_segs, loff_t *ppos); |
1514 | ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, | 1514 | ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, |
diff --git a/include/linux/fuse.h b/include/linux/fuse.h new file mode 100644 index 000000000000..acbeb96a3353 --- /dev/null +++ b/include/linux/fuse.h | |||
@@ -0,0 +1,259 @@ | |||
1 | /* | ||
2 | FUSE: Filesystem in Userspace | ||
3 | Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu> | ||
4 | |||
5 | This program can be distributed under the terms of the GNU GPL. | ||
6 | See the file COPYING. | ||
7 | */ | ||
8 | |||
9 | /* This file defines the kernel interface of FUSE */ | ||
10 | |||
11 | #include <asm/types.h> | ||
12 | |||
13 | /** Version number of this interface */ | ||
14 | #define FUSE_KERNEL_VERSION 7 | ||
15 | |||
16 | /** Minor version number of this interface */ | ||
17 | #define FUSE_KERNEL_MINOR_VERSION 2 | ||
18 | |||
19 | /** The node ID of the root inode */ | ||
20 | #define FUSE_ROOT_ID 1 | ||
21 | |||
22 | /** The major number of the fuse character device */ | ||
23 | #define FUSE_MAJOR 10 | ||
24 | |||
25 | /** The minor number of the fuse character device */ | ||
26 | #define FUSE_MINOR 229 | ||
27 | |||
28 | /* Make sure all structures are padded to 64bit boundary, so 32bit | ||
29 | userspace works under 64bit kernels */ | ||
30 | |||
31 | struct fuse_attr { | ||
32 | __u64 ino; | ||
33 | __u64 size; | ||
34 | __u64 blocks; | ||
35 | __u64 atime; | ||
36 | __u64 mtime; | ||
37 | __u64 ctime; | ||
38 | __u32 atimensec; | ||
39 | __u32 mtimensec; | ||
40 | __u32 ctimensec; | ||
41 | __u32 mode; | ||
42 | __u32 nlink; | ||
43 | __u32 uid; | ||
44 | __u32 gid; | ||
45 | __u32 rdev; | ||
46 | }; | ||
47 | |||
48 | struct fuse_kstatfs { | ||
49 | __u64 blocks; | ||
50 | __u64 bfree; | ||
51 | __u64 bavail; | ||
52 | __u64 files; | ||
53 | __u64 ffree; | ||
54 | __u32 bsize; | ||
55 | __u32 namelen; | ||
56 | }; | ||
57 | |||
58 | #define FATTR_MODE (1 << 0) | ||
59 | #define FATTR_UID (1 << 1) | ||
60 | #define FATTR_GID (1 << 2) | ||
61 | #define FATTR_SIZE (1 << 3) | ||
62 | #define FATTR_ATIME (1 << 4) | ||
63 | #define FATTR_MTIME (1 << 5) | ||
64 | #define FATTR_CTIME (1 << 6) | ||
65 | |||
66 | /** | ||
67 | * Flags returned by the OPEN request | ||
68 | * | ||
69 | * FOPEN_DIRECT_IO: bypass page cache for this open file | ||
70 | * FOPEN_KEEP_CACHE: don't invalidate the data cache on open | ||
71 | */ | ||
72 | #define FOPEN_DIRECT_IO (1 << 0) | ||
73 | #define FOPEN_KEEP_CACHE (1 << 1) | ||
74 | |||
75 | enum fuse_opcode { | ||
76 | FUSE_LOOKUP = 1, | ||
77 | FUSE_FORGET = 2, /* no reply */ | ||
78 | FUSE_GETATTR = 3, | ||
79 | FUSE_SETATTR = 4, | ||
80 | FUSE_READLINK = 5, | ||
81 | FUSE_SYMLINK = 6, | ||
82 | FUSE_MKNOD = 8, | ||
83 | FUSE_MKDIR = 9, | ||
84 | FUSE_UNLINK = 10, | ||
85 | FUSE_RMDIR = 11, | ||
86 | FUSE_RENAME = 12, | ||
87 | FUSE_LINK = 13, | ||
88 | FUSE_OPEN = 14, | ||
89 | FUSE_READ = 15, | ||
90 | FUSE_WRITE = 16, | ||
91 | FUSE_STATFS = 17, | ||
92 | FUSE_RELEASE = 18, | ||
93 | FUSE_FSYNC = 20, | ||
94 | FUSE_SETXATTR = 21, | ||
95 | FUSE_GETXATTR = 22, | ||
96 | FUSE_LISTXATTR = 23, | ||
97 | FUSE_REMOVEXATTR = 24, | ||
98 | FUSE_FLUSH = 25, | ||
99 | FUSE_INIT = 26, | ||
100 | FUSE_OPENDIR = 27, | ||
101 | FUSE_READDIR = 28, | ||
102 | FUSE_RELEASEDIR = 29, | ||
103 | FUSE_FSYNCDIR = 30 | ||
104 | }; | ||
105 | |||
106 | /* Conservative buffer size for the client */ | ||
107 | #define FUSE_MAX_IN 8192 | ||
108 | |||
109 | #define FUSE_NAME_MAX 1024 | ||
110 | #define FUSE_SYMLINK_MAX 4096 | ||
111 | #define FUSE_XATTR_SIZE_MAX 4096 | ||
112 | |||
113 | struct fuse_entry_out { | ||
114 | __u64 nodeid; /* Inode ID */ | ||
115 | __u64 generation; /* Inode generation: nodeid:gen must | ||
116 | be unique for the fs's lifetime */ | ||
117 | __u64 entry_valid; /* Cache timeout for the name */ | ||
118 | __u64 attr_valid; /* Cache timeout for the attributes */ | ||
119 | __u32 entry_valid_nsec; | ||
120 | __u32 attr_valid_nsec; | ||
121 | struct fuse_attr attr; | ||
122 | }; | ||
123 | |||
124 | struct fuse_forget_in { | ||
125 | __u64 nlookup; | ||
126 | }; | ||
127 | |||
128 | struct fuse_attr_out { | ||
129 | __u64 attr_valid; /* Cache timeout for the attributes */ | ||
130 | __u32 attr_valid_nsec; | ||
131 | __u32 dummy; | ||
132 | struct fuse_attr attr; | ||
133 | }; | ||
134 | |||
135 | struct fuse_mknod_in { | ||
136 | __u32 mode; | ||
137 | __u32 rdev; | ||
138 | }; | ||
139 | |||
140 | struct fuse_mkdir_in { | ||
141 | __u32 mode; | ||
142 | __u32 padding; | ||
143 | }; | ||
144 | |||
145 | struct fuse_rename_in { | ||
146 | __u64 newdir; | ||
147 | }; | ||
148 | |||
149 | struct fuse_link_in { | ||
150 | __u64 oldnodeid; | ||
151 | }; | ||
152 | |||
153 | struct fuse_setattr_in { | ||
154 | __u32 valid; | ||
155 | __u32 padding; | ||
156 | struct fuse_attr attr; | ||
157 | }; | ||
158 | |||
159 | struct fuse_open_in { | ||
160 | __u32 flags; | ||
161 | __u32 padding; | ||
162 | }; | ||
163 | |||
164 | struct fuse_open_out { | ||
165 | __u64 fh; | ||
166 | __u32 open_flags; | ||
167 | __u32 padding; | ||
168 | }; | ||
169 | |||
170 | struct fuse_release_in { | ||
171 | __u64 fh; | ||
172 | __u32 flags; | ||
173 | __u32 padding; | ||
174 | }; | ||
175 | |||
176 | struct fuse_flush_in { | ||
177 | __u64 fh; | ||
178 | __u32 flush_flags; | ||
179 | __u32 padding; | ||
180 | }; | ||
181 | |||
182 | struct fuse_read_in { | ||
183 | __u64 fh; | ||
184 | __u64 offset; | ||
185 | __u32 size; | ||
186 | __u32 padding; | ||
187 | }; | ||
188 | |||
189 | struct fuse_write_in { | ||
190 | __u64 fh; | ||
191 | __u64 offset; | ||
192 | __u32 size; | ||
193 | __u32 write_flags; | ||
194 | }; | ||
195 | |||
196 | struct fuse_write_out { | ||
197 | __u32 size; | ||
198 | __u32 padding; | ||
199 | }; | ||
200 | |||
201 | struct fuse_statfs_out { | ||
202 | struct fuse_kstatfs st; | ||
203 | }; | ||
204 | |||
205 | struct fuse_fsync_in { | ||
206 | __u64 fh; | ||
207 | __u32 fsync_flags; | ||
208 | __u32 padding; | ||
209 | }; | ||
210 | |||
211 | struct fuse_setxattr_in { | ||
212 | __u32 size; | ||
213 | __u32 flags; | ||
214 | }; | ||
215 | |||
216 | struct fuse_getxattr_in { | ||
217 | __u32 size; | ||
218 | __u32 padding; | ||
219 | }; | ||
220 | |||
221 | struct fuse_getxattr_out { | ||
222 | __u32 size; | ||
223 | __u32 padding; | ||
224 | }; | ||
225 | |||
226 | struct fuse_init_in_out { | ||
227 | __u32 major; | ||
228 | __u32 minor; | ||
229 | }; | ||
230 | |||
231 | struct fuse_in_header { | ||
232 | __u32 len; | ||
233 | __u32 opcode; | ||
234 | __u64 unique; | ||
235 | __u64 nodeid; | ||
236 | __u32 uid; | ||
237 | __u32 gid; | ||
238 | __u32 pid; | ||
239 | __u32 padding; | ||
240 | }; | ||
241 | |||
242 | struct fuse_out_header { | ||
243 | __u32 len; | ||
244 | __s32 error; | ||
245 | __u64 unique; | ||
246 | }; | ||
247 | |||
248 | struct fuse_dirent { | ||
249 | __u64 ino; | ||
250 | __u64 off; | ||
251 | __u32 namelen; | ||
252 | __u32 type; | ||
253 | char name[0]; | ||
254 | }; | ||
255 | |||
256 | #define FUSE_NAME_OFFSET ((unsigned) ((struct fuse_dirent *) 0)->name) | ||
257 | #define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1)) | ||
258 | #define FUSE_DIRENT_SIZE(d) \ | ||
259 | FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) | ||
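The dirent macros at the end of the header round each directory entry up to an 8-byte boundary. A small stand-alone check of that arithmetic, using a local copy of the layout rather than the header itself (the struct and macro names below are illustrative):

#include <stdio.h>
#include <stddef.h>

struct demo_dirent {
        unsigned long long ino;
        unsigned long long off;
        unsigned int namelen;
        unsigned int type;
        char name[];
};

#define DEMO_NAME_OFFSET        offsetof(struct demo_dirent, name)
#define DEMO_ALIGN(x)           (((x) + sizeof(unsigned long long) - 1) & \
                                 ~(sizeof(unsigned long long) - 1))
#define DEMO_DIRENT_SIZE(len)   DEMO_ALIGN(DEMO_NAME_OFFSET + (len))

int main(void)
{
        /* header is 24 bytes, plus a 5-char name = 29, rounded up to 32 */
        printf("size for a 5-char name: %zu\n", DEMO_DIRENT_SIZE(5));
        return 0;
}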
diff --git a/include/linux/in6.h b/include/linux/in6.h index bd32b79d6295..304aaedea305 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h | |||
@@ -198,27 +198,43 @@ struct in6_flowlabel_req | |||
198 | * MCAST_MSFILTER 48 | 198 | * MCAST_MSFILTER 48 |
199 | */ | 199 | */ |
200 | 200 | ||
201 | /* RFC3542 advanced socket options (50-67) */ | 201 | /* |
202 | #define IPV6_RECVPKTINFO 50 | 202 | * Advanced API (RFC3542) (1) |
203 | #define IPV6_PKTINFO 51 | 203 | * |
204 | #if 0 | 204 | * Note: IPV6_RECVRTHDRDSTOPTS does not exist. see net/ipv6/datagram.c. |
205 | #define IPV6_RECVPATHMTU 52 | 205 | */ |
206 | #define IPV6_PATHMTU 53 | 206 | |
207 | #define IPV6_DONTFRAG 54 | 207 | #define IPV6_RECVPKTINFO 49 |
208 | #define IPV6_USE_MIN_MTU 55 | 208 | #define IPV6_PKTINFO 50 |
209 | #endif | 209 | #define IPV6_RECVHOPLIMIT 51 |
210 | #define IPV6_RECVHOPOPTS 56 | 210 | #define IPV6_HOPLIMIT 52 |
211 | #define IPV6_HOPOPTS 57 | 211 | #define IPV6_RECVHOPOPTS 53 |
212 | #if 0 | 212 | #define IPV6_HOPOPTS 54 |
213 | #define IPV6_RECVRTHDRDSTOPTS 58 /* Unused, see net/ipv6/datagram.c */ | 213 | #define IPV6_RTHDRDSTOPTS 55 |
214 | #define IPV6_RECVRTHDR 56 | ||
215 | #define IPV6_RTHDR 57 | ||
216 | #define IPV6_RECVDSTOPTS 58 | ||
217 | #define IPV6_DSTOPTS 59 | ||
218 | #if 0 /* not yet */ | ||
219 | #define IPV6_RECVPATHMTU 60 | ||
220 | #define IPV6_PATHMTU 61 | ||
221 | #define IPV6_DONTFRAG 62 | ||
222 | #define IPV6_USE_MIN_MTU 63 | ||
214 | #endif | 223 | #endif |
215 | #define IPV6_RTHDRDSTOPTS 59 | 224 | |
216 | #define IPV6_RECVRTHDR 60 | 225 | /* |
217 | #define IPV6_RTHDR 61 | 226 | * Netfilter |
218 | #define IPV6_RECVDSTOPTS 62 | 227 | * |
219 | #define IPV6_DSTOPTS 63 | 228 | * Following socket options are used in ip6_tables; |
220 | #define IPV6_RECVHOPLIMIT 64 | 229 | * see include/linux/netfilter_ipv6/ip6_tables.h. |
221 | #define IPV6_HOPLIMIT 65 | 230 | * |
231 | * IP6T_SO_SET_REPLACE / IP6T_SO_GET_INFO 64 | ||
232 | * IP6T_SO_SET_ADD_COUNTERS / IP6T_SO_GET_ENTRIES 65 | ||
233 | */ | ||
234 | |||
235 | /* | ||
236 | * Advanced API (RFC3542) (2) | ||
237 | */ | ||
222 | #define IPV6_RECVTCLASS 66 | 238 | #define IPV6_RECVTCLASS 66 |
223 | #define IPV6_TCLASS 67 | 239 | #define IPV6_TCLASS 67 |
224 | 240 | ||
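For reference, this is how one of the renumbered RFC3542 options is typically enabled from user space; the sketch assumes a libc that already exports IPV6_RECVHOPLIMIT through <netinet/in.h>:

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int on = 1;
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        /* ask for the hop limit of received packets as ancillary data */
        if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on)) < 0)
                perror("setsockopt");
        close(fd);
        return 0;
}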
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index c727c195a91a..68ab5f2ab9cd 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -2,17 +2,27 @@ | |||
2 | #define _LINUX__INIT_TASK_H | 2 | #define _LINUX__INIT_TASK_H |
3 | 3 | ||
4 | #include <linux/file.h> | 4 | #include <linux/file.h> |
5 | #include <linux/rcupdate.h> | ||
5 | 6 | ||
6 | #define INIT_FILES \ | 7 | #define INIT_FDTABLE \ |
7 | { \ | 8 | { \ |
8 | .count = ATOMIC_INIT(1), \ | ||
9 | .file_lock = SPIN_LOCK_UNLOCKED, \ | ||
10 | .max_fds = NR_OPEN_DEFAULT, \ | 9 | .max_fds = NR_OPEN_DEFAULT, \ |
11 | .max_fdset = __FD_SETSIZE, \ | 10 | .max_fdset = __FD_SETSIZE, \ |
12 | .next_fd = 0, \ | 11 | .next_fd = 0, \ |
13 | .fd = &init_files.fd_array[0], \ | 12 | .fd = &init_files.fd_array[0], \ |
14 | .close_on_exec = &init_files.close_on_exec_init, \ | 13 | .close_on_exec = &init_files.close_on_exec_init, \ |
15 | .open_fds = &init_files.open_fds_init, \ | 14 | .open_fds = &init_files.open_fds_init, \ |
15 | .rcu = RCU_HEAD_INIT, \ | ||
16 | .free_files = NULL, \ | ||
17 | .next = NULL, \ | ||
18 | } | ||
19 | |||
20 | #define INIT_FILES \ | ||
21 | { \ | ||
22 | .count = ATOMIC_INIT(1), \ | ||
23 | .file_lock = SPIN_LOCK_UNLOCKED, \ | ||
24 | .fdt = &init_files.fdtab, \ | ||
25 | .fdtab = INIT_FDTABLE, \ | ||
16 | .close_on_exec_init = { { 0, } }, \ | 26 | .close_on_exec_init = { { 0, } }, \ |
17 | .open_fds_init = { { 0, } }, \ | 27 | .open_fds_init = { { 0, } }, \ |
18 | .fd_array = { NULL, } \ | 28 | .fd_array = { NULL, } \ |
diff --git a/include/linux/input.h b/include/linux/input.h index 4767e5429534..e8c296ff6257 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -289,6 +289,8 @@ struct input_absinfo { | |||
289 | #define KEY_SCROLLDOWN 178 | 289 | #define KEY_SCROLLDOWN 178 |
290 | #define KEY_KPLEFTPAREN 179 | 290 | #define KEY_KPLEFTPAREN 179 |
291 | #define KEY_KPRIGHTPAREN 180 | 291 | #define KEY_KPRIGHTPAREN 180 |
292 | #define KEY_NEW 181 | ||
293 | #define KEY_REDO 182 | ||
292 | 294 | ||
293 | #define KEY_F13 183 | 295 | #define KEY_F13 183 |
294 | #define KEY_F14 184 | 296 | #define KEY_F14 184 |
@@ -335,6 +337,12 @@ struct input_absinfo { | |||
335 | #define KEY_KBDILLUMDOWN 229 | 337 | #define KEY_KBDILLUMDOWN 229 |
336 | #define KEY_KBDILLUMUP 230 | 338 | #define KEY_KBDILLUMUP 230 |
337 | 339 | ||
340 | #define KEY_SEND 231 | ||
341 | #define KEY_REPLY 232 | ||
342 | #define KEY_FORWARDMAIL 233 | ||
343 | #define KEY_SAVE 234 | ||
344 | #define KEY_DOCUMENTS 235 | ||
345 | |||
338 | #define KEY_UNKNOWN 240 | 346 | #define KEY_UNKNOWN 240 |
339 | 347 | ||
340 | #define BTN_MISC 0x100 | 348 | #define BTN_MISC 0x100 |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index d99e7aeb7d33..0a90205184b0 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -57,6 +57,11 @@ extern void disable_irq(unsigned int irq); | |||
57 | extern void enable_irq(unsigned int irq); | 57 | extern void enable_irq(unsigned int irq); |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | #ifndef __ARCH_SET_SOFTIRQ_PENDING | ||
61 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) | ||
62 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) | ||
63 | #endif | ||
64 | |||
60 | /* | 65 | /* |
61 | * Temporary defines for UP kernels, until all code gets fixed. | 66 | * Temporary defines for UP kernels, until all code gets fixed. |
62 | */ | 67 | */ |
@@ -123,7 +128,7 @@ struct softirq_action | |||
123 | asmlinkage void do_softirq(void); | 128 | asmlinkage void do_softirq(void); |
124 | extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); | 129 | extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); |
125 | extern void softirq_init(void); | 130 | extern void softirq_init(void); |
126 | #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0) | 131 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) |
127 | extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); | 132 | extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); |
128 | extern void FASTCALL(raise_softirq(unsigned int nr)); | 133 | extern void FASTCALL(raise_softirq(unsigned int nr)); |
129 | 134 | ||
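The new or_softirq_pending() wrapper only ORs a bit into the per-CPU pending word, which __raise_softirq_irqoff() now goes through. A user-space illustration of that bit arithmetic, with a plain variable standing in for local_softirq_pending():

#include <stdio.h>

static unsigned long softirq_pending;   /* stand-in for the per-CPU word */

#define or_softirq_pending(x)   (softirq_pending |= (x))
#define raise_softirq_nr(nr)    or_softirq_pending(1UL << (nr))

int main(void)
{
        raise_softirq_nr(3);
        raise_softirq_nr(6);
        printf("pending mask: 0x%lx\n", softirq_pending);  /* 0x48 */
        return 0;
}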
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 6c5f7b39a4b0..bb6f88e14061 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -68,7 +68,7 @@ struct ipv6_opt_hdr { | |||
68 | 68 | ||
69 | struct rt0_hdr { | 69 | struct rt0_hdr { |
70 | struct ipv6_rt_hdr rt_hdr; | 70 | struct ipv6_rt_hdr rt_hdr; |
71 | __u32 bitmap; /* strict/loose bit map */ | 71 | __u32 reserved; |
72 | struct in6_addr addr[0]; | 72 | struct in6_addr addr[0]; |
73 | 73 | ||
74 | #define rt0_type rt_hdr.type | 74 | #define rt0_type rt_hdr.type |
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 84321a4cac93..de097269bd7f 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/buffer_head.h> | 28 | #include <linux/buffer_head.h> |
29 | #include <linux/journal-head.h> | 29 | #include <linux/journal-head.h> |
30 | #include <linux/stddef.h> | 30 | #include <linux/stddef.h> |
31 | #include <linux/bit_spinlock.h> | ||
31 | #include <asm/semaphore.h> | 32 | #include <asm/semaphore.h> |
32 | #endif | 33 | #endif |
33 | 34 | ||
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index d7a2555a886c..6acfdbba734b 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h | |||
@@ -254,23 +254,23 @@ static inline u64 get_jiffies_64(void) | |||
254 | */ | 254 | */ |
255 | static inline unsigned int jiffies_to_msecs(const unsigned long j) | 255 | static inline unsigned int jiffies_to_msecs(const unsigned long j) |
256 | { | 256 | { |
257 | #if HZ <= 1000 && !(1000 % HZ) | 257 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
258 | return (1000 / HZ) * j; | 258 | return (MSEC_PER_SEC / HZ) * j; |
259 | #elif HZ > 1000 && !(HZ % 1000) | 259 | #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) |
260 | return (j + (HZ / 1000) - 1)/(HZ / 1000); | 260 | return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); |
261 | #else | 261 | #else |
262 | return (j * 1000) / HZ; | 262 | return (j * MSEC_PER_SEC) / HZ; |
263 | #endif | 263 | #endif |
264 | } | 264 | } |
265 | 265 | ||
266 | static inline unsigned int jiffies_to_usecs(const unsigned long j) | 266 | static inline unsigned int jiffies_to_usecs(const unsigned long j) |
267 | { | 267 | { |
268 | #if HZ <= 1000000 && !(1000000 % HZ) | 268 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
269 | return (1000000 / HZ) * j; | 269 | return (USEC_PER_SEC / HZ) * j; |
270 | #elif HZ > 1000000 && !(HZ % 1000000) | 270 | #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) |
271 | return (j + (HZ / 1000000) - 1)/(HZ / 1000000); | 271 | return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC); |
272 | #else | 272 | #else |
273 | return (j * 1000000) / HZ; | 273 | return (j * USEC_PER_SEC) / HZ; |
274 | #endif | 274 | #endif |
275 | } | 275 | } |
276 | 276 | ||
@@ -278,12 +278,12 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m) | |||
278 | { | 278 | { |
279 | if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) | 279 | if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) |
280 | return MAX_JIFFY_OFFSET; | 280 | return MAX_JIFFY_OFFSET; |
281 | #if HZ <= 1000 && !(1000 % HZ) | 281 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
282 | return (m + (1000 / HZ) - 1) / (1000 / HZ); | 282 | return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); |
283 | #elif HZ > 1000 && !(HZ % 1000) | 283 | #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) |
284 | return m * (HZ / 1000); | 284 | return m * (HZ / MSEC_PER_SEC); |
285 | #else | 285 | #else |
286 | return (m * HZ + 999) / 1000; | 286 | return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; |
287 | #endif | 287 | #endif |
288 | } | 288 | } |
289 | 289 | ||
@@ -291,12 +291,12 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u) | |||
291 | { | 291 | { |
292 | if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) | 292 | if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) |
293 | return MAX_JIFFY_OFFSET; | 293 | return MAX_JIFFY_OFFSET; |
294 | #if HZ <= 1000000 && !(1000000 % HZ) | 294 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
295 | return (u + (1000000 / HZ) - 1) / (1000000 / HZ); | 295 | return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); |
296 | #elif HZ > 1000000 && !(HZ % 1000000) | 296 | #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) |
297 | return u * (HZ / 1000000); | 297 | return u * (HZ / USEC_PER_SEC); |
298 | #else | 298 | #else |
299 | return (u * HZ + 999999) / 1000000; | 299 | return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC; |
300 | #endif | 300 | #endif |
301 | } | 301 | } |
302 | 302 | ||
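The conversions above are unchanged arithmetically; only the bare literals become MSEC_PER_SEC / USEC_PER_SEC. A user-space check with HZ picked as 250 (an assumed value for illustration) exercises the rounding in both directions:

#include <stdio.h>

#define HZ            250
#define MSEC_PER_SEC  1000L

static unsigned long msecs_to_jiffies_demo(unsigned int m)
{
        /* HZ <= MSEC_PER_SEC and MSEC_PER_SEC % HZ == 0, so this branch applies */
        return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
}

static unsigned int jiffies_to_msecs_demo(unsigned long j)
{
        return (MSEC_PER_SEC / HZ) * j;
}

int main(void)
{
        printf("10 ms -> %lu jiffies\n", msecs_to_jiffies_demo(10));   /* 3, rounds up */
        printf("3 jiffies -> %u ms\n", jiffies_to_msecs_demo(3));      /* 12 */
        return 0;
}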
diff --git a/include/linux/pci.h b/include/linux/pci.h index 6caaba0af469..7349058ed778 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -315,8 +315,11 @@ static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *s | |||
315 | pci_bus_add_devices(root_bus); | 315 | pci_bus_add_devices(root_bus); |
316 | return root_bus; | 316 | return root_bus; |
317 | } | 317 | } |
318 | struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); | ||
319 | struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); | ||
318 | int pci_scan_slot(struct pci_bus *bus, int devfn); | 320 | int pci_scan_slot(struct pci_bus *bus, int devfn); |
319 | struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn); | 321 | struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn); |
322 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); | ||
320 | unsigned int pci_scan_child_bus(struct pci_bus *bus); | 323 | unsigned int pci_scan_child_bus(struct pci_bus *bus); |
321 | void pci_bus_add_device(struct pci_dev *dev); | 324 | void pci_bus_add_device(struct pci_dev *dev); |
322 | void pci_read_bridge_bases(struct pci_bus *child); | 325 | void pci_read_bridge_bases(struct pci_bus *child); |
@@ -326,6 +329,7 @@ extern struct pci_dev *pci_dev_get(struct pci_dev *dev); | |||
326 | extern void pci_dev_put(struct pci_dev *dev); | 329 | extern void pci_dev_put(struct pci_dev *dev); |
327 | extern void pci_remove_bus(struct pci_bus *b); | 330 | extern void pci_remove_bus(struct pci_bus *b); |
328 | extern void pci_remove_bus_device(struct pci_dev *dev); | 331 | extern void pci_remove_bus_device(struct pci_dev *dev); |
332 | void pci_setup_cardbus(struct pci_bus *bus); | ||
329 | 333 | ||
330 | /* Generic PCI functions exported to card drivers */ | 334 | /* Generic PCI functions exported to card drivers */ |
331 | 335 | ||
@@ -391,8 +395,8 @@ int pci_assign_resource(struct pci_dev *dev, int i); | |||
391 | void pci_restore_bars(struct pci_dev *dev); | 395 | void pci_restore_bars(struct pci_dev *dev); |
392 | 396 | ||
393 | /* ROM control related routines */ | 397 | /* ROM control related routines */ |
394 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size); | 398 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); |
395 | void __iomem *pci_map_rom_copy(struct pci_dev *pdev, size_t *size); | 399 | void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size); |
396 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); | 400 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); |
397 | void pci_remove_rom(struct pci_dev *pdev); | 401 | void pci_remove_rom(struct pci_dev *pdev); |
398 | 402 | ||
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 8081a281fa5e..9c51917b1cce 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | struct radix_tree_root { | 25 | struct radix_tree_root { |
26 | unsigned int height; | 26 | unsigned int height; |
27 | int gfp_mask; | 27 | unsigned int gfp_mask; |
28 | struct radix_tree_node *rnode; | 28 | struct radix_tree_node *rnode; |
29 | }; | 29 | }; |
30 | 30 | ||
@@ -50,7 +50,7 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long); | |||
50 | unsigned int | 50 | unsigned int |
51 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | 51 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, |
52 | unsigned long first_index, unsigned int max_items); | 52 | unsigned long first_index, unsigned int max_items); |
53 | int radix_tree_preload(int gfp_mask); | 53 | int radix_tree_preload(unsigned int __nocast gfp_mask); |
54 | void radix_tree_init(void); | 54 | void radix_tree_init(void); |
55 | void *radix_tree_tag_set(struct radix_tree_root *root, | 55 | void *radix_tree_tag_set(struct radix_tree_root *root, |
56 | unsigned long index, int tag); | 56 | unsigned long index, int tag); |
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h index 4bf1659f8aa8..9de99198caf1 100644 --- a/include/linux/raid/bitmap.h +++ b/include/linux/raid/bitmap.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #define BITMAP_H 1 | 7 | #define BITMAP_H 1 |
8 | 8 | ||
9 | #define BITMAP_MAJOR 3 | 9 | #define BITMAP_MAJOR 3 |
10 | #define BITMAP_MINOR 38 | 10 | #define BITMAP_MINOR 39 |
11 | 11 | ||
12 | /* | 12 | /* |
13 | * in-memory bitmap: | 13 | * in-memory bitmap: |
@@ -147,8 +147,9 @@ typedef struct bitmap_super_s { | |||
147 | __u32 state; /* 48 bitmap state information */ | 147 | __u32 state; /* 48 bitmap state information */ |
148 | __u32 chunksize; /* 52 the bitmap chunk size in bytes */ | 148 | __u32 chunksize; /* 52 the bitmap chunk size in bytes */ |
149 | __u32 daemon_sleep; /* 56 seconds between disk flushes */ | 149 | __u32 daemon_sleep; /* 56 seconds between disk flushes */ |
150 | __u32 write_behind; /* 60 number of outstanding write-behind writes */ | ||
150 | 151 | ||
151 | __u8 pad[256 - 60]; /* set to zero */ | 152 | __u8 pad[256 - 64]; /* set to zero */ |
152 | } bitmap_super_t; | 153 | } bitmap_super_t; |
153 | 154 | ||
154 | /* notes: | 155 | /* notes: |
@@ -226,6 +227,9 @@ struct bitmap { | |||
226 | 227 | ||
227 | unsigned long flags; | 228 | unsigned long flags; |
228 | 229 | ||
230 | unsigned long max_write_behind; /* write-behind mode */ | ||
231 | atomic_t behind_writes; | ||
232 | |||
229 | /* | 233 | /* |
230 | * the bitmap daemon - periodically wakes up and sweeps the bitmap | 234 | * the bitmap daemon - periodically wakes up and sweeps the bitmap |
231 | * file, cleaning up bits and flushing out pages to disk as necessary | 235 | * file, cleaning up bits and flushing out pages to disk as necessary |
@@ -260,9 +264,10 @@ int bitmap_setallbits(struct bitmap *bitmap); | |||
260 | void bitmap_write_all(struct bitmap *bitmap); | 264 | void bitmap_write_all(struct bitmap *bitmap); |
261 | 265 | ||
262 | /* these are exported */ | 266 | /* these are exported */ |
263 | int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors); | 267 | int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, |
264 | void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, | 268 | unsigned long sectors, int behind); |
265 | int success); | 269 | void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, |
270 | unsigned long sectors, int success, int behind); | ||
266 | int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded); | 271 | int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded); |
267 | void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); | 272 | void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); |
268 | void bitmap_close_sync(struct bitmap *bitmap); | 273 | void bitmap_close_sync(struct bitmap *bitmap); |
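Layout note on the superblock change above: the fields documented at byte offsets 48, 52 and 56 are now followed by the 4-byte write_behind field ending at offset 64, so the trailing pad shrinks from 256 - 60 to 256 - 64 bytes and bitmap_super_t stays 256 bytes overall.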
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h index e04c4fe45b53..7eaf290e10e7 100644 --- a/include/linux/raid/linear.h +++ b/include/linux/raid/linear.h | |||
@@ -14,8 +14,8 @@ typedef struct dev_info dev_info_t; | |||
14 | struct linear_private_data | 14 | struct linear_private_data |
15 | { | 15 | { |
16 | dev_info_t **hash_table; | 16 | dev_info_t **hash_table; |
17 | dev_info_t *smallest; | 17 | sector_t hash_spacing; |
18 | int nr_zones; | 18 | int preshift; /* shift before dividing by hash_spacing */ |
19 | dev_info_t disks[0]; | 19 | dev_info_t disks[0]; |
20 | }; | 20 | }; |
21 | 21 | ||
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 8c14ba565a45..ebce949b1443 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h | |||
@@ -86,70 +86,6 @@ typedef struct mdk_rdev_s mdk_rdev_t; | |||
86 | #define MAX_CHUNK_SIZE (4096*1024) | 86 | #define MAX_CHUNK_SIZE (4096*1024) |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * default readahead | ||
90 | */ | ||
91 | |||
92 | static inline int disk_faulty(mdp_disk_t * d) | ||
93 | { | ||
94 | return d->state & (1 << MD_DISK_FAULTY); | ||
95 | } | ||
96 | |||
97 | static inline int disk_active(mdp_disk_t * d) | ||
98 | { | ||
99 | return d->state & (1 << MD_DISK_ACTIVE); | ||
100 | } | ||
101 | |||
102 | static inline int disk_sync(mdp_disk_t * d) | ||
103 | { | ||
104 | return d->state & (1 << MD_DISK_SYNC); | ||
105 | } | ||
106 | |||
107 | static inline int disk_spare(mdp_disk_t * d) | ||
108 | { | ||
109 | return !disk_sync(d) && !disk_active(d) && !disk_faulty(d); | ||
110 | } | ||
111 | |||
112 | static inline int disk_removed(mdp_disk_t * d) | ||
113 | { | ||
114 | return d->state & (1 << MD_DISK_REMOVED); | ||
115 | } | ||
116 | |||
117 | static inline void mark_disk_faulty(mdp_disk_t * d) | ||
118 | { | ||
119 | d->state |= (1 << MD_DISK_FAULTY); | ||
120 | } | ||
121 | |||
122 | static inline void mark_disk_active(mdp_disk_t * d) | ||
123 | { | ||
124 | d->state |= (1 << MD_DISK_ACTIVE); | ||
125 | } | ||
126 | |||
127 | static inline void mark_disk_sync(mdp_disk_t * d) | ||
128 | { | ||
129 | d->state |= (1 << MD_DISK_SYNC); | ||
130 | } | ||
131 | |||
132 | static inline void mark_disk_spare(mdp_disk_t * d) | ||
133 | { | ||
134 | d->state = 0; | ||
135 | } | ||
136 | |||
137 | static inline void mark_disk_removed(mdp_disk_t * d) | ||
138 | { | ||
139 | d->state = (1 << MD_DISK_FAULTY) | (1 << MD_DISK_REMOVED); | ||
140 | } | ||
141 | |||
142 | static inline void mark_disk_inactive(mdp_disk_t * d) | ||
143 | { | ||
144 | d->state &= ~(1 << MD_DISK_ACTIVE); | ||
145 | } | ||
146 | |||
147 | static inline void mark_disk_nonsync(mdp_disk_t * d) | ||
148 | { | ||
149 | d->state &= ~(1 << MD_DISK_SYNC); | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * MD's 'extended' device | 89 | * MD's 'extended' device |
154 | */ | 90 | */ |
155 | struct mdk_rdev_s | 91 | struct mdk_rdev_s |
@@ -166,6 +102,7 @@ struct mdk_rdev_s | |||
166 | int sb_loaded; | 102 | int sb_loaded; |
167 | sector_t data_offset; /* start of data in array */ | 103 | sector_t data_offset; /* start of data in array */ |
168 | sector_t sb_offset; | 104 | sector_t sb_offset; |
105 | int sb_size; /* bytes in the superblock */ | ||
169 | int preferred_minor; /* autorun support */ | 106 | int preferred_minor; /* autorun support */ |
170 | 107 | ||
171 | /* A device can be in one of three states based on two flags: | 108 | /* A device can be in one of three states based on two flags: |
@@ -181,6 +118,9 @@ struct mdk_rdev_s | |||
181 | int faulty; /* if faulty do not issue IO requests */ | 118 | int faulty; /* if faulty do not issue IO requests */ |
182 | int in_sync; /* device is a full member of the array */ | 119 | int in_sync; /* device is a full member of the array */ |
183 | 120 | ||
121 | unsigned long flags; /* Should include faulty and in_sync here. */ | ||
122 | #define WriteMostly 4 /* Avoid reading if at all possible */ | ||
123 | |||
184 | int desc_nr; /* descriptor index in the superblock */ | 124 | int desc_nr; /* descriptor index in the superblock */ |
185 | int raid_disk; /* role of device in array */ | 125 | int raid_disk; /* role of device in array */ |
186 | int saved_raid_disk; /* role that device used to have in the | 126 | int saved_raid_disk; /* role that device used to have in the |
@@ -272,12 +212,19 @@ struct mddev_s | |||
272 | atomic_t writes_pending; | 212 | atomic_t writes_pending; |
273 | request_queue_t *queue; /* for plugging ... */ | 213 | request_queue_t *queue; /* for plugging ... */ |
274 | 214 | ||
215 | atomic_t write_behind; /* outstanding async IO */ | ||
216 | unsigned int max_write_behind; /* 0 = sync */ | ||
217 | |||
275 | struct bitmap *bitmap; /* the bitmap for the device */ | 218 | struct bitmap *bitmap; /* the bitmap for the device */ |
276 | struct file *bitmap_file; /* the bitmap file */ | 219 | struct file *bitmap_file; /* the bitmap file */ |
277 | long bitmap_offset; /* offset from superblock of | 220 | long bitmap_offset; /* offset from superblock of |
278 | * start of bitmap. May be | 221 | * start of bitmap. May be |
279 | * negative, but not '0' | 222 | * negative, but not '0' |
280 | */ | 223 | */ |
224 | long default_bitmap_offset; /* this is the offset to use when | ||
225 | * hot-adding a bitmap. It should | ||
226 | * eventually be settable by sysfs. | ||
227 | */ | ||
281 | 228 | ||
282 | struct list_head all_mddevs; | 229 | struct list_head all_mddevs; |
283 | }; | 230 | }; |
@@ -314,6 +261,12 @@ struct mdk_personality_s | |||
314 | int (*resize) (mddev_t *mddev, sector_t sectors); | 261 | int (*resize) (mddev_t *mddev, sector_t sectors); |
315 | int (*reshape) (mddev_t *mddev, int raid_disks); | 262 | int (*reshape) (mddev_t *mddev, int raid_disks); |
316 | int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); | 263 | int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); |
264 | /* quiesce moves between quiescence states | ||
265 | * 0 - fully active | ||
266 | * 1 - no new requests allowed | ||
267 | * others - reserved | ||
268 | */ | ||
269 | void (*quiesce) (mddev_t *mddev, int state); | ||
317 | }; | 270 | }; |
318 | 271 | ||
319 | 272 | ||
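Since the new quiesce() method only documents its state convention in a comment, here is a hedged sketch of what a personality-side implementation could look like; the demo_conf structure, its fields and the wait mechanism are all invented for illustration:

#include <linux/raid/md_k.h>
#include <linux/wait.h>
#include <asm/atomic.h>

struct demo_conf {
        int quiescing;                  /* set while new requests are refused */
        atomic_t pending;               /* in-flight requests */
        wait_queue_head_t wait;
};

static void demo_quiesce(mddev_t *mddev, int state)
{
        struct demo_conf *conf = mddev->private;

        switch (state) {
        case 1:                         /* stop accepting requests, drain old ones */
                conf->quiescing = 1;
                wait_event(conf->wait, atomic_read(&conf->pending) == 0);
                break;
        case 0:                         /* back to fully active */
                conf->quiescing = 0;
                wake_up(&conf->wait);
                break;
        }
}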
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index dc65cd435494..c100fa5d4bfa 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h | |||
@@ -79,6 +79,11 @@ | |||
79 | #define MD_DISK_SYNC 2 /* disk is in sync with the raid set */ | 79 | #define MD_DISK_SYNC 2 /* disk is in sync with the raid set */ |
80 | #define MD_DISK_REMOVED 3 /* disk has been removed from the raid set */ | 80 | #define MD_DISK_REMOVED 3 /* disk has been removed from the raid set */ |
81 | 81 | ||
82 | #define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" in RAID1 config. | ||
83 | * read requests will only be sent here in | ||
84 | * dire need | ||
85 | */ | ||
86 | |||
82 | typedef struct mdp_device_descriptor_s { | 87 | typedef struct mdp_device_descriptor_s { |
83 | __u32 number; /* 0 Device number in the entire set */ | 88 | __u32 number; /* 0 Device number in the entire set */ |
84 | __u32 major; /* 1 Device major number */ | 89 | __u32 major; /* 1 Device major number */ |
@@ -193,7 +198,7 @@ struct mdp_superblock_1 { | |||
193 | 198 | ||
194 | __u64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/ | 199 | __u64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/ |
195 | __u32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */ | 200 | __u32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */ |
196 | __u32 layout; /* only for raid5 currently */ | 201 | __u32 layout; /* only for raid5 and raid10 currently */ |
197 | __u64 size; /* used size of component devices, in 512byte sectors */ | 202 | __u64 size; /* used size of component devices, in 512byte sectors */ |
198 | 203 | ||
199 | __u32 chunksize; /* in 512byte sectors */ | 204 | __u32 chunksize; /* in 512byte sectors */ |
@@ -212,7 +217,9 @@ struct mdp_superblock_1 { | |||
212 | __u32 dev_number; /* permanent identifier of this device - not role in raid */ | 217 | __u32 dev_number; /* permanent identifier of this device - not role in raid */ |
213 | __u32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */ | 218 | __u32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */ |
214 | __u8 device_uuid[16]; /* user-space settable, ignored by kernel */ | 219 | __u8 device_uuid[16]; /* user-space settable, ignored by kernel */ |
215 | __u8 pad2[64-56]; /* set to 0 when writing */ | 220 | __u8 devflags; /* per-device flags. Only one defined...*/ |
221 | #define WriteMostly1 1 /* mask for writemostly flag in above */ | ||
222 | __u8 pad2[64-57]; /* set to 0 when writing */ | ||
216 | 223 | ||
217 | /* array state information - 64 bytes */ | 224 | /* array state information - 64 bytes */ |
218 | __u64 utime; /* 40 bits seconds, 24 bits microseconds */ | 225 | __u64 utime; /* 40 bits seconds, 24 bits microseconds */ |
@@ -231,5 +238,10 @@ struct mdp_superblock_1 { | |||
231 | __u16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */ | 238 | __u16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */ |
232 | }; | 239 | }; |
233 | 240 | ||
241 | /* feature_map bits */ | ||
242 | #define MD_FEATURE_BITMAP_OFFSET 1 | ||
243 | |||
244 | #define MD_FEATURE_ALL 1 | ||
245 | |||
234 | #endif | 246 | #endif |
235 | 247 | ||
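A short sketch of how a version-1 superblock consumer might test the new fields (illustrative helpers, not part of this patch; multi-byte on-disk fields are little-endian, hence le32_to_cpu):

static int sb_has_internal_bitmap(struct mdp_superblock_1 *sb)
{
	return (le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) != 0;
}

static int dev_is_write_mostly(struct mdp_superblock_1 *sb)
{
	return (sb->devflags & WriteMostly1) != 0;	/* single-byte field, no swab needed */
}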
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h index 9d93cf12e890..60e19b667548 100644 --- a/include/linux/raid/raid1.h +++ b/include/linux/raid/raid1.h | |||
@@ -80,6 +80,9 @@ struct r1bio_s { | |||
80 | atomic_t remaining; /* 'have we finished' count, | 80 | atomic_t remaining; /* 'have we finished' count, |
81 | * used from IRQ handlers | 81 | * used from IRQ handlers |
82 | */ | 82 | */ |
83 | atomic_t behind_remaining; /* number of write-behind ios remaining | ||
84 | * in this BehindIO request | ||
85 | */ | ||
83 | sector_t sector; | 86 | sector_t sector; |
84 | int sectors; | 87 | int sectors; |
85 | unsigned long state; | 88 | unsigned long state; |
@@ -107,4 +110,14 @@ struct r1bio_s { | |||
107 | #define R1BIO_Uptodate 0 | 110 | #define R1BIO_Uptodate 0 |
108 | #define R1BIO_IsSync 1 | 111 | #define R1BIO_IsSync 1 |
109 | #define R1BIO_Degraded 2 | 112 | #define R1BIO_Degraded 2 |
113 | #define R1BIO_BehindIO 3 | ||
114 | /* For write-behind requests, we call bi_end_io when | ||
115 | * the last non-write-behind device completes, providing | ||
116 | * any write was successful. Otherwise we call it when | ||
117 | * any write-behind write succeeds, or with failure when | ||
118 | * the last write completes (and all writes failed). | ||
119 | * Record that bi_end_io was called with this flag... | ||
120 | */ | ||
121 | #define R1BIO_Returned 4 | ||
122 | |||
110 | #endif | 123 | #endif |
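The R1BIO_Returned flag lets the different completion paths agree that bi_end_io has already been delivered for an r1_bio. A hedged sketch of the intended test-and-set idiom (illustrative; master_bio is assumed to be the originating bio, as elsewhere in raid1):

	/* Report completion to the original bio exactly once. */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state))
		bio_endio(r1_bio->master_bio, r1_bio->master_bio->bi_size,
			  test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);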
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index d63ddcb4afad..176fc653c284 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h | |||
@@ -134,6 +134,7 @@ struct stripe_head { | |||
134 | unsigned long state; /* state flags */ | 134 | unsigned long state; /* state flags */ |
135 | atomic_t count; /* nr of active thread/requests */ | 135 | atomic_t count; /* nr of active thread/requests */ |
136 | spinlock_t lock; | 136 | spinlock_t lock; |
137 | int bm_seq; /* sequence number for bitmap flushes */ | ||
137 | struct r5dev { | 138 | struct r5dev { |
138 | struct bio req; | 139 | struct bio req; |
139 | struct bio_vec vec; | 140 | struct bio_vec vec; |
@@ -165,12 +166,13 @@ struct stripe_head { | |||
165 | /* | 166 | /* |
166 | * Stripe state | 167 | * Stripe state |
167 | */ | 168 | */ |
168 | #define STRIPE_ERROR 1 | ||
169 | #define STRIPE_HANDLE 2 | 169 | #define STRIPE_HANDLE 2 |
170 | #define STRIPE_SYNCING 3 | 170 | #define STRIPE_SYNCING 3 |
171 | #define STRIPE_INSYNC 4 | 171 | #define STRIPE_INSYNC 4 |
172 | #define STRIPE_PREREAD_ACTIVE 5 | 172 | #define STRIPE_PREREAD_ACTIVE 5 |
173 | #define STRIPE_DELAYED 6 | 173 | #define STRIPE_DELAYED 6 |
174 | #define STRIPE_DEGRADED 7 | ||
175 | #define STRIPE_BIT_DELAY 8 | ||
174 | 176 | ||
175 | /* | 177 | /* |
176 | * Plugging: | 178 | * Plugging: |
@@ -210,10 +212,20 @@ struct raid5_private_data { | |||
210 | 212 | ||
211 | struct list_head handle_list; /* stripes needing handling */ | 213 | struct list_head handle_list; /* stripes needing handling */ |
212 | struct list_head delayed_list; /* stripes that have plugged requests */ | 214 | struct list_head delayed_list; /* stripes that have plugged requests */ |
215 | struct list_head bitmap_list; /* stripes delayed awaiting bitmap update */ | ||
213 | atomic_t preread_active_stripes; /* stripes with scheduled io */ | 216 | atomic_t preread_active_stripes; /* stripes with scheduled io */ |
214 | 217 | ||
215 | char cache_name[20]; | 218 | char cache_name[20]; |
216 | kmem_cache_t *slab_cache; /* for allocating stripes */ | 219 | kmem_cache_t *slab_cache; /* for allocating stripes */ |
220 | |||
221 | int seq_flush, seq_write; | ||
222 | int quiesce; | ||
223 | |||
224 | int fullsync; /* set to 1 if a full sync is needed, | ||
225 | * (fresh device added). | ||
226 | * Cleared when a sync completes. | ||
227 | */ | ||
228 | |||
217 | /* | 229 | /* |
218 | * Free stripes pool | 230 | * Free stripes pool |
219 | */ | 231 | */ |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index fd276adf0fd5..4e65eb44adfd 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -52,8 +52,8 @@ struct rcu_head { | |||
52 | void (*func)(struct rcu_head *head); | 52 | void (*func)(struct rcu_head *head); |
53 | }; | 53 | }; |
54 | 54 | ||
55 | #define RCU_HEAD_INIT(head) { .next = NULL, .func = NULL } | 55 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } |
56 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT(head) | 56 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT |
57 | #define INIT_RCU_HEAD(ptr) do { \ | 57 | #define INIT_RCU_HEAD(ptr) do { \ |
58 | (ptr)->next = NULL; (ptr)->func = NULL; \ | 58 | (ptr)->next = NULL; (ptr)->func = NULL; \ |
59 | } while (0) | 59 | } while (0) |
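With the unused argument dropped, static initialization of an rcu_head now reads (trivial example):

	static struct rcu_head my_rcu_head = RCU_HEAD_INIT;	/* explicit form */
	RCU_HEAD(my_other_rcu_head);				/* declare + initialize in one go */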
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h new file mode 100644 index 000000000000..e1adbba14b67 --- /dev/null +++ b/include/linux/rcuref.h | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * rcuref.h | ||
3 | * | ||
4 | * Reference counting for elements of lists/arrays protected by | ||
5 | * RCU. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | * | ||
21 | * Copyright (C) IBM Corporation, 2005 | ||
22 | * | ||
23 | * Author: Dipankar Sarma <dipankar@in.ibm.com> | ||
24 | * Ravikiran Thirumalai <kiran_th@gmail.com> | ||
25 | * | ||
26 | * See Documentation/RCU/rcuref.txt for detailed user guide. | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | #ifndef _RCUREF_H_ | ||
31 | #define _RCUREF_H_ | ||
32 | |||
33 | #ifdef __KERNEL__ | ||
34 | |||
35 | #include <linux/types.h> | ||
36 | #include <linux/interrupt.h> | ||
37 | #include <linux/spinlock.h> | ||
38 | #include <asm/atomic.h> | ||
39 | |||
40 | /* | ||
41 | * These APIs work on traditional atomic_t counters used in the | ||
42 | * kernel for reference counting. Under special circumstances | ||
43 | * where a lock-free get() operation races with a put() operation | ||
44 | * these APIs can be used. See Documentation/RCU/rcuref.txt. | ||
45 | */ | ||
46 | |||
47 | #ifdef __HAVE_ARCH_CMPXCHG | ||
48 | |||
49 | /** | ||
50 | * rcuref_inc - increment refcount for object. | ||
51 | * @rcuref: reference counter in the object in question. | ||
52 | * | ||
53 | * This should be used only for objects where we use RCU and | ||
54 | * use the rcuref_inc_lf() api to acquire a reference | ||
55 | * in a lock-free reader-side critical section. | ||
56 | */ | ||
57 | static inline void rcuref_inc(atomic_t *rcuref) | ||
58 | { | ||
59 | atomic_inc(rcuref); | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * rcuref_dec - decrement refcount for object. | ||
64 | * @rcuref: reference counter in the object in question. | ||
65 | * | ||
66 | * This should be used only for objects where we use RCU and | ||
67 | * use the rcuref_inc_lf() api to acquire a reference | ||
68 | * in a lock-free reader-side critical section. | ||
69 | */ | ||
70 | static inline void rcuref_dec(atomic_t *rcuref) | ||
71 | { | ||
72 | atomic_dec(rcuref); | ||
73 | } | ||
74 | |||
75 | /** | ||
76 | * rcuref_dec_and_test - decrement refcount for object and test | ||
77 | * @rcuref: reference counter in the object. | ||
78 | * @release: pointer to the function that will clean up the object | ||
79 | * when the last reference to the object is released. | ||
80 | * This pointer is required. | ||
81 | * | ||
82 | * Decrement the refcount, and if 0, return 1. Else return 0. | ||
83 | * | ||
84 | * This should be used only for objects where we use RCU and | ||
85 | * use the rcuref_inc_lf() api to acquire a reference | ||
86 | * in a lock-free reader-side critical section. | ||
87 | */ | ||
88 | static inline int rcuref_dec_and_test(atomic_t *rcuref) | ||
89 | { | ||
90 | return atomic_dec_and_test(rcuref); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * cmpxchg is needed on UP too, if deletions to the list/array can happen | ||
95 | * in interrupt context. | ||
96 | */ | ||
97 | |||
98 | /** | ||
99 | * rcuref_inc_lf - Take reference to an object in a read-side | ||
100 | * critical section protected by RCU. | ||
101 | * @rcuref: reference counter in the object in question. | ||
102 | * | ||
103 | * Try and increment the refcount by 1. The increment might fail if | ||
104 | * the reference counter has been through a 1 to 0 transition and | ||
105 | * is no longer part of the lock-free list. | ||
106 | * Returns non-zero on successful increment and zero otherwise. | ||
107 | */ | ||
108 | static inline int rcuref_inc_lf(atomic_t *rcuref) | ||
109 | { | ||
110 | int c, old; | ||
111 | c = atomic_read(rcuref); | ||
112 | while (c && (old = cmpxchg(&rcuref->counter, c, c + 1)) != c) | ||
113 | c = old; | ||
114 | return c; | ||
115 | } | ||
116 | |||
117 | #else /* !__HAVE_ARCH_CMPXCHG */ | ||
118 | |||
119 | extern spinlock_t __rcuref_hash[]; | ||
120 | |||
121 | /* | ||
122 | * Use a hash table of locks to protect the reference count | ||
123 | * since cmpxchg is not available in this arch. | ||
124 | */ | ||
125 | #ifdef CONFIG_SMP | ||
126 | #define RCUREF_HASH_SIZE 4 | ||
127 | #define RCUREF_HASH(k) \ | ||
128 | (&__rcuref_hash[(((unsigned long)k)>>8) & (RCUREF_HASH_SIZE-1)]) | ||
129 | #else | ||
130 | #define RCUREF_HASH_SIZE 1 | ||
131 | #define RCUREF_HASH(k) &__rcuref_hash[0] | ||
132 | #endif /* CONFIG_SMP */ | ||
133 | |||
134 | /** | ||
135 | * rcuref_inc - increment refcount for object. | ||
136 | * @rcuref: reference counter in the object in question. | ||
137 | * | ||
138 | * This should be used only for objects where we use RCU and | ||
139 | * use the rcuref_inc_lf() api to acquire a reference in a lock-free | ||
140 | * reader-side critical section. | ||
141 | */ | ||
142 | static inline void rcuref_inc(atomic_t *rcuref) | ||
143 | { | ||
144 | unsigned long flags; | ||
145 | spin_lock_irqsave(RCUREF_HASH(rcuref), flags); | ||
146 | rcuref->counter += 1; | ||
147 | spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags); | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * rcuref_dec - decrement refcount for object. | ||
152 | * @rcuref: reference counter in the object in question. | ||
153 | * | ||
154 | * This should be used only for objects where we use RCU and | ||
155 | * use the rcuref_inc_lf() api to acquire a reference in a lock-free | ||
156 | * reader-side critical section. | ||
157 | */ | ||
158 | static inline void rcuref_dec(atomic_t *rcuref) | ||
159 | { | ||
160 | unsigned long flags; | ||
161 | spin_lock_irqsave(RCUREF_HASH(rcuref), flags); | ||
162 | rcuref->counter -= 1; | ||
163 | spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags); | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * rcuref_dec_and_test - decrement refcount for object and test | ||
168 | * @rcuref: reference counter in the object. | ||
169 | * @release: pointer to the function that will clean up the object | ||
170 | * when the last reference to the object is released. | ||
171 | * This pointer is required. | ||
172 | * | ||
173 | * Decrement the refcount, and if 0, return 1. Else return 0. | ||
174 | * | ||
175 | * This should be used only for objects where we use RCU and | ||
176 | * use the rcuref_inc_lf() api to acquire a reference in a lock-free | ||
177 | * reader-side critical section. | ||
178 | */ | ||
179 | static inline int rcuref_dec_and_test(atomic_t *rcuref) | ||
180 | { | ||
181 | unsigned long flags; | ||
182 | spin_lock_irqsave(RCUREF_HASH(rcuref), flags); | ||
183 | rcuref->counter--; | ||
184 | if (!rcuref->counter) { | ||
185 | spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags); | ||
186 | return 1; | ||
187 | } else { | ||
188 | spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags); | ||
189 | return 0; | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * rcuref_inc_lf - Take reference to an object of a lock-free collection | ||
195 | * by traversing a lock-free list/array. | ||
196 | * @rcuref: reference counter in the object in question. | ||
197 | * | ||
198 | * Try and increment the refcount by 1. The increment might fail if | ||
199 | * the reference counter has been through a 1 to 0 transition and | ||
200 | * the object is no longer part of the lock-free list. | ||
201 | * Returns non-zero on successful increment and zero otherwise. | ||
202 | */ | ||
203 | static inline int rcuref_inc_lf(atomic_t *rcuref) | ||
204 | { | ||
205 | int ret; | ||
206 | unsigned long flags; | ||
207 | spin_lock_irqsave(RCUREF_HASH(rcuref), flags); | ||
208 | if (rcuref->counter) | ||
209 | ret = rcuref->counter++; | ||
210 | else | ||
211 | ret = 0; | ||
212 | spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags); | ||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | |||
217 | #endif /* !__HAVE_ARCH_CMPXCHG */ | ||
218 | |||
219 | #endif /* __KERNEL__ */ | ||
220 | #endif /* _RCUREF_H_ */ | ||
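A condensed usage sketch of the new API, covering both sides of the get/put race it targets (the element type and callback are illustrative; Documentation/RCU/rcuref.txt has the full guide):

#include <linux/rcupdate.h>
#include <linux/rcuref.h>

struct element {
	atomic_t	refcnt;		/* manipulated only via rcuref_*() */
	struct rcu_head	rcu;
};

/* Reader side: lock-free lookup that may race with the final put. */
static struct element *element_get(struct element **slot)
{
	struct element *el;

	rcu_read_lock();
	el = rcu_dereference(*slot);
	if (el && !rcuref_inc_lf(&el->refcnt))
		el = NULL;		/* lost the race with a 1 -> 0 transition */
	rcu_read_unlock();
	return el;
}

/* Put side: defer the actual free until all RCU readers are done. */
static void element_put(struct element *el, void (*free_cb)(struct rcu_head *))
{
	if (rcuref_dec_and_test(&el->refcnt))
		call_rcu(&el->rcu, free_cb);
}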
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 17e458e17e2b..af00b10294cd 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h | |||
@@ -2097,7 +2097,7 @@ void reiserfs_free_block(struct reiserfs_transaction_handle *th, struct inode *, | |||
2097 | b_blocknr_t, int for_unformatted); | 2097 | b_blocknr_t, int for_unformatted); |
2098 | int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int, | 2098 | int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *, b_blocknr_t *, int, |
2099 | int); | 2099 | int); |
2100 | extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, | 2100 | static inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, |
2101 | b_blocknr_t * new_blocknrs, | 2101 | b_blocknr_t * new_blocknrs, |
2102 | int amount_needed) | 2102 | int amount_needed) |
2103 | { | 2103 | { |
@@ -2113,7 +2113,7 @@ extern inline int reiserfs_new_form_blocknrs(struct tree_balance *tb, | |||
2113 | 0); | 2113 | 0); |
2114 | } | 2114 | } |
2115 | 2115 | ||
2116 | extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle | 2116 | static inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle |
2117 | *th, struct inode *inode, | 2117 | *th, struct inode *inode, |
2118 | b_blocknr_t * new_blocknrs, | 2118 | b_blocknr_t * new_blocknrs, |
2119 | struct path *path, long block) | 2119 | struct path *path, long block) |
@@ -2130,7 +2130,7 @@ extern inline int reiserfs_new_unf_blocknrs(struct reiserfs_transaction_handle | |||
2130 | } | 2130 | } |
2131 | 2131 | ||
2132 | #ifdef REISERFS_PREALLOCATE | 2132 | #ifdef REISERFS_PREALLOCATE |
2133 | extern inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle | 2133 | static inline int reiserfs_new_unf_blocknrs2(struct reiserfs_transaction_handle |
2134 | *th, struct inode *inode, | 2134 | *th, struct inode *inode, |
2135 | b_blocknr_t * new_blocknrs, | 2135 | b_blocknr_t * new_blocknrs, |
2136 | struct path *path, long block) | 2136 | struct path *path, long block) |
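The extern-inline-to-static-inline conversion here is presumably header hygiene: `static inline` gives every translation unit its own inlinable copy and never requires an out-of-line definition, whereas gcc's `extern inline` semantics depend on the compilation mode. The portable header idiom is simply:

	/* each includer gets a private, inlinable copy; no external symbol needed */
	static inline int example_helper(int x)
	{
		return x + 1;
	}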
diff --git a/include/linux/sched.h b/include/linux/sched.h index ea1b5f32ec5c..38c8654aaa96 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -114,6 +114,7 @@ extern unsigned long nr_iowait(void); | |||
114 | #define TASK_TRACED 8 | 114 | #define TASK_TRACED 8 |
115 | #define EXIT_ZOMBIE 16 | 115 | #define EXIT_ZOMBIE 16 |
116 | #define EXIT_DEAD 32 | 116 | #define EXIT_DEAD 32 |
117 | #define TASK_NONINTERACTIVE 64 | ||
117 | 118 | ||
118 | #define __set_task_state(tsk, state_value) \ | 119 | #define __set_task_state(tsk, state_value) \ |
119 | do { (tsk)->state = (state_value); } while (0) | 120 | do { (tsk)->state = (state_value); } while (0) |
@@ -202,6 +203,8 @@ extern int in_sched_functions(unsigned long addr); | |||
202 | 203 | ||
203 | #define MAX_SCHEDULE_TIMEOUT LONG_MAX | 204 | #define MAX_SCHEDULE_TIMEOUT LONG_MAX |
204 | extern signed long FASTCALL(schedule_timeout(signed long timeout)); | 205 | extern signed long FASTCALL(schedule_timeout(signed long timeout)); |
206 | extern signed long schedule_timeout_interruptible(signed long timeout); | ||
207 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | ||
205 | asmlinkage void schedule(void); | 208 | asmlinkage void schedule(void); |
206 | 209 | ||
207 | struct namespace; | 210 | struct namespace; |
@@ -604,6 +607,11 @@ extern int groups_search(struct group_info *group_info, gid_t grp); | |||
604 | #define GROUP_AT(gi, i) \ | 607 | #define GROUP_AT(gi, i) \ |
605 | ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) | 608 | ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) |
606 | 609 | ||
610 | #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK | ||
611 | extern void prefetch_stack(struct task_struct*); | ||
612 | #else | ||
613 | static inline void prefetch_stack(struct task_struct *t) { } | ||
614 | #endif | ||
607 | 615 | ||
608 | struct audit_context; /* See audit.c */ | 616 | struct audit_context; /* See audit.c */ |
609 | struct mempolicy; | 617 | struct mempolicy; |
@@ -895,6 +903,8 @@ extern int task_curr(const task_t *p); | |||
895 | extern int idle_cpu(int cpu); | 903 | extern int idle_cpu(int cpu); |
896 | extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); | 904 | extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); |
897 | extern task_t *idle_task(int cpu); | 905 | extern task_t *idle_task(int cpu); |
906 | extern task_t *curr_task(int cpu); | ||
907 | extern void set_curr_task(int cpu, task_t *p); | ||
898 | 908 | ||
899 | void yield(void); | 909 | void yield(void); |
900 | 910 | ||
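The two new schedule_timeout helpers are expected to fold the set_current_state() call into the sleep, so an open-coded timed wait can be shortened (sketch):

	/* before */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ);

	/* after */
	schedule_timeout_interruptible(HZ);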
diff --git a/include/linux/security.h b/include/linux/security.h index 7aab6ab7c57f..55b02e1c73f4 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -250,29 +250,37 @@ struct swap_info_struct; | |||
250 | * @inode contains the inode structure. | 250 | * @inode contains the inode structure. |
251 | * Deallocate the inode security structure and set @inode->i_security to | 251 | * Deallocate the inode security structure and set @inode->i_security to |
252 | * NULL. | 252 | * NULL. |
253 | * @inode_init_security: | ||
254 | * Obtain the security attribute name suffix and value to set on a newly | ||
255 | * created inode and set up the incore security field for the new inode. | ||
256 | * This hook is called by the fs code as part of the inode creation | ||
257 | * transaction and provides for atomic labeling of the inode, unlike | ||
258 | * the post_create/mkdir/... hooks called by the VFS. The hook function | ||
259 | * is expected to allocate the name and value via kmalloc, with the caller | ||
260 | * being responsible for calling kfree after using them. | ||
261 | * If the security module does not use security attributes or does | ||
262 | * not wish to put a security attribute on this particular inode, | ||
263 | * then it should return -EOPNOTSUPP to skip this processing. | ||
264 | * @inode contains the inode structure of the newly created inode. | ||
265 | * @dir contains the inode structure of the parent directory. | ||
266 | * @name will be set to the allocated name suffix (e.g. selinux). | ||
267 | * @value will be set to the allocated attribute value. | ||
268 | * @len will be set to the length of the value. | ||
269 | * Returns 0 if @name and @value have been successfully set, | ||
270 | * -EOPNOTSUPP if no security attribute is needed, or | ||
271 | * -ENOMEM on memory allocation failure. | ||
253 | * @inode_create: | 272 | * @inode_create: |
254 | * Check permission to create a regular file. | 273 | * Check permission to create a regular file. |
255 | * @dir contains inode structure of the parent of the new file. | 274 | * @dir contains inode structure of the parent of the new file. |
256 | * @dentry contains the dentry structure for the file to be created. | 275 | * @dentry contains the dentry structure for the file to be created. |
257 | * @mode contains the file mode of the file to be created. | 276 | * @mode contains the file mode of the file to be created. |
258 | * Return 0 if permission is granted. | 277 | * Return 0 if permission is granted. |
259 | * @inode_post_create: | ||
260 | * Set the security attributes on a newly created regular file. This hook | ||
261 | * is called after a file has been successfully created. | ||
262 | * @dir contains the inode structure of the parent directory of the new file. | ||
263 | * @dentry contains the dentry structure for the newly created file. | ||
264 | * @mode contains the file mode. | ||
265 | * @inode_link: | 278 | * @inode_link: |
266 | * Check permission before creating a new hard link to a file. | 279 | * Check permission before creating a new hard link to a file. |
267 | * @old_dentry contains the dentry structure for an existing link to the file. | 280 | * @old_dentry contains the dentry structure for an existing link to the file. |
268 | * @dir contains the inode structure of the parent directory of the new link. | 281 | * @dir contains the inode structure of the parent directory of the new link. |
269 | * @new_dentry contains the dentry structure for the new link. | 282 | * @new_dentry contains the dentry structure for the new link. |
270 | * Return 0 if permission is granted. | 283 | * Return 0 if permission is granted. |
271 | * @inode_post_link: | ||
272 | * Set security attributes for a new hard link to a file. | ||
273 | * @old_dentry contains the dentry structure for the existing link. | ||
274 | * @dir contains the inode structure of the parent directory of the new file. | ||
275 | * @new_dentry contains the dentry structure for the new file link. | ||
276 | * @inode_unlink: | 284 | * @inode_unlink: |
277 | * Check the permission to remove a hard link to a file. | 285 | * Check the permission to remove a hard link to a file. |
278 | * @dir contains the inode structure of parent directory of the file. | 286 | * @dir contains the inode structure of parent directory of the file. |
@@ -284,13 +292,6 @@ struct swap_info_struct; | |||
284 | * @dentry contains the dentry structure of the symbolic link. | 292 | * @dentry contains the dentry structure of the symbolic link. |
285 | * @old_name contains the pathname of file. | 293 | * @old_name contains the pathname of file. |
286 | * Return 0 if permission is granted. | 294 | * Return 0 if permission is granted. |
287 | * @inode_post_symlink: | ||
288 | * @dir contains the inode structure of the parent directory of the new link. | ||
289 | * @dentry contains the dentry structure of new symbolic link. | ||
290 | * @old_name contains the pathname of file. | ||
291 | * Set security attributes for a newly created symbolic link. Note that | ||
292 | * @dentry->d_inode may be NULL, since the filesystem might not | ||
293 | * instantiate the dentry (e.g. NFS). | ||
294 | * @inode_mkdir: | 295 | * @inode_mkdir: |
295 | * Check permissions to create a new directory in the existing directory | 296 | * Check permissions to create a new directory in the existing directory |
296 | * associated with inode structure @dir. | 297 | * associated with inode structure @dir. |
@@ -298,11 +299,6 @@ struct swap_info_struct; | |||
298 | * @dentry contains the dentry structure of new directory. | 299 | * @dentry contains the dentry structure of new directory. |
299 | * @mode contains the mode of new directory. | 300 | * @mode contains the mode of new directory. |
300 | * Return 0 if permission is granted. | 301 | * Return 0 if permission is granted. |
301 | * @inode_post_mkdir: | ||
302 | * Set security attributes on a newly created directory. | ||
303 | * @dir contains the inode structure of parent of the directory to be created. | ||
304 | * @dentry contains the dentry structure of new directory. | ||
305 | * @mode contains the mode of new directory. | ||
306 | * @inode_rmdir: | 302 | * @inode_rmdir: |
307 | * Check the permission to remove a directory. | 303 | * Check the permission to remove a directory. |
308 | * @dir contains the inode structure of parent of the directory to be removed. | 304 | * @dir contains the inode structure of parent of the directory to be removed. |
@@ -318,13 +314,6 @@ struct swap_info_struct; | |||
318 | * @mode contains the mode of the new file. | 314 | * @mode contains the mode of the new file. |
319 | * @dev contains the device number. | 315 | * @dev contains the device number. |
320 | * Return 0 if permission is granted. | 316 | * Return 0 if permission is granted. |
321 | * @inode_post_mknod: | ||
322 | * Set security attributes on a newly created special file (or socket or | ||
323 | * fifo file created via the mknod system call). | ||
324 | * @dir contains the inode structure of parent of the new node. | ||
325 | * @dentry contains the dentry structure of the new node. | ||
326 | * @mode contains the mode of the new node. | ||
327 | * @dev contains the device number. | ||
328 | * @inode_rename: | 317 | * @inode_rename: |
329 | * Check for permission to rename a file or directory. | 318 | * Check for permission to rename a file or directory. |
330 | * @old_dir contains the inode structure for parent of the old link. | 319 | * @old_dir contains the inode structure for parent of the old link. |
@@ -332,12 +321,6 @@ struct swap_info_struct; | |||
332 | * @new_dir contains the inode structure for parent of the new link. | 321 | * @new_dir contains the inode structure for parent of the new link. |
333 | * @new_dentry contains the dentry structure of the new link. | 322 | * @new_dentry contains the dentry structure of the new link. |
334 | * Return 0 if permission is granted. | 323 | * Return 0 if permission is granted. |
335 | * @inode_post_rename: | ||
336 | * Set security attributes on a renamed file or directory. | ||
337 | * @old_dir contains the inode structure for parent of the old link. | ||
338 | * @old_dentry contains the dentry structure of the old link. | ||
339 | * @new_dir contains the inode structure for parent of the new link. | ||
340 | * @new_dentry contains the dentry structure of the new link. | ||
341 | * @inode_readlink: | 324 | * @inode_readlink: |
342 | * Check the permission to read the symbolic link. | 325 | * Check the permission to read the symbolic link. |
343 | * @dentry contains the dentry structure for the file link. | 326 | * @dentry contains the dentry structure for the file link. |
@@ -1080,34 +1063,21 @@ struct security_operations { | |||
1080 | 1063 | ||
1081 | int (*inode_alloc_security) (struct inode *inode); | 1064 | int (*inode_alloc_security) (struct inode *inode); |
1082 | void (*inode_free_security) (struct inode *inode); | 1065 | void (*inode_free_security) (struct inode *inode); |
1066 | int (*inode_init_security) (struct inode *inode, struct inode *dir, | ||
1067 | char **name, void **value, size_t *len); | ||
1083 | int (*inode_create) (struct inode *dir, | 1068 | int (*inode_create) (struct inode *dir, |
1084 | struct dentry *dentry, int mode); | 1069 | struct dentry *dentry, int mode); |
1085 | void (*inode_post_create) (struct inode *dir, | ||
1086 | struct dentry *dentry, int mode); | ||
1087 | int (*inode_link) (struct dentry *old_dentry, | 1070 | int (*inode_link) (struct dentry *old_dentry, |
1088 | struct inode *dir, struct dentry *new_dentry); | 1071 | struct inode *dir, struct dentry *new_dentry); |
1089 | void (*inode_post_link) (struct dentry *old_dentry, | ||
1090 | struct inode *dir, struct dentry *new_dentry); | ||
1091 | int (*inode_unlink) (struct inode *dir, struct dentry *dentry); | 1072 | int (*inode_unlink) (struct inode *dir, struct dentry *dentry); |
1092 | int (*inode_symlink) (struct inode *dir, | 1073 | int (*inode_symlink) (struct inode *dir, |
1093 | struct dentry *dentry, const char *old_name); | 1074 | struct dentry *dentry, const char *old_name); |
1094 | void (*inode_post_symlink) (struct inode *dir, | ||
1095 | struct dentry *dentry, | ||
1096 | const char *old_name); | ||
1097 | int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, int mode); | 1075 | int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, int mode); |
1098 | void (*inode_post_mkdir) (struct inode *dir, struct dentry *dentry, | ||
1099 | int mode); | ||
1100 | int (*inode_rmdir) (struct inode *dir, struct dentry *dentry); | 1076 | int (*inode_rmdir) (struct inode *dir, struct dentry *dentry); |
1101 | int (*inode_mknod) (struct inode *dir, struct dentry *dentry, | 1077 | int (*inode_mknod) (struct inode *dir, struct dentry *dentry, |
1102 | int mode, dev_t dev); | 1078 | int mode, dev_t dev); |
1103 | void (*inode_post_mknod) (struct inode *dir, struct dentry *dentry, | ||
1104 | int mode, dev_t dev); | ||
1105 | int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, | 1079 | int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, |
1106 | struct inode *new_dir, struct dentry *new_dentry); | 1080 | struct inode *new_dir, struct dentry *new_dentry); |
1107 | void (*inode_post_rename) (struct inode *old_dir, | ||
1108 | struct dentry *old_dentry, | ||
1109 | struct inode *new_dir, | ||
1110 | struct dentry *new_dentry); | ||
1111 | int (*inode_readlink) (struct dentry *dentry); | 1081 | int (*inode_readlink) (struct dentry *dentry); |
1112 | int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); | 1082 | int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); |
1113 | int (*inode_permission) (struct inode *inode, int mask, struct nameidata *nd); | 1083 | int (*inode_permission) (struct inode *inode, int mask, struct nameidata *nd); |
@@ -1442,6 +1412,17 @@ static inline void security_inode_free (struct inode *inode) | |||
1442 | return; | 1412 | return; |
1443 | security_ops->inode_free_security (inode); | 1413 | security_ops->inode_free_security (inode); |
1444 | } | 1414 | } |
1415 | |||
1416 | static inline int security_inode_init_security (struct inode *inode, | ||
1417 | struct inode *dir, | ||
1418 | char **name, | ||
1419 | void **value, | ||
1420 | size_t *len) | ||
1421 | { | ||
1422 | if (unlikely (IS_PRIVATE (inode))) | ||
1423 | return -EOPNOTSUPP; | ||
1424 | return security_ops->inode_init_security (inode, dir, name, value, len); | ||
1425 | } | ||
1445 | 1426 | ||
1446 | static inline int security_inode_create (struct inode *dir, | 1427 | static inline int security_inode_create (struct inode *dir, |
1447 | struct dentry *dentry, | 1428 | struct dentry *dentry, |
@@ -1452,15 +1433,6 @@ static inline int security_inode_create (struct inode *dir, | |||
1452 | return security_ops->inode_create (dir, dentry, mode); | 1433 | return security_ops->inode_create (dir, dentry, mode); |
1453 | } | 1434 | } |
1454 | 1435 | ||
1455 | static inline void security_inode_post_create (struct inode *dir, | ||
1456 | struct dentry *dentry, | ||
1457 | int mode) | ||
1458 | { | ||
1459 | if (dentry->d_inode && unlikely (IS_PRIVATE (dentry->d_inode))) | ||
1460 | return; | ||
1461 | security_ops->inode_post_create (dir, dentry, mode); | ||
1462 | } | ||
1463 | |||
1464 | static inline int security_inode_link (struct dentry *old_dentry, | 1436 | static inline int security_inode_link (struct dentry *old_dentry, |
1465 | struct inode *dir, | 1437 | struct inode *dir, |
1466 | struct dentry *new_dentry) | 1438 | struct dentry *new_dentry) |
@@ -1470,15 +1442,6 @@ static inline int security_inode_link (struct dentry *old_dentry, | |||
1470 | return security_ops->inode_link (old_dentry, dir, new_dentry); | 1442 | return security_ops->inode_link (old_dentry, dir, new_dentry); |
1471 | } | 1443 | } |
1472 | 1444 | ||
1473 | static inline void security_inode_post_link (struct dentry *old_dentry, | ||
1474 | struct inode *dir, | ||
1475 | struct dentry *new_dentry) | ||
1476 | { | ||
1477 | if (new_dentry->d_inode && unlikely (IS_PRIVATE (new_dentry->d_inode))) | ||
1478 | return; | ||
1479 | security_ops->inode_post_link (old_dentry, dir, new_dentry); | ||
1480 | } | ||
1481 | |||
1482 | static inline int security_inode_unlink (struct inode *dir, | 1445 | static inline int security_inode_unlink (struct inode *dir, |
1483 | struct dentry *dentry) | 1446 | struct dentry *dentry) |
1484 | { | 1447 | { |
@@ -1496,15 +1459,6 @@ static inline int security_inode_symlink (struct inode *dir, | |||
1496 | return security_ops->inode_symlink (dir, dentry, old_name); | 1459 | return security_ops->inode_symlink (dir, dentry, old_name); |
1497 | } | 1460 | } |
1498 | 1461 | ||
1499 | static inline void security_inode_post_symlink (struct inode *dir, | ||
1500 | struct dentry *dentry, | ||
1501 | const char *old_name) | ||
1502 | { | ||
1503 | if (dentry->d_inode && unlikely (IS_PRIVATE (dentry->d_inode))) | ||
1504 | return; | ||
1505 | security_ops->inode_post_symlink (dir, dentry, old_name); | ||
1506 | } | ||
1507 | |||
1508 | static inline int security_inode_mkdir (struct inode *dir, | 1462 | static inline int security_inode_mkdir (struct inode *dir, |
1509 | struct dentry *dentry, | 1463 | struct dentry *dentry, |
1510 | int mode) | 1464 | int mode) |
@@ -1514,15 +1468,6 @@ static inline int security_inode_mkdir (struct inode *dir, | |||
1514 | return security_ops->inode_mkdir (dir, dentry, mode); | 1468 | return security_ops->inode_mkdir (dir, dentry, mode); |
1515 | } | 1469 | } |
1516 | 1470 | ||
1517 | static inline void security_inode_post_mkdir (struct inode *dir, | ||
1518 | struct dentry *dentry, | ||
1519 | int mode) | ||
1520 | { | ||
1521 | if (dentry->d_inode && unlikely (IS_PRIVATE (dentry->d_inode))) | ||
1522 | return; | ||
1523 | security_ops->inode_post_mkdir (dir, dentry, mode); | ||
1524 | } | ||
1525 | |||
1526 | static inline int security_inode_rmdir (struct inode *dir, | 1471 | static inline int security_inode_rmdir (struct inode *dir, |
1527 | struct dentry *dentry) | 1472 | struct dentry *dentry) |
1528 | { | 1473 | { |
@@ -1540,15 +1485,6 @@ static inline int security_inode_mknod (struct inode *dir, | |||
1540 | return security_ops->inode_mknod (dir, dentry, mode, dev); | 1485 | return security_ops->inode_mknod (dir, dentry, mode, dev); |
1541 | } | 1486 | } |
1542 | 1487 | ||
1543 | static inline void security_inode_post_mknod (struct inode *dir, | ||
1544 | struct dentry *dentry, | ||
1545 | int mode, dev_t dev) | ||
1546 | { | ||
1547 | if (dentry->d_inode && unlikely (IS_PRIVATE (dentry->d_inode))) | ||
1548 | return; | ||
1549 | security_ops->inode_post_mknod (dir, dentry, mode, dev); | ||
1550 | } | ||
1551 | |||
1552 | static inline int security_inode_rename (struct inode *old_dir, | 1488 | static inline int security_inode_rename (struct inode *old_dir, |
1553 | struct dentry *old_dentry, | 1489 | struct dentry *old_dentry, |
1554 | struct inode *new_dir, | 1490 | struct inode *new_dir, |
@@ -1561,18 +1497,6 @@ static inline int security_inode_rename (struct inode *old_dir, | |||
1561 | new_dir, new_dentry); | 1497 | new_dir, new_dentry); |
1562 | } | 1498 | } |
1563 | 1499 | ||
1564 | static inline void security_inode_post_rename (struct inode *old_dir, | ||
1565 | struct dentry *old_dentry, | ||
1566 | struct inode *new_dir, | ||
1567 | struct dentry *new_dentry) | ||
1568 | { | ||
1569 | if (unlikely (IS_PRIVATE (old_dentry->d_inode) || | ||
1570 | (new_dentry->d_inode && IS_PRIVATE (new_dentry->d_inode)))) | ||
1571 | return; | ||
1572 | security_ops->inode_post_rename (old_dir, old_dentry, | ||
1573 | new_dir, new_dentry); | ||
1574 | } | ||
1575 | |||
1576 | static inline int security_inode_readlink (struct dentry *dentry) | 1500 | static inline int security_inode_readlink (struct dentry *dentry) |
1577 | { | 1501 | { |
1578 | if (unlikely (IS_PRIVATE (dentry->d_inode))) | 1502 | if (unlikely (IS_PRIVATE (dentry->d_inode))) |
@@ -2171,6 +2095,15 @@ static inline int security_inode_alloc (struct inode *inode) | |||
2171 | 2095 | ||
2172 | static inline void security_inode_free (struct inode *inode) | 2096 | static inline void security_inode_free (struct inode *inode) |
2173 | { } | 2097 | { } |
2098 | |||
2099 | static inline int security_inode_init_security (struct inode *inode, | ||
2100 | struct inode *dir, | ||
2101 | char **name, | ||
2102 | void **value, | ||
2103 | size_t *len) | ||
2104 | { | ||
2105 | return -EOPNOTSUPP; | ||
2106 | } | ||
2174 | 2107 | ||
2175 | static inline int security_inode_create (struct inode *dir, | 2108 | static inline int security_inode_create (struct inode *dir, |
2176 | struct dentry *dentry, | 2109 | struct dentry *dentry, |
@@ -2179,11 +2112,6 @@ static inline int security_inode_create (struct inode *dir, | |||
2179 | return 0; | 2112 | return 0; |
2180 | } | 2113 | } |
2181 | 2114 | ||
2182 | static inline void security_inode_post_create (struct inode *dir, | ||
2183 | struct dentry *dentry, | ||
2184 | int mode) | ||
2185 | { } | ||
2186 | |||
2187 | static inline int security_inode_link (struct dentry *old_dentry, | 2115 | static inline int security_inode_link (struct dentry *old_dentry, |
2188 | struct inode *dir, | 2116 | struct inode *dir, |
2189 | struct dentry *new_dentry) | 2117 | struct dentry *new_dentry) |
@@ -2191,11 +2119,6 @@ static inline int security_inode_link (struct dentry *old_dentry, | |||
2191 | return 0; | 2119 | return 0; |
2192 | } | 2120 | } |
2193 | 2121 | ||
2194 | static inline void security_inode_post_link (struct dentry *old_dentry, | ||
2195 | struct inode *dir, | ||
2196 | struct dentry *new_dentry) | ||
2197 | { } | ||
2198 | |||
2199 | static inline int security_inode_unlink (struct inode *dir, | 2122 | static inline int security_inode_unlink (struct inode *dir, |
2200 | struct dentry *dentry) | 2123 | struct dentry *dentry) |
2201 | { | 2124 | { |
@@ -2209,11 +2132,6 @@ static inline int security_inode_symlink (struct inode *dir, | |||
2209 | return 0; | 2132 | return 0; |
2210 | } | 2133 | } |
2211 | 2134 | ||
2212 | static inline void security_inode_post_symlink (struct inode *dir, | ||
2213 | struct dentry *dentry, | ||
2214 | const char *old_name) | ||
2215 | { } | ||
2216 | |||
2217 | static inline int security_inode_mkdir (struct inode *dir, | 2135 | static inline int security_inode_mkdir (struct inode *dir, |
2218 | struct dentry *dentry, | 2136 | struct dentry *dentry, |
2219 | int mode) | 2137 | int mode) |
@@ -2221,11 +2139,6 @@ static inline int security_inode_mkdir (struct inode *dir, | |||
2221 | return 0; | 2139 | return 0; |
2222 | } | 2140 | } |
2223 | 2141 | ||
2224 | static inline void security_inode_post_mkdir (struct inode *dir, | ||
2225 | struct dentry *dentry, | ||
2226 | int mode) | ||
2227 | { } | ||
2228 | |||
2229 | static inline int security_inode_rmdir (struct inode *dir, | 2142 | static inline int security_inode_rmdir (struct inode *dir, |
2230 | struct dentry *dentry) | 2143 | struct dentry *dentry) |
2231 | { | 2144 | { |
@@ -2239,11 +2152,6 @@ static inline int security_inode_mknod (struct inode *dir, | |||
2239 | return 0; | 2152 | return 0; |
2240 | } | 2153 | } |
2241 | 2154 | ||
2242 | static inline void security_inode_post_mknod (struct inode *dir, | ||
2243 | struct dentry *dentry, | ||
2244 | int mode, dev_t dev) | ||
2245 | { } | ||
2246 | |||
2247 | static inline int security_inode_rename (struct inode *old_dir, | 2155 | static inline int security_inode_rename (struct inode *old_dir, |
2248 | struct dentry *old_dentry, | 2156 | struct dentry *old_dentry, |
2249 | struct inode *new_dir, | 2157 | struct inode *new_dir, |
@@ -2252,12 +2160,6 @@ static inline int security_inode_rename (struct inode *old_dir, | |||
2252 | return 0; | 2160 | return 0; |
2253 | } | 2161 | } |
2254 | 2162 | ||
2255 | static inline void security_inode_post_rename (struct inode *old_dir, | ||
2256 | struct dentry *old_dentry, | ||
2257 | struct inode *new_dir, | ||
2258 | struct dentry *new_dentry) | ||
2259 | { } | ||
2260 | |||
2261 | static inline int security_inode_readlink (struct dentry *dentry) | 2163 | static inline int security_inode_readlink (struct dentry *dentry) |
2262 | { | 2164 | { |
2263 | return 0; | 2165 | return 0; |
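A hedged sketch of the calling convention the new @inode_init_security documentation describes, as a filesystem might use it while creating an inode (the function and xattr handling below are illustrative, not taken from this patch):

#include <linux/security.h>
#include <linux/slab.h>

static int example_fs_init_security(struct inode *inode, struct inode *dir)
{
	char *name;
	void *value;
	size_t len;
	int err;

	err = security_inode_init_security(inode, dir, &name, &value, &len);
	if (err == -EOPNOTSUPP)
		return 0;		/* no label wanted for this inode */
	if (err)
		return err;		/* e.g. -ENOMEM */

	/* persist the attribute, e.g. as a "security.<name>" xattr;
	 * that step is filesystem-specific and omitted here */

	kfree(name);			/* caller owns both allocations */
	kfree(value);
	return 0;
}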
diff --git a/include/linux/slab.h b/include/linux/slab.h index 42a6bea58af3..1f356f3bbc64 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -118,7 +118,8 @@ extern void kfree(const void *); | |||
118 | extern unsigned int ksize(const void *); | 118 | extern unsigned int ksize(const void *); |
119 | 119 | ||
120 | #ifdef CONFIG_NUMA | 120 | #ifdef CONFIG_NUMA |
121 | extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node); | 121 | extern void *kmem_cache_alloc_node(kmem_cache_t *, |
122 | unsigned int __nocast flags, int node); | ||
122 | extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); | 123 | extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); |
123 | #else | 124 | #else |
124 | static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) | 125 | static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d6ba068719b6..cdc99a27840d 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -2,7 +2,48 @@ | |||
2 | #define __LINUX_SPINLOCK_H | 2 | #define __LINUX_SPINLOCK_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * include/linux/spinlock.h - generic locking declarations | 5 | * include/linux/spinlock.h - generic spinlock/rwlock declarations |
6 | * | ||
7 | * here's the role of the various spinlock/rwlock related include files: | ||
8 | * | ||
9 | * on SMP builds: | ||
10 | * | ||
11 | * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the | ||
12 | * initializers | ||
13 | * | ||
14 | * linux/spinlock_types.h: | ||
15 | * defines the generic type and initializers | ||
16 | * | ||
17 | * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel | ||
18 | * implementations, mostly inline assembly code | ||
19 | * | ||
20 | * (also included on UP-debug builds:) | ||
21 | * | ||
22 | * linux/spinlock_api_smp.h: | ||
23 | * contains the prototypes for the _spin_*() APIs. | ||
24 | * | ||
25 | * linux/spinlock.h: builds the final spin_*() APIs. | ||
26 | * | ||
27 | * on UP builds: | ||
28 | * | ||
29 | * linux/spinlock_type_up.h: | ||
30 | * contains the generic, simplified UP spinlock type. | ||
31 | * (which is an empty structure on non-debug builds) | ||
32 | * | ||
33 | * linux/spinlock_types.h: | ||
34 | * defines the generic type and initializers | ||
35 | * | ||
36 | * linux/spinlock_up.h: | ||
37 | * contains the __raw_spin_*()/etc. versions for UP | ||
38 | * builds (which are NOPs on non-debug, non-preempt | ||
39 | * builds). | ||
40 | * | ||
41 | * (included on UP-non-debug builds:) | ||
42 | * | ||
43 | * linux/spinlock_api_up.h: | ||
44 | * builds the _spin_*() APIs. | ||
45 | * | ||
46 | * linux/spinlock.h: builds the final spin_*() APIs. | ||
6 | */ | 47 | */ |
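A rough trace of the layering described above, for the SMP non-debug case (inferred from the mappings later in this file; conceptual, not a literal macro expansion):

/*
 *   spin_lock(lock)                          linux/spinlock.h
 *     _spin_lock(lock)                       linux/spinlock_api_smp.h
 *       preempt_disable();
 *       _raw_spin_lock(lock)                 (nondebug mapping below)
 *         __raw_spin_lock(&lock->raw_lock)   asm/spinlock.h
 */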
7 | 48 | ||
8 | #include <linux/config.h> | 49 | #include <linux/config.h> |
@@ -13,7 +54,6 @@ | |||
13 | #include <linux/kernel.h> | 54 | #include <linux/kernel.h> |
14 | #include <linux/stringify.h> | 55 | #include <linux/stringify.h> |
15 | 56 | ||
16 | #include <asm/processor.h> /* for cpu relax */ | ||
17 | #include <asm/system.h> | 57 | #include <asm/system.h> |
18 | 58 | ||
19 | /* | 59 | /* |
@@ -35,423 +75,84 @@ | |||
35 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) | 75 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) |
36 | 76 | ||
37 | /* | 77 | /* |
38 | * If CONFIG_SMP is set, pull in the _raw_* definitions | 78 | * Pull the raw_spinlock_t and raw_rwlock_t definitions: |
39 | */ | 79 | */ |
40 | #ifdef CONFIG_SMP | 80 | #include <linux/spinlock_types.h> |
41 | |||
42 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | ||
43 | #include <asm/spinlock.h> | ||
44 | |||
45 | int __lockfunc _spin_trylock(spinlock_t *lock); | ||
46 | int __lockfunc _read_trylock(rwlock_t *lock); | ||
47 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
48 | |||
49 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); | ||
50 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
51 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
52 | |||
53 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t); | ||
54 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
55 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
56 | |||
57 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) __acquires(spinlock_t); | ||
58 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | ||
59 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | ||
60 | |||
61 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t); | ||
62 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); | ||
63 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
64 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
65 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
66 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
67 | |||
68 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(spinlock_t); | ||
69 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t); | ||
70 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t); | ||
71 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | ||
72 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
73 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
74 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | ||
75 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
76 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
77 | |||
78 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | ||
79 | int __lockfunc generic_raw_read_trylock(rwlock_t *lock); | ||
80 | int in_lock_functions(unsigned long addr); | ||
81 | |||
82 | #else | ||
83 | 81 | ||
84 | #define in_lock_functions(ADDR) 0 | 82 | extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); |
85 | 83 | ||
86 | #if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK) | ||
87 | # define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic) | ||
88 | # define ATOMIC_DEC_AND_LOCK | ||
89 | #endif | ||
90 | |||
91 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
92 | |||
93 | #define SPINLOCK_MAGIC 0x1D244B3C | ||
94 | typedef struct { | ||
95 | unsigned long magic; | ||
96 | volatile unsigned long lock; | ||
97 | volatile unsigned int babble; | ||
98 | const char *module; | ||
99 | char *owner; | ||
100 | int oline; | ||
101 | } spinlock_t; | ||
102 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0} | ||
103 | |||
104 | #define spin_lock_init(x) \ | ||
105 | do { \ | ||
106 | (x)->magic = SPINLOCK_MAGIC; \ | ||
107 | (x)->lock = 0; \ | ||
108 | (x)->babble = 5; \ | ||
109 | (x)->module = __FILE__; \ | ||
110 | (x)->owner = NULL; \ | ||
111 | (x)->oline = 0; \ | ||
112 | } while (0) | ||
113 | |||
114 | #define CHECK_LOCK(x) \ | ||
115 | do { \ | ||
116 | if ((x)->magic != SPINLOCK_MAGIC) { \ | ||
117 | printk(KERN_ERR "%s:%d: spin_is_locked on uninitialized spinlock %p.\n", \ | ||
118 | __FILE__, __LINE__, (x)); \ | ||
119 | } \ | ||
120 | } while(0) | ||
121 | |||
122 | #define _raw_spin_lock(x) \ | ||
123 | do { \ | ||
124 | CHECK_LOCK(x); \ | ||
125 | if ((x)->lock&&(x)->babble) { \ | ||
126 | (x)->babble--; \ | ||
127 | printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \ | ||
128 | __FILE__,__LINE__, (x)->module, \ | ||
129 | (x), (x)->owner, (x)->oline); \ | ||
130 | } \ | ||
131 | (x)->lock = 1; \ | ||
132 | (x)->owner = __FILE__; \ | ||
133 | (x)->oline = __LINE__; \ | ||
134 | } while (0) | ||
135 | |||
136 | /* without debugging, spin_is_locked on UP always says | ||
137 | * FALSE. --> printk if already locked. */ | ||
138 | #define spin_is_locked(x) \ | ||
139 | ({ \ | ||
140 | CHECK_LOCK(x); \ | ||
141 | if ((x)->lock&&(x)->babble) { \ | ||
142 | (x)->babble--; \ | ||
143 | printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \ | ||
144 | __FILE__,__LINE__, (x)->module, \ | ||
145 | (x), (x)->owner, (x)->oline); \ | ||
146 | } \ | ||
147 | 0; \ | ||
148 | }) | ||
149 | |||
150 | /* with debugging, assert_spin_locked() on UP does check | ||
151 | * the lock value properly */ | ||
152 | #define assert_spin_locked(x) \ | ||
153 | ({ \ | ||
154 | CHECK_LOCK(x); \ | ||
155 | BUG_ON(!(x)->lock); \ | ||
156 | }) | ||
157 | |||
158 | /* without debugging, spin_trylock on UP always says | ||
159 | * TRUE. --> printk if already locked. */ | ||
160 | #define _raw_spin_trylock(x) \ | ||
161 | ({ \ | ||
162 | CHECK_LOCK(x); \ | ||
163 | if ((x)->lock&&(x)->babble) { \ | ||
164 | (x)->babble--; \ | ||
165 | printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \ | ||
166 | __FILE__,__LINE__, (x)->module, \ | ||
167 | (x), (x)->owner, (x)->oline); \ | ||
168 | } \ | ||
169 | (x)->lock = 1; \ | ||
170 | (x)->owner = __FILE__; \ | ||
171 | (x)->oline = __LINE__; \ | ||
172 | 1; \ | ||
173 | }) | ||
174 | |||
175 | #define spin_unlock_wait(x) \ | ||
176 | do { \ | ||
177 | CHECK_LOCK(x); \ | ||
178 | if ((x)->lock&&(x)->babble) { \ | ||
179 | (x)->babble--; \ | ||
180 | printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \ | ||
181 | __FILE__,__LINE__, (x)->module, (x), \ | ||
182 | (x)->owner, (x)->oline); \ | ||
183 | }\ | ||
184 | } while (0) | ||
185 | |||
186 | #define _raw_spin_unlock(x) \ | ||
187 | do { \ | ||
188 | CHECK_LOCK(x); \ | ||
189 | if (!(x)->lock&&(x)->babble) { \ | ||
190 | (x)->babble--; \ | ||
191 | printk("%s:%d: spin_unlock(%s:%p) not locked\n", \ | ||
192 | __FILE__,__LINE__, (x)->module, (x));\ | ||
193 | } \ | ||
194 | (x)->lock = 0; \ | ||
195 | } while (0) | ||
196 | #else | ||
197 | /* | 84 | /* |
198 | * gcc versions before ~2.95 have a nasty bug with empty initializers. | 85 | * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them): |
199 | */ | 86 | */ |
200 | #if (__GNUC__ > 2) | 87 | #if defined(CONFIG_SMP) |
201 | typedef struct { } spinlock_t; | 88 | # include <asm/spinlock.h> |
202 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { } | ||
203 | #else | 89 | #else |
204 | typedef struct { int gcc_is_buggy; } spinlock_t; | 90 | # include <linux/spinlock_up.h> |
205 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | ||
206 | #endif | 91 | #endif |
207 | 92 | ||
93 | #define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) | ||
94 | #define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0) | ||
95 | |||
96 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | ||
97 | |||
98 | /** | ||
99 | * spin_unlock_wait - wait until the spinlock gets unlocked | ||
100 | * @lock: the spinlock in question. | ||
101 | */ | ||
102 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) | ||
103 | |||
208 | /* | 104 | /* |
209 | * If CONFIG_SMP is unset, declare the _raw_* definitions as nops | 105 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: |
210 | */ | 106 | */ |
211 | #define spin_lock_init(lock) do { (void)(lock); } while(0) | 107 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
212 | #define _raw_spin_lock(lock) do { (void)(lock); } while(0) | 108 | # include <linux/spinlock_api_smp.h> |
213 | #define spin_is_locked(lock) ((void)(lock), 0) | ||
214 | #define assert_spin_locked(lock) do { (void)(lock); } while(0) | ||
215 | #define _raw_spin_trylock(lock) (((void)(lock), 1)) | ||
216 | #define spin_unlock_wait(lock) (void)(lock) | ||
217 | #define _raw_spin_unlock(lock) do { (void)(lock); } while(0) | ||
218 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
219 | |||
220 | /* RW spinlocks: No debug version */ | ||
221 | |||
222 | #if (__GNUC__ > 2) | ||
223 | typedef struct { } rwlock_t; | ||
224 | #define RW_LOCK_UNLOCKED (rwlock_t) { } | ||
225 | #else | 109 | #else |
226 | typedef struct { int gcc_is_buggy; } rwlock_t; | 110 | # include <linux/spinlock_api_up.h> |
227 | #define RW_LOCK_UNLOCKED (rwlock_t) { 0 } | ||
228 | #endif | 111 | #endif |
229 | 112 | ||
230 | #define rwlock_init(lock) do { (void)(lock); } while(0) | 113 | #ifdef CONFIG_DEBUG_SPINLOCK |
231 | #define _raw_read_lock(lock) do { (void)(lock); } while(0) | 114 | extern void _raw_spin_lock(spinlock_t *lock); |
232 | #define _raw_read_unlock(lock) do { (void)(lock); } while(0) | 115 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
233 | #define _raw_write_lock(lock) do { (void)(lock); } while(0) | 116 | extern int _raw_spin_trylock(spinlock_t *lock); |
234 | #define _raw_write_unlock(lock) do { (void)(lock); } while(0) | 117 | extern void _raw_spin_unlock(spinlock_t *lock); |
235 | #define read_can_lock(lock) (((void)(lock), 1)) | 118 | |
236 | #define write_can_lock(lock) (((void)(lock), 1)) | 119 | extern void _raw_read_lock(rwlock_t *lock); |
237 | #define _raw_read_trylock(lock) ({ (void)(lock); (1); }) | 120 | extern int _raw_read_trylock(rwlock_t *lock); |
238 | #define _raw_write_trylock(lock) ({ (void)(lock); (1); }) | 121 | extern void _raw_read_unlock(rwlock_t *lock); |
239 | 122 | extern void _raw_write_lock(rwlock_t *lock); | |
240 | #define _spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \ | 123 | extern int _raw_write_trylock(rwlock_t *lock); |
241 | 1 : ({preempt_enable(); 0;});}) | 124 | extern void _raw_write_unlock(rwlock_t *lock); |
242 | 125 | #else | |
243 | #define _read_trylock(lock) ({preempt_disable();_raw_read_trylock(lock) ? \ | 126 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
244 | 1 : ({preempt_enable(); 0;});}) | 127 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
245 | 128 | # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) | |
246 | #define _write_trylock(lock) ({preempt_disable(); _raw_write_trylock(lock) ? \ | 129 | # define _raw_spin_lock_flags(lock, flags) \ |
247 | 1 : ({preempt_enable(); 0;});}) | 130 | __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) |
248 | 131 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) | |
249 | #define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \ | 132 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
250 | _raw_spin_trylock(lock) ? \ | 133 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
251 | 1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});}) | 134 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
252 | 135 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) | |
253 | #define _spin_lock(lock) \ | 136 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
254 | do { \ | 137 | #endif |
255 | preempt_disable(); \ | ||
256 | _raw_spin_lock(lock); \ | ||
257 | __acquire(lock); \ | ||
258 | } while(0) | ||
259 | |||
260 | #define _write_lock(lock) \ | ||
261 | do { \ | ||
262 | preempt_disable(); \ | ||
263 | _raw_write_lock(lock); \ | ||
264 | __acquire(lock); \ | ||
265 | } while(0) | ||
266 | |||
267 | #define _read_lock(lock) \ | ||
268 | do { \ | ||
269 | preempt_disable(); \ | ||
270 | _raw_read_lock(lock); \ | ||
271 | __acquire(lock); \ | ||
272 | } while(0) | ||
273 | |||
274 | #define _spin_unlock(lock) \ | ||
275 | do { \ | ||
276 | _raw_spin_unlock(lock); \ | ||
277 | preempt_enable(); \ | ||
278 | __release(lock); \ | ||
279 | } while (0) | ||
280 | |||
281 | #define _write_unlock(lock) \ | ||
282 | do { \ | ||
283 | _raw_write_unlock(lock); \ | ||
284 | preempt_enable(); \ | ||
285 | __release(lock); \ | ||
286 | } while(0) | ||
287 | |||
288 | #define _read_unlock(lock) \ | ||
289 | do { \ | ||
290 | _raw_read_unlock(lock); \ | ||
291 | preempt_enable(); \ | ||
292 | __release(lock); \ | ||
293 | } while(0) | ||
294 | |||
295 | #define _spin_lock_irqsave(lock, flags) \ | ||
296 | do { \ | ||
297 | local_irq_save(flags); \ | ||
298 | preempt_disable(); \ | ||
299 | _raw_spin_lock(lock); \ | ||
300 | __acquire(lock); \ | ||
301 | } while (0) | ||
302 | |||
303 | #define _spin_lock_irq(lock) \ | ||
304 | do { \ | ||
305 | local_irq_disable(); \ | ||
306 | preempt_disable(); \ | ||
307 | _raw_spin_lock(lock); \ | ||
308 | __acquire(lock); \ | ||
309 | } while (0) | ||
310 | |||
311 | #define _spin_lock_bh(lock) \ | ||
312 | do { \ | ||
313 | local_bh_disable(); \ | ||
314 | preempt_disable(); \ | ||
315 | _raw_spin_lock(lock); \ | ||
316 | __acquire(lock); \ | ||
317 | } while (0) | ||
318 | |||
319 | #define _read_lock_irqsave(lock, flags) \ | ||
320 | do { \ | ||
321 | local_irq_save(flags); \ | ||
322 | preempt_disable(); \ | ||
323 | _raw_read_lock(lock); \ | ||
324 | __acquire(lock); \ | ||
325 | } while (0) | ||
326 | |||
327 | #define _read_lock_irq(lock) \ | ||
328 | do { \ | ||
329 | local_irq_disable(); \ | ||
330 | preempt_disable(); \ | ||
331 | _raw_read_lock(lock); \ | ||
332 | __acquire(lock); \ | ||
333 | } while (0) | ||
334 | |||
335 | #define _read_lock_bh(lock) \ | ||
336 | do { \ | ||
337 | local_bh_disable(); \ | ||
338 | preempt_disable(); \ | ||
339 | _raw_read_lock(lock); \ | ||
340 | __acquire(lock); \ | ||
341 | } while (0) | ||
342 | |||
343 | #define _write_lock_irqsave(lock, flags) \ | ||
344 | do { \ | ||
345 | local_irq_save(flags); \ | ||
346 | preempt_disable(); \ | ||
347 | _raw_write_lock(lock); \ | ||
348 | __acquire(lock); \ | ||
349 | } while (0) | ||
350 | 138 | ||
351 | #define _write_lock_irq(lock) \ | 139 | #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) |
352 | do { \ | 140 | #define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) |
353 | local_irq_disable(); \ | ||
354 | preempt_disable(); \ | ||
355 | _raw_write_lock(lock); \ | ||
356 | __acquire(lock); \ | ||
357 | } while (0) | ||
358 | |||
359 | #define _write_lock_bh(lock) \ | ||
360 | do { \ | ||
361 | local_bh_disable(); \ | ||
362 | preempt_disable(); \ | ||
363 | _raw_write_lock(lock); \ | ||
364 | __acquire(lock); \ | ||
365 | } while (0) | ||
366 | |||
367 | #define _spin_unlock_irqrestore(lock, flags) \ | ||
368 | do { \ | ||
369 | _raw_spin_unlock(lock); \ | ||
370 | local_irq_restore(flags); \ | ||
371 | preempt_enable(); \ | ||
372 | __release(lock); \ | ||
373 | } while (0) | ||
374 | |||
375 | #define _spin_unlock_irq(lock) \ | ||
376 | do { \ | ||
377 | _raw_spin_unlock(lock); \ | ||
378 | local_irq_enable(); \ | ||
379 | preempt_enable(); \ | ||
380 | __release(lock); \ | ||
381 | } while (0) | ||
382 | |||
383 | #define _spin_unlock_bh(lock) \ | ||
384 | do { \ | ||
385 | _raw_spin_unlock(lock); \ | ||
386 | preempt_enable_no_resched(); \ | ||
387 | local_bh_enable(); \ | ||
388 | __release(lock); \ | ||
389 | } while (0) | ||
390 | |||
391 | #define _write_unlock_bh(lock) \ | ||
392 | do { \ | ||
393 | _raw_write_unlock(lock); \ | ||
394 | preempt_enable_no_resched(); \ | ||
395 | local_bh_enable(); \ | ||
396 | __release(lock); \ | ||
397 | } while (0) | ||
398 | |||
399 | #define _read_unlock_irqrestore(lock, flags) \ | ||
400 | do { \ | ||
401 | _raw_read_unlock(lock); \ | ||
402 | local_irq_restore(flags); \ | ||
403 | preempt_enable(); \ | ||
404 | __release(lock); \ | ||
405 | } while (0) | ||
406 | |||
407 | #define _write_unlock_irqrestore(lock, flags) \ | ||
408 | do { \ | ||
409 | _raw_write_unlock(lock); \ | ||
410 | local_irq_restore(flags); \ | ||
411 | preempt_enable(); \ | ||
412 | __release(lock); \ | ||
413 | } while (0) | ||
414 | |||
415 | #define _read_unlock_irq(lock) \ | ||
416 | do { \ | ||
417 | _raw_read_unlock(lock); \ | ||
418 | local_irq_enable(); \ | ||
419 | preempt_enable(); \ | ||
420 | __release(lock); \ | ||
421 | } while (0) | ||
422 | |||
423 | #define _read_unlock_bh(lock) \ | ||
424 | do { \ | ||
425 | _raw_read_unlock(lock); \ | ||
426 | preempt_enable_no_resched(); \ | ||
427 | local_bh_enable(); \ | ||
428 | __release(lock); \ | ||
429 | } while (0) | ||
430 | |||
431 | #define _write_unlock_irq(lock) \ | ||
432 | do { \ | ||
433 | _raw_write_unlock(lock); \ | ||
434 | local_irq_enable(); \ | ||
435 | preempt_enable(); \ | ||
436 | __release(lock); \ | ||
437 | } while (0) | ||
438 | |||
439 | #endif /* !SMP */ | ||
440 | 141 | ||
441 | /* | 142 | /* |
442 | * Define the various spin_lock and rw_lock methods. Note we define these | 143 | * Define the various spin_lock and rw_lock methods. Note we define these |
443 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | 144 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
444 | * methods are defined as nops in the case they are not required. | 145 | * methods are defined as nops in the case they are not required. |
445 | */ | 146 | */ |
446 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) | 147 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) |
447 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) | 148 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) |
448 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) | 149 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) |
449 | 150 | ||
450 | #define spin_lock(lock) _spin_lock(lock) | 151 | #define spin_lock(lock) _spin_lock(lock) |
451 | #define write_lock(lock) _write_lock(lock) | 152 | #define write_lock(lock) _write_lock(lock) |
452 | #define read_lock(lock) _read_lock(lock) | 153 | #define read_lock(lock) _read_lock(lock) |
453 | 154 | ||
454 | #ifdef CONFIG_SMP | 155 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
455 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) | 156 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) |
456 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) | 157 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) |
457 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) | 158 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) |
@@ -470,137 +171,59 @@ do { \ | |||
470 | #define write_lock_irq(lock) _write_lock_irq(lock) | 171 | #define write_lock_irq(lock) _write_lock_irq(lock) |
471 | #define write_lock_bh(lock) _write_lock_bh(lock) | 172 | #define write_lock_bh(lock) _write_lock_bh(lock) |
472 | 173 | ||
473 | #define spin_unlock(lock) _spin_unlock(lock) | 174 | #define spin_unlock(lock) _spin_unlock(lock) |
474 | #define write_unlock(lock) _write_unlock(lock) | 175 | #define write_unlock(lock) _write_unlock(lock) |
475 | #define read_unlock(lock) _read_unlock(lock) | 176 | #define read_unlock(lock) _read_unlock(lock) |
476 | 177 | ||
477 | #define spin_unlock_irqrestore(lock, flags) _spin_unlock_irqrestore(lock, flags) | 178 | #define spin_unlock_irqrestore(lock, flags) \ |
179 | _spin_unlock_irqrestore(lock, flags) | ||
478 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) | 180 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) |
479 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) | 181 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) |
480 | 182 | ||
481 | #define read_unlock_irqrestore(lock, flags) _read_unlock_irqrestore(lock, flags) | 183 | #define read_unlock_irqrestore(lock, flags) \ |
482 | #define read_unlock_irq(lock) _read_unlock_irq(lock) | 184 | _read_unlock_irqrestore(lock, flags) |
483 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | 185 | #define read_unlock_irq(lock) _read_unlock_irq(lock) |
186 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | ||
484 | 187 | ||
485 | #define write_unlock_irqrestore(lock, flags) _write_unlock_irqrestore(lock, flags) | 188 | #define write_unlock_irqrestore(lock, flags) \ |
486 | #define write_unlock_irq(lock) _write_unlock_irq(lock) | 189 | _write_unlock_irqrestore(lock, flags) |
487 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | 190 | #define write_unlock_irq(lock) _write_unlock_irq(lock) |
191 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | ||
488 | 192 | ||
489 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) | 193 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) |
490 | 194 | ||
491 | #define spin_trylock_irq(lock) \ | 195 | #define spin_trylock_irq(lock) \ |
492 | ({ \ | 196 | ({ \ |
493 | local_irq_disable(); \ | 197 | local_irq_disable(); \ |
494 | _spin_trylock(lock) ? \ | 198 | _spin_trylock(lock) ? \ |
495 | 1 : ({local_irq_enable(); 0; }); \ | 199 | 1 : ({ local_irq_enable(); 0; }); \ |
496 | }) | 200 | }) |
497 | 201 | ||
498 | #define spin_trylock_irqsave(lock, flags) \ | 202 | #define spin_trylock_irqsave(lock, flags) \ |
499 | ({ \ | 203 | ({ \ |
500 | local_irq_save(flags); \ | 204 | local_irq_save(flags); \ |
501 | _spin_trylock(lock) ? \ | 205 | _spin_trylock(lock) ? \ |
502 | 1 : ({local_irq_restore(flags); 0;}); \ | 206 | 1 : ({ local_irq_restore(flags); 0; }); \ |
503 | }) | 207 | }) |
504 | 208 | ||
505 | #ifdef CONFIG_LOCKMETER | ||
506 | extern void _metered_spin_lock (spinlock_t *lock); | ||
507 | extern void _metered_spin_unlock (spinlock_t *lock); | ||
508 | extern int _metered_spin_trylock(spinlock_t *lock); | ||
509 | extern void _metered_read_lock (rwlock_t *lock); | ||
510 | extern void _metered_read_unlock (rwlock_t *lock); | ||
511 | extern void _metered_write_lock (rwlock_t *lock); | ||
512 | extern void _metered_write_unlock (rwlock_t *lock); | ||
513 | extern int _metered_read_trylock (rwlock_t *lock); | ||
514 | extern int _metered_write_trylock(rwlock_t *lock); | ||
515 | #endif | ||
516 | |||
517 | /* "lock on reference count zero" */ | ||
518 | #ifndef ATOMIC_DEC_AND_LOCK | ||
519 | #include <asm/atomic.h> | ||
520 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | ||
521 | #endif | ||
522 | |||
523 | #define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock)) | ||
524 | |||
525 | /* | ||
526 | * bit-based spin_lock() | ||
527 | * | ||
528 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | ||
529 | * are significantly faster. | ||
530 | */ | ||
531 | static inline void bit_spin_lock(int bitnum, unsigned long *addr) | ||
532 | { | ||
533 | /* | ||
534 | * Assuming the lock is uncontended, this never enters | ||
535 | * the body of the outer loop. If it is contended, then | ||
536 | * within the inner loop a non-atomic test is used to | ||
537 | * busywait with less bus contention for a good time to | ||
538 | * attempt to acquire the lock bit. | ||
539 | */ | ||
540 | preempt_disable(); | ||
541 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
542 | while (test_and_set_bit(bitnum, addr)) { | ||
543 | while (test_bit(bitnum, addr)) { | ||
544 | preempt_enable(); | ||
545 | cpu_relax(); | ||
546 | preempt_disable(); | ||
547 | } | ||
548 | } | ||
549 | #endif | ||
550 | __acquire(bitlock); | ||
551 | } | ||
552 | |||
553 | /* | 209 | /* |
554 | * Return true if it was acquired | 210 | * Pull the atomic_t declaration: |
211 | * (asm-mips/atomic.h needs above definitions) | ||
555 | */ | 212 | */ |
556 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | 213 | #include <asm/atomic.h> |
557 | { | 214 | /** |
558 | preempt_disable(); | 215 | * atomic_dec_and_lock - lock on reaching reference count zero |
559 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 216 | * @atomic: the atomic counter |
560 | if (test_and_set_bit(bitnum, addr)) { | 217 | * @lock: the spinlock in question |
561 | preempt_enable(); | ||
562 | return 0; | ||
563 | } | ||
564 | #endif | ||
565 | __acquire(bitlock); | ||
566 | return 1; | ||
567 | } | ||
568 | |||
569 | /* | ||
570 | * bit-based spin_unlock() | ||
571 | */ | ||
572 | static inline void bit_spin_unlock(int bitnum, unsigned long *addr) | ||
573 | { | ||
574 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
575 | BUG_ON(!test_bit(bitnum, addr)); | ||
576 | smp_mb__before_clear_bit(); | ||
577 | clear_bit(bitnum, addr); | ||
578 | #endif | ||
579 | preempt_enable(); | ||
580 | __release(bitlock); | ||
581 | } | ||
582 | |||
583 | /* | ||
584 | * Return true if the lock is held. | ||
585 | */ | 218 | */ |
586 | static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) | 219 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); |
587 | { | 220 | #define atomic_dec_and_lock(atomic, lock) \ |
588 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 221 | __cond_lock(_atomic_dec_and_lock(atomic, lock)) |
589 | return test_bit(bitnum, addr); | ||
590 | #elif defined CONFIG_PREEMPT | ||
591 | return preempt_count(); | ||
592 | #else | ||
593 | return 1; | ||
594 | #endif | ||
595 | } | ||
596 | |||
597 | #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED | ||
598 | #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED | ||
599 | 222 | ||
600 | /** | 223 | /** |
601 | * spin_can_lock - would spin_trylock() succeed? | 224 | * spin_can_lock - would spin_trylock() succeed? |
602 | * @lock: the spinlock in question. | 225 | * @lock: the spinlock in question. |
603 | */ | 226 | */ |
604 | #define spin_can_lock(lock) (!spin_is_locked(lock)) | 227 | #define spin_can_lock(lock) (!spin_is_locked(lock)) |
605 | 228 | ||
606 | #endif /* __LINUX_SPINLOCK_H */ | 229 | #endif /* __LINUX_SPINLOCK_H */ |
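The net effect of the spinlock.h rework above is that callers keep the familiar entry points while the SMP/UP plumbing moves into spinlock_api_smp.h and spinlock_api_up.h (new files further down). A minimal caller-side sketch of the calls whose definitions are being shuffled here; my_lock, my_count, my_critical and my_put are made-up names, not part of this patch:

    #include <linux/spinlock.h>
    #include <asm/atomic.h>

    static DEFINE_SPINLOCK(my_lock);            /* static init, SPIN_LOCK_UNLOCKED */
    static atomic_t my_count = ATOMIC_INIT(1);

    static void my_critical(void)
    {
            unsigned long flags;

            /* IRQ-safe critical section: save flags, take lock, restore on exit */
            spin_lock_irqsave(&my_lock, flags);
            /* ... touch data shared with interrupt context ... */
            spin_unlock_irqrestore(&my_lock, flags);
    }

    static void my_put(void)
    {
            /* atomic_dec_and_lock(): take my_lock only when the count hits zero */
            if (atomic_dec_and_lock(&my_count, &my_lock)) {
                    /* ... final teardown under the lock ... */
                    spin_unlock(&my_lock);
            }
    }

On SMP (or with CONFIG_DEBUG_SPINLOCK) these resolve to the out-of-line _spin_*() functions declared in spinlock_api_smp.h; on UP they collapse into the preempt/irq bookkeeping macros in spinlock_api_up.h.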
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h new file mode 100644 index 000000000000..78e6989ffb54 --- /dev/null +++ b/include/linux/spinlock_api_smp.h | |||
@@ -0,0 +1,57 @@ | |||
1 | #ifndef __LINUX_SPINLOCK_API_SMP_H | ||
2 | #define __LINUX_SPINLOCK_API_SMP_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | /* | ||
9 | * include/linux/spinlock_api_smp.h | ||
10 | * | ||
11 | * spinlock API declarations on SMP (and debug) | ||
12 | * (implemented in kernel/spinlock.c) | ||
13 | * | ||
14 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
15 | * Released under the General Public License (GPL). | ||
16 | */ | ||
17 | |||
18 | int in_lock_functions(unsigned long addr); | ||
19 | |||
20 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | ||
21 | |||
22 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); | ||
23 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
24 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); | ||
25 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); | ||
26 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
27 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | ||
28 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t); | ||
29 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
30 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | ||
31 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
32 | __acquires(spinlock_t); | ||
33 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
34 | __acquires(rwlock_t); | ||
35 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
36 | __acquires(rwlock_t); | ||
37 | int __lockfunc _spin_trylock(spinlock_t *lock); | ||
38 | int __lockfunc _read_trylock(rwlock_t *lock); | ||
39 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
40 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | ||
41 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t); | ||
42 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
43 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t); | ||
44 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t); | ||
45 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
46 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | ||
47 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t); | ||
48 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
49 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | ||
50 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | ||
51 | __releases(spinlock_t); | ||
52 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
53 | __releases(rwlock_t); | ||
54 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
55 | __releases(rwlock_t); | ||
56 | |||
57 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | ||
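The header above only declares the out-of-line lock functions; their bodies live in kernel/spinlock.c, which is not part of this hunk. As a rough idea of what one of them does, a simplified sketch of _spin_lock_irqsave() (ignoring the CONFIG_PREEMPT break_lock handling; not the literal kernel/spinlock.c code):

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    /* sketch only: out-of-line _spin_lock_irqsave(), as declared above */
    unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
    {
            unsigned long flags;

            local_irq_save(flags);          /* disable IRQs, remember old state */
            preempt_disable();              /* no preemption while spinning/holding */
            _raw_spin_lock_flags(lock, &flags);
            return flags;                   /* caller assigns this to its `flags' */
    }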
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h new file mode 100644 index 000000000000..cd81cee566f4 --- /dev/null +++ b/include/linux/spinlock_api_up.h | |||
@@ -0,0 +1,80 @@ | |||
1 | #ifndef __LINUX_SPINLOCK_API_UP_H | ||
2 | #define __LINUX_SPINLOCK_API_UP_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | /* | ||
9 | * include/linux/spinlock_api_up.h | ||
10 | * | ||
11 | * spinlock API implementation on UP-nondebug (inlined implementation) | ||
12 | * | ||
13 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
14 | * Released under the General Public License (GPL). | ||
15 | */ | ||
16 | |||
17 | #define in_lock_functions(ADDR) 0 | ||
18 | |||
19 | #define assert_spin_locked(lock) do { (void)(lock); } while (0) | ||
20 | |||
21 | /* | ||
22 | * In the UP-nondebug case there's no real locking going on, so the | ||
23 | * only thing we have to do is to keep the preempt counts and irq | ||
24 | * flags straight, to supress compiler warnings of unused lock | ||
25 | * variables, and to add the proper checker annotations: | ||
26 | */ | ||
27 | #define __LOCK(lock) \ | ||
28 | do { preempt_disable(); __acquire(lock); (void)(lock); } while (0) | ||
29 | |||
30 | #define __LOCK_BH(lock) \ | ||
31 | do { local_bh_disable(); __LOCK(lock); } while (0) | ||
32 | |||
33 | #define __LOCK_IRQ(lock) \ | ||
34 | do { local_irq_disable(); __LOCK(lock); } while (0) | ||
35 | |||
36 | #define __LOCK_IRQSAVE(lock, flags) \ | ||
37 | do { local_irq_save(flags); __LOCK(lock); } while (0) | ||
38 | |||
39 | #define __UNLOCK(lock) \ | ||
40 | do { preempt_enable(); __release(lock); (void)(lock); } while (0) | ||
41 | |||
42 | #define __UNLOCK_BH(lock) \ | ||
43 | do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) | ||
44 | |||
45 | #define __UNLOCK_IRQ(lock) \ | ||
46 | do { local_irq_enable(); __UNLOCK(lock); } while (0) | ||
47 | |||
48 | #define __UNLOCK_IRQRESTORE(lock, flags) \ | ||
49 | do { local_irq_restore(flags); __UNLOCK(lock); } while (0) | ||
50 | |||
51 | #define _spin_lock(lock) __LOCK(lock) | ||
52 | #define _read_lock(lock) __LOCK(lock) | ||
53 | #define _write_lock(lock) __LOCK(lock) | ||
54 | #define _spin_lock_bh(lock) __LOCK_BH(lock) | ||
55 | #define _read_lock_bh(lock) __LOCK_BH(lock) | ||
56 | #define _write_lock_bh(lock) __LOCK_BH(lock) | ||
57 | #define _spin_lock_irq(lock) __LOCK_IRQ(lock) | ||
58 | #define _read_lock_irq(lock) __LOCK_IRQ(lock) | ||
59 | #define _write_lock_irq(lock) __LOCK_IRQ(lock) | ||
60 | #define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
61 | #define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
62 | #define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | ||
63 | #define _spin_trylock(lock) ({ __LOCK(lock); 1; }) | ||
64 | #define _read_trylock(lock) ({ __LOCK(lock); 1; }) | ||
65 | #define _write_trylock(lock) ({ __LOCK(lock); 1; }) | ||
66 | #define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) | ||
67 | #define _spin_unlock(lock) __UNLOCK(lock) | ||
68 | #define _read_unlock(lock) __UNLOCK(lock) | ||
69 | #define _write_unlock(lock) __UNLOCK(lock) | ||
70 | #define _spin_unlock_bh(lock) __UNLOCK_BH(lock) | ||
71 | #define _write_unlock_bh(lock) __UNLOCK_BH(lock) | ||
72 | #define _read_unlock_bh(lock) __UNLOCK_BH(lock) | ||
73 | #define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
74 | #define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
75 | #define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) | ||
76 | #define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
77 | #define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
78 | #define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | ||
79 | |||
80 | #endif /* __LINUX_SPINLOCK_API_UP_H */ | ||
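On UP without debugging there is nothing to spin on, so the whole API collapses into preempt/irq bookkeeping plus (void) casts that keep the lock variable "used". A sketch of what a locked section boils down to under these macros; my_lock and the function name are made up:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);

    static void expansion_illustration(void)
    {
            unsigned long flags;

            /* roughly what _spin_lock_irqsave(&my_lock, flags) expands to here: */
            local_irq_save(flags);          /* from __LOCK_IRQSAVE */
            preempt_disable();              /* from __LOCK */
            __acquire(&my_lock);            /* sparse annotation only */
            (void)(&my_lock);               /* silences "unused variable" warnings */

            /* ... critical section ... */

            /* and _spin_unlock_irqrestore(&my_lock, flags): */
            local_irq_restore(flags);       /* from __UNLOCK_IRQRESTORE */
            preempt_enable();               /* from __UNLOCK */
            __release(&my_lock);
            (void)(&my_lock);
    }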
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h new file mode 100644 index 000000000000..9cb51e070390 --- /dev/null +++ b/include/linux/spinlock_types.h | |||
@@ -0,0 +1,67 @@ | |||
1 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
2 | #define __LINUX_SPINLOCK_TYPES_H | ||
3 | |||
4 | /* | ||
5 | * include/linux/spinlock_types.h - generic spinlock type definitions | ||
6 | * and initializers | ||
7 | * | ||
8 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
9 | * Released under the General Public License (GPL). | ||
10 | */ | ||
11 | |||
12 | #if defined(CONFIG_SMP) | ||
13 | # include <asm/spinlock_types.h> | ||
14 | #else | ||
15 | # include <linux/spinlock_types_up.h> | ||
16 | #endif | ||
17 | |||
18 | typedef struct { | ||
19 | raw_spinlock_t raw_lock; | ||
20 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | ||
21 | unsigned int break_lock; | ||
22 | #endif | ||
23 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
24 | unsigned int magic, owner_cpu; | ||
25 | void *owner; | ||
26 | #endif | ||
27 | } spinlock_t; | ||
28 | |||
29 | #define SPINLOCK_MAGIC 0xdead4ead | ||
30 | |||
31 | typedef struct { | ||
32 | raw_rwlock_t raw_lock; | ||
33 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | ||
34 | unsigned int break_lock; | ||
35 | #endif | ||
36 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
37 | unsigned int magic, owner_cpu; | ||
38 | void *owner; | ||
39 | #endif | ||
40 | } rwlock_t; | ||
41 | |||
42 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
43 | |||
44 | #define SPINLOCK_OWNER_INIT ((void *)-1L) | ||
45 | |||
46 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
47 | # define SPIN_LOCK_UNLOCKED \ | ||
48 | (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ | ||
49 | .magic = SPINLOCK_MAGIC, \ | ||
50 | .owner = SPINLOCK_OWNER_INIT, \ | ||
51 | .owner_cpu = -1 } | ||
52 | #define RW_LOCK_UNLOCKED \ | ||
53 | (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ | ||
54 | .magic = RWLOCK_MAGIC, \ | ||
55 | .owner = SPINLOCK_OWNER_INIT, \ | ||
56 | .owner_cpu = -1 } | ||
57 | #else | ||
58 | # define SPIN_LOCK_UNLOCKED \ | ||
59 | (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } | ||
60 | #define RW_LOCK_UNLOCKED \ | ||
61 | (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } | ||
62 | #endif | ||
63 | |||
64 | #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED | ||
65 | #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED | ||
66 | |||
67 | #endif /* __LINUX_SPINLOCK_TYPES_H */ | ||
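With these initializers, locks can be instantiated either statically or at runtime. A short sketch (stats_lock, table_lock and struct my_dev are hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(stats_lock);     /* spinlock_t stats_lock = SPIN_LOCK_UNLOCKED */
    static DEFINE_RWLOCK(table_lock);       /* rwlock_t table_lock = RW_LOCK_UNLOCKED */

    struct my_dev {
            spinlock_t lock;                /* embedded lock, initialized at runtime */
    };

    static void my_dev_setup(struct my_dev *dev)
    {
            spin_lock_init(&dev->lock);     /* assigns SPIN_LOCK_UNLOCKED */
    }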
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h new file mode 100644 index 000000000000..def2d173a8db --- /dev/null +++ b/include/linux/spinlock_types_up.h | |||
@@ -0,0 +1,51 @@ | |||
1 | #ifndef __LINUX_SPINLOCK_TYPES_UP_H | ||
2 | #define __LINUX_SPINLOCK_TYPES_UP_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | /* | ||
9 | * include/linux/spinlock_types_up.h - spinlock type definitions for UP | ||
10 | * | ||
11 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
12 | * Released under the General Public License (GPL). | ||
13 | */ | ||
14 | |||
15 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
16 | |||
17 | typedef struct { | ||
18 | volatile unsigned int slock; | ||
19 | } raw_spinlock_t; | ||
20 | |||
21 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | ||
22 | |||
23 | #else | ||
24 | |||
25 | /* | ||
26 | * All gcc 2.95 versions and early versions of 2.96 have a nasty bug | ||
27 | * with empty initializers. | ||
28 | */ | ||
29 | #if (__GNUC__ > 2) | ||
30 | typedef struct { } raw_spinlock_t; | ||
31 | |||
32 | #define __RAW_SPIN_LOCK_UNLOCKED { } | ||
33 | #else | ||
34 | typedef struct { int gcc_is_buggy; } raw_spinlock_t; | ||
35 | #define __RAW_SPIN_LOCK_UNLOCKED (raw_spinlock_t) { 0 } | ||
36 | #endif | ||
37 | |||
38 | #endif | ||
39 | |||
40 | #if (__GNUC__ > 2) | ||
41 | typedef struct { | ||
42 | /* no debug version on UP */ | ||
43 | } raw_rwlock_t; | ||
44 | |||
45 | #define __RAW_RW_LOCK_UNLOCKED { } | ||
46 | #else | ||
47 | typedef struct { int gcc_is_buggy; } raw_rwlock_t; | ||
48 | #define __RAW_RW_LOCK_UNLOCKED (raw_rwlock_t) { 0 } | ||
49 | #endif | ||
50 | |||
51 | #endif /* __LINUX_SPINLOCK_TYPES_UP_H */ | ||
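Note what the gcc workaround buys: with gcc 3.x and later the raw types are empty structs, so on a UP, non-debug, non-preempt build a spinlock_t occupies no storage at all. A small illustration (assumes CONFIG_SMP=n, CONFIG_DEBUG_SPINLOCK=n, CONFIG_PREEMPT=n; the function is made up):

    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    static void show_lock_sizes(void)
    {
            /* both values typically print as 0 here, thanks to gcc's
             * zero-size empty-struct extension */
            printk(KERN_INFO "raw_spinlock_t=%zu spinlock_t=%zu\n",
                   sizeof(raw_spinlock_t), sizeof(spinlock_t));
    }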
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h new file mode 100644 index 000000000000..31accf2f0b13 --- /dev/null +++ b/include/linux/spinlock_up.h | |||
@@ -0,0 +1,74 @@ | |||
1 | #ifndef __LINUX_SPINLOCK_UP_H | ||
2 | #define __LINUX_SPINLOCK_UP_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | /* | ||
9 | * include/linux/spinlock_up.h - UP-debug version of spinlocks. | ||
10 | * | ||
11 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
12 | * Released under the General Public License (GPL). | ||
13 | * | ||
14 | * In the debug case, 1 means unlocked, 0 means locked. (the values | ||
15 | * are inverted, to catch initialization bugs) | ||
16 | * | ||
17 | * No atomicity anywhere, we are on UP. | ||
18 | */ | ||
19 | |||
20 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
21 | |||
22 | #define __raw_spin_is_locked(x) ((x)->slock == 0) | ||
23 | |||
24 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
25 | { | ||
26 | lock->slock = 0; | ||
27 | } | ||
28 | |||
29 | static inline void | ||
30 | __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
31 | { | ||
32 | local_irq_save(flags); | ||
33 | lock->slock = 0; | ||
34 | } | ||
35 | |||
36 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
37 | { | ||
38 | char oldval = lock->slock; | ||
39 | |||
40 | lock->slock = 0; | ||
41 | |||
42 | return oldval > 0; | ||
43 | } | ||
44 | |||
45 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
46 | { | ||
47 | lock->slock = 1; | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Read-write spinlocks. No debug version. | ||
52 | */ | ||
53 | #define __raw_read_lock(lock) do { (void)(lock); } while (0) | ||
54 | #define __raw_write_lock(lock) do { (void)(lock); } while (0) | ||
55 | #define __raw_read_trylock(lock) ({ (void)(lock); 1; }) | ||
56 | #define __raw_write_trylock(lock) ({ (void)(lock); 1; }) | ||
57 | #define __raw_read_unlock(lock) do { (void)(lock); } while (0) | ||
58 | #define __raw_write_unlock(lock) do { (void)(lock); } while (0) | ||
59 | |||
60 | #else /* DEBUG_SPINLOCK */ | ||
61 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) | ||
62 | /* for sched.c and kernel_lock.c: */ | ||
63 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) | ||
64 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) | ||
65 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) | ||
66 | #endif /* DEBUG_SPINLOCK */ | ||
67 | |||
68 | #define __raw_read_can_lock(lock) (((void)(lock), 1)) | ||
69 | #define __raw_write_can_lock(lock) (((void)(lock), 1)) | ||
70 | |||
71 | #define __raw_spin_unlock_wait(lock) \ | ||
72 | do { cpu_relax(); } while (__raw_spin_is_locked(lock)) | ||
73 | |||
74 | #endif /* __LINUX_SPINLOCK_UP_H */ | ||
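The inverted UP-debug convention (1 = unlocked, 0 = locked) exists so that a lock that was never initialized, and therefore sits zeroed in the BSS, looks locked rather than silently available. A contrived illustration, assuming CONFIG_DEBUG_SPINLOCK on UP; real code would use spinlock_t rather than the raw type, and the names are made up:

    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    static raw_spinlock_t good = __RAW_SPIN_LOCK_UNLOCKED;  /* slock == 1 */
    static raw_spinlock_t forgotten;                         /* BSS: slock == 0 */

    static void demo(void)
    {
            WARN_ON(__raw_spin_is_locked(&forgotten));  /* fires: looks locked */
            WARN_ON(__raw_spin_is_locked(&good));       /* quiet: unlocked */
    }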
diff --git a/include/linux/time.h b/include/linux/time.h index c10d4c21c183..8e83f4e778bb 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -28,17 +28,10 @@ struct timezone { | |||
28 | #ifdef __KERNEL__ | 28 | #ifdef __KERNEL__ |
29 | 29 | ||
30 | /* Parameters used to convert the timespec values */ | 30 | /* Parameters used to convert the timespec values */ |
31 | #ifndef USEC_PER_SEC | 31 | #define MSEC_PER_SEC (1000L) |
32 | #define USEC_PER_SEC (1000000L) | 32 | #define USEC_PER_SEC (1000000L) |
33 | #endif | ||
34 | |||
35 | #ifndef NSEC_PER_SEC | ||
36 | #define NSEC_PER_SEC (1000000000L) | 33 | #define NSEC_PER_SEC (1000000000L) |
37 | #endif | ||
38 | |||
39 | #ifndef NSEC_PER_USEC | ||
40 | #define NSEC_PER_USEC (1000L) | 34 | #define NSEC_PER_USEC (1000L) |
41 | #endif | ||
42 | 35 | ||
43 | static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) | 36 | static __inline__ int timespec_equal(struct timespec *a, struct timespec *b) |
44 | { | 37 | { |
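Since MSEC_PER_SEC now sits alongside the other conversion constants unconditionally, conversions can avoid magic numbers. A small sketch; the helper name is made up and is not something this patch adds:

    #include <linux/time.h>

    /* round a timespec down to whole milliseconds */
    static inline unsigned long timespec_to_msecs(const struct timespec *ts)
    {
            return ts->tv_sec * MSEC_PER_SEC +
                   ts->tv_nsec / (NSEC_PER_SEC / MSEC_PER_SEC);
    }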
diff --git a/include/linux/timer.h b/include/linux/timer.h index 221f81ac2002..3340f3bd135d 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
@@ -32,6 +32,10 @@ extern struct timer_base_s __init_timer_base; | |||
32 | .magic = TIMER_MAGIC, \ | 32 | .magic = TIMER_MAGIC, \ |
33 | } | 33 | } |
34 | 34 | ||
35 | #define DEFINE_TIMER(_name, _function, _expires, _data) \ | ||
36 | struct timer_list _name = \ | ||
37 | TIMER_INITIALIZER(_function, _expires, _data) | ||
38 | |||
35 | void fastcall init_timer(struct timer_list * timer); | 39 | void fastcall init_timer(struct timer_list * timer); |
36 | 40 | ||
37 | /*** | 41 | /*** |
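DEFINE_TIMER() packages the existing TIMER_INITIALIZER() into a one-line static definition, so no init_timer() call is needed before arming the timer. A usage sketch; the timer name and callback are hypothetical:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void my_poll(unsigned long data);

    /* statically initialized: ready for mod_timer() right away */
    static DEFINE_TIMER(my_poll_timer, my_poll, 0, 0);

    static void my_poll(unsigned long data)
    {
            /* ... periodic work ..., then re-arm one second from now */
            mod_timer(&my_poll_timer, jiffies + HZ);
    }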
diff --git a/include/linux/tty.h b/include/linux/tty.h index 59ff42c629ec..1267f88ece6e 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -74,7 +74,8 @@ struct screen_info { | |||
74 | u16 vesapm_off; /* 0x30 */ | 74 | u16 vesapm_off; /* 0x30 */ |
75 | u16 pages; /* 0x32 */ | 75 | u16 pages; /* 0x32 */ |
76 | u16 vesa_attributes; /* 0x34 */ | 76 | u16 vesa_attributes; /* 0x34 */ |
77 | /* 0x36 -- 0x3f reserved for future expansion */ | 77 | u32 capabilities; /* 0x36 */ |
78 | /* 0x3a -- 0x3f reserved for future expansion */ | ||
78 | }; | 79 | }; |
79 | 80 | ||
80 | extern struct screen_info screen_info; | 81 | extern struct screen_info screen_info; |
diff --git a/include/linux/videodev.h b/include/linux/videodev.h index 9d6fbde3d29c..1cc8c31b7988 100644 --- a/include/linux/videodev.h +++ b/include/linux/videodev.h | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/version.h> | ||
7 | 6 | ||
8 | #define HAVE_V4L2 1 | 7 | #define HAVE_V4L2 1 |
9 | #include <linux/videodev2.h> | 8 | #include <linux/videodev2.h> |
@@ -29,7 +28,6 @@ struct video_device | |||
29 | void (*release)(struct video_device *vfd); | 28 | void (*release)(struct video_device *vfd); |
30 | 29 | ||
31 | 30 | ||
32 | #if 1 /* to be removed in 2.7.x */ | ||
33 | /* obsolete -- fops->owner is used instead */ | 31 | /* obsolete -- fops->owner is used instead */ |
34 | struct module *owner; | 32 | struct module *owner; |
35 | /* dev->driver_data will be used instead some day. | 33 | /* dev->driver_data will be used instead some day. |
@@ -37,7 +35,6 @@ struct video_device | |||
37 | * so the switch over will be transparent for you. | 35 | * so the switch over will be transparent for you. |
38 | * Or use {pci|usb}_{get|set}_drvdata() directly. */ | 36 | * Or use {pci|usb}_{get|set}_drvdata() directly. */ |
39 | void *priv; | 37 | void *priv; |
40 | #endif | ||
41 | 38 | ||
42 | /* for videodev.c internal usage -- please don't touch */ | 39 | /* for videodev.c internal usage -- please don't touch */ |
43 | int users; /* video_exclusive_{open|close} ... */ | 40 | int users; /* video_exclusive_{open|close} ... */ |
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index acbfc525576d..f623a33b9abe 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h | |||
@@ -270,7 +270,6 @@ struct v4l2_timecode | |||
270 | /* The above is based on SMPTE timecodes */ | 270 | /* The above is based on SMPTE timecodes */ |
271 | 271 | ||
272 | 272 | ||
273 | #if 1 | ||
274 | /* | 273 | /* |
275 | * M P E G C O M P R E S S I O N P A R A M E T E R S | 274 | * M P E G C O M P R E S S I O N P A R A M E T E R S |
276 | * | 275 | * |
@@ -357,7 +356,6 @@ struct v4l2_mpeg_compression { | |||
357 | /* I don't expect the above being perfect yet ;) */ | 356 | /* I don't expect the above being perfect yet ;) */ |
358 | __u32 reserved_5[8]; | 357 | __u32 reserved_5[8]; |
359 | }; | 358 | }; |
360 | #endif | ||
361 | 359 | ||
362 | struct v4l2_jpegcompression | 360 | struct v4l2_jpegcompression |
363 | { | 361 | { |
@@ -871,10 +869,8 @@ struct v4l2_streamparm | |||
871 | #define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc) | 869 | #define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc) |
872 | #define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format) | 870 | #define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format) |
873 | #define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format) | 871 | #define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format) |
874 | #if 1 /* experimental */ | ||
875 | #define VIDIOC_G_MPEGCOMP _IOR ('V', 6, struct v4l2_mpeg_compression) | 872 | #define VIDIOC_G_MPEGCOMP _IOR ('V', 6, struct v4l2_mpeg_compression) |
876 | #define VIDIOC_S_MPEGCOMP _IOW ('V', 7, struct v4l2_mpeg_compression) | 873 | #define VIDIOC_S_MPEGCOMP _IOW ('V', 7, struct v4l2_mpeg_compression) |
877 | #endif | ||
878 | #define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers) | 874 | #define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers) |
879 | #define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer) | 875 | #define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer) |
880 | #define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer) | 876 | #define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer) |
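With the #if 1 /* experimental */ guard gone, VIDIOC_G_MPEGCOMP and VIDIOC_S_MPEGCOMP are ordinary entries in the ioctl table. A hedged userspace sketch of driving them; the device path and the bare-bones error handling are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_mpeg_compression mpeg;
            int fd = open("/dev/video0", O_RDWR);   /* example device node */

            if (fd < 0 || ioctl(fd, VIDIOC_G_MPEGCOMP, &mpeg) < 0) {
                    perror("VIDIOC_G_MPEGCOMP");
                    return 1;
            }
            /* adjust fields as needed, then write the parameters back */
            ioctl(fd, VIDIOC_S_MPEGCOMP, &mpeg);
            return 0;
    }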
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 542dbaee6512..343d883d69c5 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -109,8 +109,6 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); | |||
109 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); | 109 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); |
110 | int sync_page_range(struct inode *inode, struct address_space *mapping, | 110 | int sync_page_range(struct inode *inode, struct address_space *mapping, |
111 | loff_t pos, size_t count); | 111 | loff_t pos, size_t count); |
112 | int sync_page_range_nolock(struct inode *inode, struct address_space | ||
113 | *mapping, loff_t pos, size_t count); | ||
114 | 112 | ||
115 | /* pdflush.c */ | 113 | /* pdflush.c */ |
116 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl | 114 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl |
diff --git a/include/media/audiochip.h b/include/media/audiochip.h index cd831168fdc1..a7ceee9fc5e9 100644 --- a/include/media/audiochip.h +++ b/include/media/audiochip.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * $Id: audiochip.h,v 1.5 2005/06/16 22:59:16 hhackmann Exp $ | ||
3 | */ | 2 | */ |
4 | 3 | ||
5 | #ifndef AUDIOCHIP_H | 4 | #ifndef AUDIOCHIP_H |
diff --git a/include/media/id.h b/include/media/id.h index 801ddef301aa..6d02c94cdc0d 100644 --- a/include/media/id.h +++ b/include/media/id.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * $Id: id.h,v 1.4 2005/06/12 04:19:19 mchehab Exp $ | ||
3 | */ | 2 | */ |
4 | 3 | ||
5 | /* FIXME: this is temporary, until these are included in linux/i2c-id.h */ | 4 | /* FIXME: this is temporary, until these are included in linux/i2c-id.h */ |
diff --git a/include/media/ir-common.h b/include/media/ir-common.h index 698670547f16..01b56822df4d 100644 --- a/include/media/ir-common.h +++ b/include/media/ir-common.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * $Id: ir-common.h,v 1.9 2005/05/15 19:01:26 mchehab Exp $ | ||
3 | * | 2 | * |
4 | * some common structs and functions to handle infrared remotes via | 3 | * some common structs and functions to handle infrared remotes via |
5 | * input layer ... | 4 | * input layer ... |
@@ -21,11 +20,11 @@ | |||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
22 | */ | 21 | */ |
23 | 22 | ||
24 | #include <linux/version.h> | ||
25 | #include <linux/input.h> | 23 | #include <linux/input.h> |
26 | 24 | ||
27 | 25 | ||
28 | #define IR_TYPE_RC5 1 | 26 | #define IR_TYPE_RC5 1 |
27 | #define IR_TYPE_PD 2 /* Pulse distance encoded IR */ | ||
29 | #define IR_TYPE_OTHER 99 | 28 | #define IR_TYPE_OTHER 99 |
30 | 29 | ||
31 | #define IR_KEYTAB_TYPE u32 | 30 | #define IR_KEYTAB_TYPE u32 |
@@ -60,6 +59,7 @@ void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir, | |||
60 | u32 ir_extract_bits(u32 data, u32 mask); | 59 | u32 ir_extract_bits(u32 data, u32 mask); |
61 | int ir_dump_samples(u32 *samples, int count); | 60 | int ir_dump_samples(u32 *samples, int count); |
62 | int ir_decode_biphase(u32 *samples, int count, int low, int high); | 61 | int ir_decode_biphase(u32 *samples, int count, int low, int high); |
62 | int ir_decode_pulsedistance(u32 *samples, int count, int low, int high); | ||
63 | 63 | ||
64 | /* | 64 | /* |
65 | * Local variables: | 65 | * Local variables: |
diff --git a/include/media/saa7146.h b/include/media/saa7146.h index 3dfb8d670eb7..2a897c3a6a9a 100644 --- a/include/media/saa7146.h +++ b/include/media/saa7146.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef __SAA7146__ | 1 | #ifndef __SAA7146__ |
2 | #define __SAA7146__ | 2 | #define __SAA7146__ |
3 | 3 | ||
4 | #include <linux/version.h> /* for version macros */ | ||
5 | #include <linux/module.h> /* for module-version */ | 4 | #include <linux/module.h> /* for module-version */ |
6 | #include <linux/delay.h> /* for delay-stuff */ | 5 | #include <linux/delay.h> /* for delay-stuff */ |
7 | #include <linux/slab.h> /* for kmalloc/kfree */ | 6 | #include <linux/slab.h> /* for kmalloc/kfree */ |
@@ -15,12 +14,7 @@ | |||
15 | #include <linux/vmalloc.h> /* for vmalloc() */ | 14 | #include <linux/vmalloc.h> /* for vmalloc() */ |
16 | #include <linux/mm.h> /* for vmalloc_to_page() */ | 15 | #include <linux/mm.h> /* for vmalloc_to_page() */ |
17 | 16 | ||
18 | /* ugly, but necessary to build the dvb stuff under 2.4. */ | 17 | #define SAA7146_VERSION_CODE 0x000500 /* 0.5.0 */ |
19 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,51) | ||
20 | #include "dvb_functions.h" | ||
21 | #endif | ||
22 | |||
23 | #define SAA7146_VERSION_CODE KERNEL_VERSION(0,5,0) | ||
24 | 18 | ||
25 | #define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr))) | 19 | #define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr))) |
26 | #define saa7146_read(sxy,adr) readl(sxy->mem+(adr)) | 20 | #define saa7146_read(sxy,adr) readl(sxy->mem+(adr)) |
@@ -33,13 +27,8 @@ extern unsigned int saa7146_debug; | |||
33 | #define DEBUG_VARIABLE saa7146_debug | 27 | #define DEBUG_VARIABLE saa7146_debug |
34 | #endif | 28 | #endif |
35 | 29 | ||
36 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,51) | ||
37 | #define DEBUG_PROLOG printk("%s: %s(): ",__stringify(KBUILD_BASENAME),__FUNCTION__) | ||
38 | #define INFO(x) { printk("%s: ",__stringify(KBUILD_BASENAME)); printk x; } | ||
39 | #else | ||
40 | #define DEBUG_PROLOG printk("%s: %s(): ",__stringify(KBUILD_MODNAME),__FUNCTION__) | 30 | #define DEBUG_PROLOG printk("%s: %s(): ",__stringify(KBUILD_MODNAME),__FUNCTION__) |
41 | #define INFO(x) { printk("%s: ",__stringify(KBUILD_MODNAME)); printk x; } | 31 | #define INFO(x) { printk("%s: ",__stringify(KBUILD_MODNAME)); printk x; } |
42 | #endif | ||
43 | 32 | ||
44 | #define ERR(x) { DEBUG_PROLOG; printk x; } | 33 | #define ERR(x) { DEBUG_PROLOG; printk x; } |
45 | 34 | ||
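Because INFO() and ERR() splice their argument straight into printk, callers pass a doubly parenthesised argument list; DEBUG_PROLOG prefixes ERR() output with the module and function name. A tiny sketch inside a hypothetical saa7146-based driver (rev, err and the message text are made up):

    #include <media/saa7146.h>

    static void report_status(int rev, int err)
    {
            INFO(("found saa7146, revision %d\n", rev));    /* "modname: ..." prefix */
            if (err)
                    ERR(("dma setup failed (%d)\n", err));  /* "modname: report_status(): ..." */
    }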
diff --git a/include/media/tuner.h b/include/media/tuner.h index eeaa15ddee85..4ad08e24a1aa 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h | |||
@@ -1,6 +1,4 @@ | |||
1 | 1 | /* | |
2 | /* $Id: tuner.h,v 1.45 2005/07/28 18:41:21 mchehab Exp $ | ||
3 | * | ||
4 | tuner.h - definition for different tuners | 2 | tuner.h - definition for different tuners |
5 | 3 | ||
6 | Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de) | 4 | Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de) |
@@ -28,88 +26,90 @@ | |||
28 | 26 | ||
29 | #define ADDR_UNSET (255) | 27 | #define ADDR_UNSET (255) |
30 | 28 | ||
31 | #define TUNER_TEMIC_PAL 0 /* 4002 FH5 (3X 7756, 9483) */ | 29 | #define TUNER_TEMIC_PAL 0 /* 4002 FH5 (3X 7756, 9483) */ |
32 | #define TUNER_PHILIPS_PAL_I 1 | 30 | #define TUNER_PHILIPS_PAL_I 1 |
33 | #define TUNER_PHILIPS_NTSC 2 | 31 | #define TUNER_PHILIPS_NTSC 2 |
34 | #define TUNER_PHILIPS_SECAM 3 /* you must actively select B/G, L, L` */ | 32 | #define TUNER_PHILIPS_SECAM 3 /* you must actively select B/G, L, L` */ |
35 | 33 | ||
36 | #define TUNER_ABSENT 4 | 34 | #define TUNER_ABSENT 4 |
37 | #define TUNER_PHILIPS_PAL 5 | 35 | #define TUNER_PHILIPS_PAL 5 |
38 | #define TUNER_TEMIC_NTSC 6 /* 4032 FY5 (3X 7004, 9498, 9789) */ | 36 | #define TUNER_TEMIC_NTSC 6 /* 4032 FY5 (3X 7004, 9498, 9789) */ |
39 | #define TUNER_TEMIC_PAL_I 7 /* 4062 FY5 (3X 8501, 9957) */ | 37 | #define TUNER_TEMIC_PAL_I 7 /* 4062 FY5 (3X 8501, 9957) */ |
40 | 38 | ||
41 | #define TUNER_TEMIC_4036FY5_NTSC 8 /* 4036 FY5 (3X 1223, 1981, 7686) */ | 39 | #define TUNER_TEMIC_4036FY5_NTSC 8 /* 4036 FY5 (3X 1223, 1981, 7686) */ |
42 | #define TUNER_ALPS_TSBH1_NTSC 9 | 40 | #define TUNER_ALPS_TSBH1_NTSC 9 |
43 | #define TUNER_ALPS_TSBE1_PAL 10 | 41 | #define TUNER_ALPS_TSBE1_PAL 10 |
44 | #define TUNER_ALPS_TSBB5_PAL_I 11 | 42 | #define TUNER_ALPS_TSBB5_PAL_I 11 |
45 | 43 | ||
46 | #define TUNER_ALPS_TSBE5_PAL 12 | 44 | #define TUNER_ALPS_TSBE5_PAL 12 |
47 | #define TUNER_ALPS_TSBC5_PAL 13 | 45 | #define TUNER_ALPS_TSBC5_PAL 13 |
48 | #define TUNER_TEMIC_4006FH5_PAL 14 /* 4006 FH5 (3X 9500, 9501, 7291) */ | 46 | #define TUNER_TEMIC_4006FH5_PAL 14 /* 4006 FH5 (3X 9500, 9501, 7291) */ |
49 | #define TUNER_ALPS_TSHC6_NTSC 15 | 47 | #define TUNER_ALPS_TSHC6_NTSC 15 |
50 | 48 | ||
51 | #define TUNER_TEMIC_PAL_DK 16 /* 4016 FY5 (3X 1392, 1393) */ | 49 | #define TUNER_TEMIC_PAL_DK 16 /* 4016 FY5 (3X 1392, 1393) */ |
52 | #define TUNER_PHILIPS_NTSC_M 17 | 50 | #define TUNER_PHILIPS_NTSC_M 17 |
53 | #define TUNER_TEMIC_4066FY5_PAL_I 18 /* 4066 FY5 (3X 7032, 7035) */ | 51 | #define TUNER_TEMIC_4066FY5_PAL_I 18 /* 4066 FY5 (3X 7032, 7035) */ |
54 | #define TUNER_TEMIC_4006FN5_MULTI_PAL 19 /* B/G, I and D/K autodetected (3X 7595, 7606, 7657)*/ | 52 | #define TUNER_TEMIC_4006FN5_MULTI_PAL 19 /* B/G, I and D/K autodetected (3X 7595, 7606, 7657) */ |
55 | 53 | ||
56 | #define TUNER_TEMIC_4009FR5_PAL 20 /* incl. FM radio (3X 7607, 7488, 7711)*/ | 54 | #define TUNER_TEMIC_4009FR5_PAL 20 /* incl. FM radio (3X 7607, 7488, 7711) */ |
57 | #define TUNER_TEMIC_4039FR5_NTSC 21 /* incl. FM radio (3X 7246, 7578, 7732)*/ | 55 | #define TUNER_TEMIC_4039FR5_NTSC 21 /* incl. FM radio (3X 7246, 7578, 7732) */ |
58 | #define TUNER_TEMIC_4046FM5 22 /* you must actively select B/G, D/K, I, L, L` ! (3X 7804, 7806, 8103, 8104)*/ | 56 | #define TUNER_TEMIC_4046FM5 22 /* you must actively select B/G, D/K, I, L, L` ! (3X 7804, 7806, 8103, 8104) */ |
59 | #define TUNER_PHILIPS_PAL_DK 23 | 57 | #define TUNER_PHILIPS_PAL_DK 23 |
60 | 58 | ||
61 | #define TUNER_PHILIPS_FQ1216ME 24 /* you must actively select B/G/D/K, I, L, L` */ | 59 | #define TUNER_PHILIPS_FQ1216ME 24 /* you must actively select B/G/D/K, I, L, L` */ |
62 | #define TUNER_LG_PAL_I_FM 25 | 60 | #define TUNER_LG_PAL_I_FM 25 |
63 | #define TUNER_LG_PAL_I 26 | 61 | #define TUNER_LG_PAL_I 26 |
64 | #define TUNER_LG_NTSC_FM 27 | 62 | #define TUNER_LG_NTSC_FM 27 |
65 | 63 | ||
66 | #define TUNER_LG_PAL_FM 28 | 64 | #define TUNER_LG_PAL_FM 28 |
67 | #define TUNER_LG_PAL 29 | 65 | #define TUNER_LG_PAL 29 |
68 | #define TUNER_TEMIC_4009FN5_MULTI_PAL_FM 30 /* B/G, I and D/K autodetected (3X 8155, 8160, 8163)*/ | 66 | #define TUNER_TEMIC_4009FN5_MULTI_PAL_FM 30 /* B/G, I and D/K autodetected (3X 8155, 8160, 8163) */ |
69 | #define TUNER_SHARP_2U5JF5540_NTSC 31 | 67 | #define TUNER_SHARP_2U5JF5540_NTSC 31 |
70 | 68 | ||
71 | #define TUNER_Samsung_PAL_TCPM9091PD27 32 | 69 | #define TUNER_Samsung_PAL_TCPM9091PD27 32 |
72 | #define TUNER_MT2032 33 | 70 | #define TUNER_MT2032 33 |
73 | #define TUNER_TEMIC_4106FH5 34 /* 4106 FH5 (3X 7808, 7865)*/ | 71 | #define TUNER_TEMIC_4106FH5 34 /* 4106 FH5 (3X 7808, 7865) */ |
74 | #define TUNER_TEMIC_4012FY5 35 /* 4012 FY5 (3X 0971, 1099)*/ | 72 | #define TUNER_TEMIC_4012FY5 35 /* 4012 FY5 (3X 0971, 1099) */ |
75 | 73 | ||
76 | #define TUNER_TEMIC_4136FY5 36 /* 4136 FY5 (3X 7708, 7746)*/ | 74 | #define TUNER_TEMIC_4136FY5 36 /* 4136 FY5 (3X 7708, 7746) */ |
77 | #define TUNER_LG_PAL_NEW_TAPC 37 | 75 | #define TUNER_LG_PAL_NEW_TAPC 37 |
78 | #define TUNER_PHILIPS_FM1216ME_MK3 38 | 76 | #define TUNER_PHILIPS_FM1216ME_MK3 38 |
79 | #define TUNER_LG_NTSC_NEW_TAPC 39 | 77 | #define TUNER_LG_NTSC_NEW_TAPC 39 |
80 | 78 | ||
81 | #define TUNER_HITACHI_NTSC 40 | 79 | #define TUNER_HITACHI_NTSC 40 |
82 | #define TUNER_PHILIPS_PAL_MK 41 | 80 | #define TUNER_PHILIPS_PAL_MK 41 |
83 | #define TUNER_PHILIPS_ATSC 42 | 81 | #define TUNER_PHILIPS_ATSC 42 |
84 | #define TUNER_PHILIPS_FM1236_MK3 43 | 82 | #define TUNER_PHILIPS_FM1236_MK3 43 |
85 | 83 | ||
86 | #define TUNER_PHILIPS_4IN1 44 /* ATI TV Wonder Pro - Conexant */ | 84 | #define TUNER_PHILIPS_4IN1 44 /* ATI TV Wonder Pro - Conexant */ |
87 | /* Microtune merged with Temic 12/31/1999, partially financed by Alps - these may be similar to Temic */ | 85 | /* Microtune merged with Temic 12/31/1999, partially financed by Alps - these may be similar to Temic */ |
88 | #define TUNER_MICROTUNE_4049FM5 45 | 86 | #define TUNER_MICROTUNE_4049FM5 45 |
89 | #define TUNER_LG_NTSC_TAPE 47 | 87 | #define TUNER_MICROTUNE_4042_FI5 46 |
90 | 88 | #define TUNER_LG_NTSC_TAPE 47 | |
91 | #define TUNER_TNF_8831BGFF 48 | 89 | |
92 | #define TUNER_MICROTUNE_4042FI5 49 /* DViCO FusionHDTV 3 Gold-Q - 4042 FI5 (3X 8147) */ | 90 | #define TUNER_TNF_8831BGFF 48 |
93 | #define TUNER_TCL_2002N 50 | 91 | #define TUNER_MICROTUNE_4042FI5 49 /* DViCO FusionHDTV 3 Gold-Q - 4042 FI5 (3X 8147) */ |
94 | #define TUNER_PHILIPS_FM1256_IH3 51 | 92 | #define TUNER_TCL_2002N 50 |
95 | 93 | #define TUNER_PHILIPS_FM1256_IH3 51 | |
96 | #define TUNER_THOMSON_DTT7610 52 | 94 | |
97 | #define TUNER_PHILIPS_FQ1286 53 | 95 | #define TUNER_THOMSON_DTT7610 52 |
98 | #define TUNER_PHILIPS_TDA8290 54 | 96 | #define TUNER_PHILIPS_FQ1286 53 |
99 | #define TUNER_LG_PAL_TAPE 55 /* Hauppauge PVR-150 PAL */ | 97 | #define TUNER_PHILIPS_TDA8290 54 |
100 | 98 | #define TUNER_LG_PAL_TAPE 55 /* Hauppauge PVR-150 PAL */ | |
101 | #define TUNER_PHILIPS_FQ1216AME_MK4 56 /* Hauppauge PVR-150 PAL */ | 99 | |
102 | #define TUNER_PHILIPS_FQ1236A_MK4 57 /* Hauppauge PVR-500MCE NTSC */ | 100 | #define TUNER_PHILIPS_FQ1216AME_MK4 56 /* Hauppauge PVR-150 PAL */ |
103 | 101 | #define TUNER_PHILIPS_FQ1236A_MK4 57 /* Hauppauge PVR-500MCE NTSC */ | |
104 | #define TUNER_YMEC_TVF_8531MF 58 | 102 | #define TUNER_YMEC_TVF_8531MF 58 |
105 | #define TUNER_YMEC_TVF_5533MF 59 /* Pixelview Pro Ultra NTSC */ | 103 | #define TUNER_YMEC_TVF_5533MF 59 /* Pixelview Pro Ultra NTSC */ |
106 | #define TUNER_THOMSON_DTT7611 60 /* DViCO FusionHDTV 3 Gold-T */ | 104 | |
107 | #define TUNER_TENA_9533_DI 61 | 105 | #define TUNER_THOMSON_DTT7611 60 /* DViCO FusionHDTV 3 Gold-T */ |
108 | 106 | #define TUNER_TENA_9533_DI 61 | |
109 | #define TUNER_TEA5767 62 /* Only FM Radio Tuner */ | 107 | #define TUNER_TEA5767 62 /* Only FM Radio Tuner */ |
110 | #define TUNER_PHILIPS_FMD1216ME_MK3 63 | 108 | #define TUNER_PHILIPS_FMD1216ME_MK3 63 |
111 | #define TUNER_LG_TDVS_H062F 64 /* DViCO FusionHDTV 5 */ | 109 | |
112 | #define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */ | 110 | #define TUNER_LG_TDVS_H062F 64 /* DViCO FusionHDTV 5 */ |
111 | #define TUNER_YMEC_TVF66T5_B_DFF 65 /* Acorp Y878F */ | ||
112 | #define TUNER_LG_NTSC_TALN_MINI 66 | ||
113 | 113 | ||
114 | #define NOTUNER 0 | 114 | #define NOTUNER 0 |
115 | #define PAL 1 /* PAL_BG */ | 115 | #define PAL 1 /* PAL_BG */ |
@@ -117,7 +117,7 @@ | |||
117 | #define NTSC 3 | 117 | #define NTSC 3 |
118 | #define SECAM 4 | 118 | #define SECAM 4 |
119 | #define ATSC 5 | 119 | #define ATSC 5 |
120 | #define RADIO 6 | 120 | #define RADIO 6 |
121 | 121 | ||
122 | #define NoTuner 0 | 122 | #define NoTuner 0 |
123 | #define Philips 1 | 123 | #define Philips 1 |
@@ -134,6 +134,7 @@ | |||
134 | #define THOMSON 12 | 134 | #define THOMSON 12 |
135 | 135 | ||
136 | #define TUNER_SET_TYPE_ADDR _IOW('T',3,int) | 136 | #define TUNER_SET_TYPE_ADDR _IOW('T',3,int) |
137 | #define TUNER_SET_STANDBY _IOW('T',4,int) | ||
137 | #define TDA9887_SET_CONFIG _IOW('t',5,int) | 138 | #define TDA9887_SET_CONFIG _IOW('t',5,int) |
138 | 139 | ||
139 | /* tv card specific */ | 140 | /* tv card specific */ |
@@ -153,9 +154,6 @@ | |||
153 | 154 | ||
154 | #ifdef __KERNEL__ | 155 | #ifdef __KERNEL__ |
155 | 156 | ||
156 | #define I2C_ADDR_TDA8290 0x4b | ||
157 | #define I2C_ADDR_TDA8275 0x61 | ||
158 | |||
159 | enum tuner_mode { | 157 | enum tuner_mode { |
160 | T_UNINITIALIZED = 0, | 158 | T_UNINITIALIZED = 0, |
161 | T_RADIO = 1 << V4L2_TUNER_RADIO, | 159 | T_RADIO = 1 << V4L2_TUNER_RADIO, |
@@ -165,21 +163,21 @@ enum tuner_mode { | |||
165 | }; | 163 | }; |
166 | 164 | ||
167 | struct tuner_setup { | 165 | struct tuner_setup { |
168 | unsigned short addr; | 166 | unsigned short addr; |
169 | unsigned int type; | 167 | unsigned int type; |
170 | unsigned int mode_mask; | 168 | unsigned int mode_mask; |
171 | }; | 169 | }; |
172 | 170 | ||
173 | struct tuner { | 171 | struct tuner { |
174 | /* device */ | 172 | /* device */ |
175 | struct i2c_client i2c; | 173 | struct i2c_client i2c; |
176 | 174 | ||
177 | unsigned int type; /* chip type */ | 175 | unsigned int type; /* chip type */ |
178 | 176 | ||
179 | unsigned int mode; | 177 | unsigned int mode; |
180 | unsigned int mode_mask; /* Combination of allowable modes */ | 178 | unsigned int mode_mask; /* Combination of allowable modes */ |
181 | 179 | ||
182 | unsigned int freq; /* keep track of the current settings */ | 180 | unsigned int freq; /* keep track of the current settings */ |
183 | unsigned int audmode; | 181 | unsigned int audmode; |
184 | v4l2_std_id std; | 182 | v4l2_std_id std; |
185 | 183 | ||
@@ -198,6 +196,7 @@ struct tuner { | |||
198 | void (*radio_freq)(struct i2c_client *c, unsigned int freq); | 196 | void (*radio_freq)(struct i2c_client *c, unsigned int freq); |
199 | int (*has_signal)(struct i2c_client *c); | 197 | int (*has_signal)(struct i2c_client *c); |
200 | int (*is_stereo)(struct i2c_client *c); | 198 | int (*is_stereo)(struct i2c_client *c); |
199 | void (*standby)(struct i2c_client *c); | ||
201 | }; | 200 | }; |
202 | 201 | ||
203 | extern unsigned int tuner_debug; | 202 | extern unsigned int tuner_debug; |
@@ -209,16 +208,20 @@ extern int tea5767_tuner_init(struct i2c_client *c); | |||
209 | extern int default_tuner_init(struct i2c_client *c); | 208 | extern int default_tuner_init(struct i2c_client *c); |
210 | extern int tea5767_autodetection(struct i2c_client *c); | 209 | extern int tea5767_autodetection(struct i2c_client *c); |
211 | 210 | ||
212 | #define tuner_warn(fmt, arg...) \ | 211 | #define tuner_warn(fmt, arg...) do {\ |
213 | dev_printk(KERN_WARNING , &t->i2c.dev , fmt , ## arg) | 212 | printk(KERN_WARNING "%s %d-%04x: " fmt, t->i2c.driver->name, \ |
214 | #define tuner_info(fmt, arg...) \ | 213 | t->i2c.adapter->nr, t->i2c.addr , ##arg); } while (0) |
215 | dev_printk(KERN_INFO , &t->i2c.dev , fmt , ## arg) | 214 | #define tuner_info(fmt, arg...) do {\ |
216 | #define tuner_dbg(fmt, arg...) \ | 215 | printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.driver->name, \ |
217 | if (tuner_debug) dev_printk(KERN_DEBUG , &t->i2c.dev , fmt , ## arg) | 216 | t->i2c.adapter->nr, t->i2c.addr , ##arg); } while (0) |
217 | #define tuner_dbg(fmt, arg...) do {\ | ||
218 | if (tuner_debug) \ | ||
219 | printk(KERN_DEBUG "%s %d-%04x: " fmt, t->i2c.driver->name, \ | ||
220 | t->i2c.adapter->nr, t->i2c.addr , ##arg); } while (0) | ||
218 | 221 | ||
219 | #endif /* __KERNEL__ */ | 222 | #endif /* __KERNEL__ */ |
220 | 223 | ||
221 | #endif | 224 | #endif /* _TUNER_H */ |
222 | 225 | ||
223 | /* | 226 | /* |
224 | * Overrides for Emacs so that we follow Linus's tabbing style. | 227 | * Overrides for Emacs so that we follow Linus's tabbing style. |
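The reworked tuner_warn/tuner_info/tuner_dbg macros print the i2c driver name, adapter number and address directly instead of going through dev_printk, and the new TUNER_SET_STANDBY command pairs with the standby() callback added to struct tuner. A driver-side sketch; my_standby and the surrounding wiring are made up:

    #include <linux/i2c.h>
    #include <media/tuner.h>

    /* hypothetical handler a tuner driver could hook into t->standby */
    static void my_standby(struct i2c_client *c)
    {
            struct tuner *t = i2c_get_clientdata(c);

            tuner_dbg("putting tuner into standby\n");
            /* ... program the tuner hardware into its low-power mode ... */
    }

A bridge driver would then request standby with something along the lines of i2c_clients_command(adapter, TUNER_SET_STANDBY, NULL).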
diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h index 854a2c2f105b..e2035c7da094 100644 --- a/include/media/tveeprom.h +++ b/include/media/tveeprom.h | |||
@@ -1,18 +1,21 @@ | |||
1 | /* | 1 | /* |
2 | * $Id: tveeprom.h,v 1.2 2005/06/12 04:19:19 mchehab Exp $ | ||
3 | */ | 2 | */ |
4 | 3 | ||
5 | struct tveeprom { | 4 | struct tveeprom { |
6 | u32 has_radio; | 5 | u32 has_radio; |
6 | u32 has_ir; /* 0: no IR, 1: IR present, 2: unknown */ | ||
7 | 7 | ||
8 | u32 tuner_type; | 8 | u32 tuner_type; |
9 | u32 tuner_formats; | 9 | u32 tuner_formats; |
10 | 10 | ||
11 | u32 tuner2_type; | ||
12 | u32 tuner2_formats; | ||
13 | |||
11 | u32 digitizer; | 14 | u32 digitizer; |
12 | u32 digitizer_formats; | 15 | u32 digitizer_formats; |
13 | 16 | ||
14 | u32 audio_processor; | 17 | u32 audio_processor; |
15 | /* a_p_fmts? */ | 18 | u32 decoder_processor; |
16 | 19 | ||
17 | u32 model; | 20 | u32 model; |
18 | u32 revision; | 21 | u32 revision; |
@@ -20,7 +23,7 @@ struct tveeprom { | |||
20 | char rev_str[5]; | 23 | char rev_str[5]; |
21 | }; | 24 | }; |
22 | 25 | ||
23 | void tveeprom_hauppauge_analog(struct tveeprom *tvee, | 26 | void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee, |
24 | unsigned char *eeprom_data); | 27 | unsigned char *eeprom_data); |
25 | 28 | ||
26 | int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len); | 29 | int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len); |
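tveeprom_hauppauge_analog() now takes the i2c_client as its first argument, so its log output can be tied to the right device, and struct tveeprom grows has_ir, tuner2_* and decoder_processor fields. A caller-side sketch; the function name and buffer size are illustrative:

    #include <linux/i2c.h>
    #include <media/tveeprom.h>

    static void my_parse_eeprom(struct i2c_client *c)
    {
            unsigned char eedata[256];
            struct tveeprom tv;

            if (tveeprom_read(c, eedata, sizeof(eedata)) < 0)
                    return;
            tveeprom_hauppauge_analog(c, &tv, eedata);  /* note the new first argument */

            if (tv.has_radio) {
                    /* ... register the radio interface ... */
            }
    }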
diff --git a/include/media/video-buf.h b/include/media/video-buf.h index ae6da6de98de..ae8d7a000440 100644 --- a/include/media/video-buf.h +++ b/include/media/video-buf.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * $Id: video-buf.h,v 1.9 2004/11/07 13:17:15 kraxel Exp $ | ||
3 | * | 2 | * |
4 | * generic helper functions for video4linux capture buffers, to handle | 3 | * generic helper functions for video4linux capture buffers, to handle |
5 | * memory management and PCI DMA. Right now bttv + saa7134 use it. | 4 | * memory management and PCI DMA. Right now bttv + saa7134 use it. |
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index b707a603351b..cb8b6e6ce66c 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h | |||
@@ -151,6 +151,8 @@ struct pcmcia_device { | |||
151 | uniquely define a pcmcia_device */ | 151 | uniquely define a pcmcia_device */ |
152 | struct pcmcia_socket *socket; | 152 | struct pcmcia_socket *socket; |
153 | 153 | ||
154 | char *devname; | ||
155 | |||
154 | u8 device_no; | 156 | u8 device_no; |
155 | 157 | ||
156 | /* the hardware "function" device; certain subdevices can | 158 | /* the hardware "function" device; certain subdevices can |
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 77fe9039209b..5308683c8c41 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h | |||
@@ -290,6 +290,7 @@ struct ib_cm_id { | |||
290 | enum ib_cm_lap_state lap_state; /* internal CM/debug use */ | 290 | enum ib_cm_lap_state lap_state; /* internal CM/debug use */ |
291 | __be32 local_id; | 291 | __be32 local_id; |
292 | __be32 remote_id; | 292 | __be32 remote_id; |
293 | u32 remote_cm_qpn; /* 1 unless redirected */ | ||
293 | }; | 294 | }; |
294 | 295 | ||
295 | /** | 296 | /** |
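The new remote_cm_qpn field records which QP the remote CM is reachable on; per the comment it stays 1 unless the connection was redirected. A trivial kernel-side consumer sketch:

static int cm_was_redirected(struct ib_cm_id *id)
{
	/* 1 unless redirected, as noted in the field comment above */
	return id->remote_cm_qpn != 1;
}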
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index fc6b1c18ffc6..53184a38fdf6 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h | |||
@@ -173,6 +173,27 @@ struct ib_vendor_mad { | |||
173 | u8 data[216]; | 173 | u8 data[216]; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | struct ib_class_port_info | ||
177 | { | ||
178 | u8 base_version; | ||
179 | u8 class_version; | ||
180 | __be16 capability_mask; | ||
181 | u8 reserved[3]; | ||
182 | u8 resp_time_value; | ||
183 | u8 redirect_gid[16]; | ||
184 | __be32 redirect_tcslfl; | ||
185 | __be16 redirect_lid; | ||
186 | __be16 redirect_pkey; | ||
187 | __be32 redirect_qp; | ||
188 | __be32 redirect_qkey; | ||
189 | u8 trap_gid[16]; | ||
190 | __be32 trap_tcslfl; | ||
191 | __be16 trap_lid; | ||
192 | __be16 trap_pkey; | ||
193 | __be32 trap_hlqp; | ||
194 | __be32 trap_qkey; | ||
195 | }; | ||
196 | |||
176 | /** | 197 | /** |
177 | * ib_mad_send_buf - MAD data buffer and work request for sends. | 198 | * ib_mad_send_buf - MAD data buffer and work request for sends. |
178 | * @mad: References an allocated MAD data buffer. The size of the data | 199 | * @mad: References an allocated MAD data buffer. The size of the data |
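struct ib_class_port_info mirrors the wire-format ClassPortInfo attribute, so its multi-byte fields are big-endian and must be converted by the consumer. A kernel-side sketch of reading the redirect target, assuming (as in the IBA ClassPortInfo layout) that the QPN occupies the low 24 bits of redirect_qp:

static void print_redirect(const struct ib_class_port_info *cpi)
{
	u16 lid = be16_to_cpu(cpi->redirect_lid);
	u32 qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;

	printk(KERN_INFO "redirected to LID 0x%x, QPN 0x%x\n", lid, qpn);
}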
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index c022edfc49da..a7555c800ecf 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h | |||
@@ -46,7 +46,36 @@ enum { | |||
46 | 46 | ||
47 | IB_SA_METHOD_GET_TABLE = 0x12, | 47 | IB_SA_METHOD_GET_TABLE = 0x12, |
48 | IB_SA_METHOD_GET_TABLE_RESP = 0x92, | 48 | IB_SA_METHOD_GET_TABLE_RESP = 0x92, |
49 | IB_SA_METHOD_DELETE = 0x15 | 49 | IB_SA_METHOD_DELETE = 0x15, |
50 | IB_SA_METHOD_DELETE_RESP = 0x95, | ||
51 | IB_SA_METHOD_GET_MULTI = 0x14, | ||
52 | IB_SA_METHOD_GET_MULTI_RESP = 0x94, | ||
53 | IB_SA_METHOD_GET_TRACE_TBL = 0x13 | ||
54 | }; | ||
55 | |||
56 | enum { | ||
57 | IB_SA_ATTR_CLASS_PORTINFO = 0x01, | ||
58 | IB_SA_ATTR_NOTICE = 0x02, | ||
59 | IB_SA_ATTR_INFORM_INFO = 0x03, | ||
60 | IB_SA_ATTR_NODE_REC = 0x11, | ||
61 | IB_SA_ATTR_PORT_INFO_REC = 0x12, | ||
62 | IB_SA_ATTR_SL2VL_REC = 0x13, | ||
63 | IB_SA_ATTR_SWITCH_REC = 0x14, | ||
64 | IB_SA_ATTR_LINEAR_FDB_REC = 0x15, | ||
65 | IB_SA_ATTR_RANDOM_FDB_REC = 0x16, | ||
66 | IB_SA_ATTR_MCAST_FDB_REC = 0x17, | ||
67 | IB_SA_ATTR_SM_INFO_REC = 0x18, | ||
68 | IB_SA_ATTR_LINK_REC = 0x20, | ||
69 | IB_SA_ATTR_GUID_INFO_REC = 0x30, | ||
70 | IB_SA_ATTR_SERVICE_REC = 0x31, | ||
71 | IB_SA_ATTR_PARTITION_REC = 0x33, | ||
72 | IB_SA_ATTR_PATH_REC = 0x35, | ||
73 | IB_SA_ATTR_VL_ARB_REC = 0x36, | ||
74 | IB_SA_ATTR_MC_MEMBER_REC = 0x38, | ||
75 | IB_SA_ATTR_TRACE_REC = 0x39, | ||
76 | IB_SA_ATTR_MULTI_PATH_REC = 0x3a, | ||
77 | IB_SA_ATTR_SERVICE_ASSOC_REC = 0x3b, | ||
78 | IB_SA_ATTR_INFORM_INFO_REC = 0xf3 | ||
50 | }; | 79 | }; |
51 | 80 | ||
52 | enum ib_sa_selector { | 81 | enum ib_sa_selector { |
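The added IB_SA_ATTR_* constants name the subnet administration attribute IDs that the extra SA methods above operate on. A purely illustrative helper mapping a few of them to printable names, e.g. for debug output:

static const char *sa_attr_name(int attr_id)
{
	switch (attr_id) {
	case IB_SA_ATTR_PATH_REC:	return "PathRecord";
	case IB_SA_ATTR_MC_MEMBER_REC:	return "MCMemberRecord";
	case IB_SA_ATTR_SERVICE_REC:	return "ServiceRecord";
	default:			return "other";
	}
}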
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h index 72182d16778b..e4d1654276ad 100644 --- a/include/rdma/ib_user_cm.h +++ b/include/rdma/ib_user_cm.h | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -37,7 +38,7 @@ | |||
37 | 38 | ||
38 | #include <linux/types.h> | 39 | #include <linux/types.h> |
39 | 40 | ||
40 | #define IB_USER_CM_ABI_VERSION 1 | 41 | #define IB_USER_CM_ABI_VERSION 2 |
41 | 42 | ||
42 | enum { | 43 | enum { |
43 | IB_USER_CM_CMD_CREATE_ID, | 44 | IB_USER_CM_CMD_CREATE_ID, |
@@ -60,6 +61,7 @@ enum { | |||
60 | IB_USER_CM_CMD_SEND_SIDR_REP, | 61 | IB_USER_CM_CMD_SEND_SIDR_REP, |
61 | 62 | ||
62 | IB_USER_CM_CMD_EVENT, | 63 | IB_USER_CM_CMD_EVENT, |
64 | IB_USER_CM_CMD_INIT_QP_ATTR, | ||
63 | }; | 65 | }; |
64 | /* | 66 | /* |
65 | * command ABI structures. | 67 | * command ABI structures. |
@@ -71,6 +73,7 @@ struct ib_ucm_cmd_hdr { | |||
71 | }; | 73 | }; |
72 | 74 | ||
73 | struct ib_ucm_create_id { | 75 | struct ib_ucm_create_id { |
76 | __u64 uid; | ||
74 | __u64 response; | 77 | __u64 response; |
75 | }; | 78 | }; |
76 | 79 | ||
@@ -79,9 +82,14 @@ struct ib_ucm_create_id_resp { | |||
79 | }; | 82 | }; |
80 | 83 | ||
81 | struct ib_ucm_destroy_id { | 84 | struct ib_ucm_destroy_id { |
85 | __u64 response; | ||
82 | __u32 id; | 86 | __u32 id; |
83 | }; | 87 | }; |
84 | 88 | ||
89 | struct ib_ucm_destroy_id_resp { | ||
90 | __u32 events_reported; | ||
91 | }; | ||
92 | |||
85 | struct ib_ucm_attr_id { | 93 | struct ib_ucm_attr_id { |
86 | __u64 response; | 94 | __u64 response; |
87 | __u32 id; | 95 | __u32 id; |
@@ -94,6 +102,64 @@ struct ib_ucm_attr_id_resp { | |||
94 | __be32 remote_id; | 102 | __be32 remote_id; |
95 | }; | 103 | }; |
96 | 104 | ||
105 | struct ib_ucm_init_qp_attr { | ||
106 | __u64 response; | ||
107 | __u32 id; | ||
108 | __u32 qp_state; | ||
109 | }; | ||
110 | |||
111 | struct ib_ucm_ah_attr { | ||
112 | __u8 grh_dgid[16]; | ||
113 | __u32 grh_flow_label; | ||
114 | __u16 dlid; | ||
115 | __u16 reserved; | ||
116 | __u8 grh_sgid_index; | ||
117 | __u8 grh_hop_limit; | ||
118 | __u8 grh_traffic_class; | ||
119 | __u8 sl; | ||
120 | __u8 src_path_bits; | ||
121 | __u8 static_rate; | ||
122 | __u8 is_global; | ||
123 | __u8 port_num; | ||
124 | }; | ||
125 | |||
126 | struct ib_ucm_init_qp_attr_resp { | ||
127 | __u32 qp_attr_mask; | ||
128 | __u32 qp_state; | ||
129 | __u32 cur_qp_state; | ||
130 | __u32 path_mtu; | ||
131 | __u32 path_mig_state; | ||
132 | __u32 qkey; | ||
133 | __u32 rq_psn; | ||
134 | __u32 sq_psn; | ||
135 | __u32 dest_qp_num; | ||
136 | __u32 qp_access_flags; | ||
137 | |||
138 | struct ib_ucm_ah_attr ah_attr; | ||
139 | struct ib_ucm_ah_attr alt_ah_attr; | ||
140 | |||
141 | /* ib_qp_cap */ | ||
142 | __u32 max_send_wr; | ||
143 | __u32 max_recv_wr; | ||
144 | __u32 max_send_sge; | ||
145 | __u32 max_recv_sge; | ||
146 | __u32 max_inline_data; | ||
147 | |||
148 | __u16 pkey_index; | ||
149 | __u16 alt_pkey_index; | ||
150 | __u8 en_sqd_async_notify; | ||
151 | __u8 sq_draining; | ||
152 | __u8 max_rd_atomic; | ||
153 | __u8 max_dest_rd_atomic; | ||
154 | __u8 min_rnr_timer; | ||
155 | __u8 port_num; | ||
156 | __u8 timeout; | ||
157 | __u8 retry_cnt; | ||
158 | __u8 rnr_retry; | ||
159 | __u8 alt_port_num; | ||
160 | __u8 alt_timeout; | ||
161 | }; | ||
162 | |||
97 | struct ib_ucm_listen { | 163 | struct ib_ucm_listen { |
98 | __be64 service_id; | 164 | __be64 service_id; |
99 | __be64 service_mask; | 165 | __be64 service_mask; |
@@ -157,6 +223,7 @@ struct ib_ucm_req { | |||
157 | }; | 223 | }; |
158 | 224 | ||
159 | struct ib_ucm_rep { | 225 | struct ib_ucm_rep { |
226 | __u64 uid; | ||
160 | __u64 data; | 227 | __u64 data; |
161 | __u32 id; | 228 | __u32 id; |
162 | __u32 qpn; | 229 | __u32 qpn; |
@@ -232,7 +299,6 @@ struct ib_ucm_event_get { | |||
232 | }; | 299 | }; |
233 | 300 | ||
234 | struct ib_ucm_req_event_resp { | 301 | struct ib_ucm_req_event_resp { |
235 | __u32 listen_id; | ||
236 | /* device */ | 302 | /* device */ |
237 | /* port */ | 303 | /* port */ |
238 | struct ib_ucm_path_rec primary_path; | 304 | struct ib_ucm_path_rec primary_path; |
@@ -287,7 +353,6 @@ struct ib_ucm_apr_event_resp { | |||
287 | }; | 353 | }; |
288 | 354 | ||
289 | struct ib_ucm_sidr_req_event_resp { | 355 | struct ib_ucm_sidr_req_event_resp { |
290 | __u32 listen_id; | ||
291 | /* device */ | 356 | /* device */ |
292 | /* port */ | 357 | /* port */ |
293 | __u16 pkey; | 358 | __u16 pkey; |
@@ -307,6 +372,7 @@ struct ib_ucm_sidr_rep_event_resp { | |||
307 | #define IB_UCM_PRES_ALTERNATE 0x08 | 372 | #define IB_UCM_PRES_ALTERNATE 0x08 |
308 | 373 | ||
309 | struct ib_ucm_event_resp { | 374 | struct ib_ucm_event_resp { |
375 | __u64 uid; | ||
310 | __u32 id; | 376 | __u32 id; |
311 | __u32 event; | 377 | __u32 event; |
312 | __u32 present; | 378 | __u32 present; |
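ABI version 2 adds IB_USER_CM_CMD_INIT_QP_ATTR together with userspace copies of the QP attributes. A hedged userspace sketch of issuing the command, assuming the usual ucm write() convention of a struct ib_ucm_cmd_hdr (command code plus in/out byte counts) followed by the command body; the helper name and error handling are illustrative only:

#include <string.h>
#include <unistd.h>
#include <rdma/ib_user_cm.h>

static int ucm_init_qp_attr(int fd, __u32 cm_id, __u32 qp_state,
			    struct ib_ucm_init_qp_attr_resp *resp)
{
	struct {
		struct ib_ucm_cmd_hdr hdr;
		struct ib_ucm_init_qp_attr cmd;
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.hdr.cmd = IB_USER_CM_CMD_INIT_QP_ATTR;
	msg.hdr.in  = sizeof(msg.cmd);
	msg.hdr.out = sizeof(*resp);

	msg.cmd.response = (unsigned long) resp;	/* kernel fills this buffer */
	msg.cmd.id       = cm_id;
	msg.cmd.qp_state = qp_state;

	if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
		return -1;
	return 0;	/* resp->qp_attr_mask and friends are now valid */
}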
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h index 7ebb01c8f996..fd85725391a4 100644 --- a/include/rdma/ib_user_verbs.h +++ b/include/rdma/ib_user_verbs.h | |||
@@ -42,7 +42,7 @@ | |||
42 | * Increment this value if any changes that break userspace ABI | 42 | * Increment this value if any changes that break userspace ABI |
43 | * compatibility are made. | 43 | * compatibility are made. |
44 | */ | 44 | */ |
45 | #define IB_USER_VERBS_ABI_VERSION 1 | 45 | #define IB_USER_VERBS_ABI_VERSION 2 |
46 | 46 | ||
47 | enum { | 47 | enum { |
48 | IB_USER_VERBS_CMD_QUERY_PARAMS, | 48 | IB_USER_VERBS_CMD_QUERY_PARAMS, |
@@ -292,7 +292,14 @@ struct ib_uverbs_create_cq_resp { | |||
292 | }; | 292 | }; |
293 | 293 | ||
294 | struct ib_uverbs_destroy_cq { | 294 | struct ib_uverbs_destroy_cq { |
295 | __u64 response; | ||
295 | __u32 cq_handle; | 296 | __u32 cq_handle; |
297 | __u32 reserved; | ||
298 | }; | ||
299 | |||
300 | struct ib_uverbs_destroy_cq_resp { | ||
301 | __u32 comp_events_reported; | ||
302 | __u32 async_events_reported; | ||
296 | }; | 303 | }; |
297 | 304 | ||
298 | struct ib_uverbs_create_qp { | 305 | struct ib_uverbs_create_qp { |
@@ -372,7 +379,13 @@ struct ib_uverbs_modify_qp_resp { | |||
372 | }; | 379 | }; |
373 | 380 | ||
374 | struct ib_uverbs_destroy_qp { | 381 | struct ib_uverbs_destroy_qp { |
382 | __u64 response; | ||
375 | __u32 qp_handle; | 383 | __u32 qp_handle; |
384 | __u32 reserved; | ||
385 | }; | ||
386 | |||
387 | struct ib_uverbs_destroy_qp_resp { | ||
388 | __u32 events_reported; | ||
376 | }; | 389 | }; |
377 | 390 | ||
378 | struct ib_uverbs_attach_mcast { | 391 | struct ib_uverbs_attach_mcast { |
@@ -416,7 +429,13 @@ struct ib_uverbs_modify_srq { | |||
416 | }; | 429 | }; |
417 | 430 | ||
418 | struct ib_uverbs_destroy_srq { | 431 | struct ib_uverbs_destroy_srq { |
432 | __u64 response; | ||
419 | __u32 srq_handle; | 433 | __u32 srq_handle; |
434 | __u32 reserved; | ||
435 | }; | ||
436 | |||
437 | struct ib_uverbs_destroy_srq_resp { | ||
438 | __u32 events_reported; | ||
420 | }; | 439 | }; |
421 | 440 | ||
422 | #endif /* IB_USER_VERBS_H */ | 441 | #endif /* IB_USER_VERBS_H */ |
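In verbs ABI version 2 the destroy commands carry a response pointer so the kernel can report how many events were still outstanding for the object; userspace is then expected to discard that many stale events. A sketch of populating the new destroy-CQ command (the command submission path itself is unchanged and omitted here; the helper name is illustrative):

#include <stdint.h>
#include <string.h>
#include <rdma/ib_user_verbs.h>

static void fill_destroy_cq(struct ib_uverbs_destroy_cq *cmd,
			    struct ib_uverbs_destroy_cq_resp *resp,
			    uint32_t cq_handle)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->response  = (uintptr_t) resp;	/* kernel writes the counters here */
	cmd->cq_handle = cq_handle;
	cmd->reserved  = 0;
	/* afterwards resp->comp_events_reported and resp->async_events_reported
	 * tell the caller how many queued events still reference this CQ */
}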
diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 389e8ebe9c19..d6361dab0370 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h | |||
@@ -910,11 +910,10 @@ int snd_pcm_format_big_endian(snd_pcm_format_t format); | |||
910 | * Returns 1 if the given PCM format is CPU-endian, 0 if | 910 | * Returns 1 if the given PCM format is CPU-endian, 0 if |
911 | * opposite, or a negative error code if endian not specified. | 911 | * opposite, or a negative error code if endian not specified. |
912 | */ | 912 | */ |
913 | /* int snd_pcm_format_cpu_endian(snd_pcm_format_t format); */ | ||
914 | #ifdef SNDRV_LITTLE_ENDIAN | 913 | #ifdef SNDRV_LITTLE_ENDIAN |
915 | #define snd_pcm_format_cpu_endian snd_pcm_format_little_endian | 914 | #define snd_pcm_format_cpu_endian(format) snd_pcm_format_little_endian(format) |
916 | #else | 915 | #else |
917 | #define snd_pcm_format_cpu_endian snd_pcm_format_big_endian | 916 | #define snd_pcm_format_cpu_endian(format) snd_pcm_format_big_endian(format) |
918 | #endif | 917 | #endif |
919 | int snd_pcm_format_width(snd_pcm_format_t format); /* in bits */ | 918 | int snd_pcm_format_width(snd_pcm_format_t format); /* in bits */ |
920 | int snd_pcm_format_physical_width(snd_pcm_format_t format); /* in bits */ | 919 | int snd_pcm_format_physical_width(snd_pcm_format_t format); /* in bits */ |
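snd_pcm_format_cpu_endian() is now a function-like macro that takes the format explicitly, matching the prototype documented in the comment above it. A small caller sketch using the documented return values (1 = CPU-endian, 0 = opposite, negative = endianness not defined):

static int needs_byteswap(snd_pcm_format_t format)
{
	int cpu = snd_pcm_format_cpu_endian(format);

	if (cpu < 0)
		return cpu;	/* endianness not defined for this format */
	return !cpu;		/* swap only when the format is not CPU-endian */
}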
diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h index ad3c3be33c03..b82e408e758f 100644 --- a/include/sound/tea575x-tuner.h +++ b/include/sound/tea575x-tuner.h | |||
@@ -34,9 +34,7 @@ struct snd_tea575x_ops { | |||
34 | struct snd_tea575x { | 34 | struct snd_tea575x { |
35 | snd_card_t *card; | 35 | snd_card_t *card; |
36 | struct video_device vd; /* video device */ | 36 | struct video_device vd; /* video device */ |
37 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0) | ||
38 | struct file_operations fops; | 37 | struct file_operations fops; |
39 | #endif | ||
40 | int dev_nr; /* requested device number + 1 */ | 38 | int dev_nr; /* requested device number + 1 */ |
41 | int vd_registered; /* video device is registered */ | 39 | int vd_registered; /* video device is registered */ |
42 | int tea5759; /* 5759 chip is present */ | 40 | int tea5759; /* 5759 chip is present */ |
diff --git a/include/video/cyblafb.h b/include/video/cyblafb.h new file mode 100644 index 000000000000..a9948232b131 --- /dev/null +++ b/include/video/cyblafb.h | |||
@@ -0,0 +1,171 @@ | |||
1 | |||
2 | #ifndef CYBLAFB_DEBUG | ||
3 | #define CYBLAFB_DEBUG 0 | ||
4 | #endif | ||
5 | |||
6 | #if CYBLAFB_DEBUG | ||
7 | #define debug(f,a...) printk("%s:" f, __FUNCTION__ , ## a); | ||
8 | #else | ||
9 | #define debug(f,a...) | ||
10 | #endif | ||
11 | |||
12 | #define output(f, a...) printk("cyblafb: " f, ## a) | ||
13 | |||
14 | #define Kb (1024) | ||
15 | #define Mb (Kb*Kb) | ||
16 | |||
17 | /* PCI IDS of supported cards temporarily here */ | ||
18 | |||
19 | #define CYBERBLADEi1 0x8500 | ||
20 | |||
21 | /* these defines are for 'lcd' variable */ | ||
22 | #define LCD_STRETCH 0 | ||
23 | #define LCD_CENTER 1 | ||
24 | #define LCD_BIOS 2 | ||
25 | |||
26 | /* display types */ | ||
27 | #define DISPLAY_CRT 0 | ||
28 | #define DISPLAY_FP 1 | ||
29 | |||
30 | #define ROP_S 0xCC | ||
31 | |||
32 | #define point(x,y) ((y)<<16|(x)) | ||
33 | |||
34 | // | ||
35 | // Attribute Regs, ARxx, 3c0/3c1 | ||
36 | // | ||
37 | #define AR00 0x00 | ||
38 | #define AR01 0x01 | ||
39 | #define AR02 0x02 | ||
40 | #define AR03 0x03 | ||
41 | #define AR04 0x04 | ||
42 | #define AR05 0x05 | ||
43 | #define AR06 0x06 | ||
44 | #define AR07 0x07 | ||
45 | #define AR08 0x08 | ||
46 | #define AR09 0x09 | ||
47 | #define AR0A 0x0A | ||
48 | #define AR0B 0x0B | ||
49 | #define AR0C 0x0C | ||
50 | #define AR0D 0x0D | ||
51 | #define AR0E 0x0E | ||
52 | #define AR0F 0x0F | ||
53 | #define AR10 0x10 | ||
54 | #define AR12 0x12 | ||
55 | #define AR13 0x13 | ||
56 | |||
57 | // | ||
58 | // Sequencer Regs, SRxx, 3c4/3c5 | ||
59 | // | ||
60 | #define SR00 0x00 | ||
61 | #define SR01 0x01 | ||
62 | #define SR02 0x02 | ||
63 | #define SR03 0x03 | ||
64 | #define SR04 0x04 | ||
65 | #define SR0D 0x0D | ||
66 | #define SR0E 0x0E | ||
67 | #define SR11 0x11 | ||
68 | #define SR18 0x18 | ||
69 | #define SR19 0x19 | ||
70 | |||
71 | // | ||
72 | // | ||
73 | // | ||
74 | #define CR00 0x00 | ||
75 | #define CR01 0x01 | ||
76 | #define CR02 0x02 | ||
77 | #define CR03 0x03 | ||
78 | #define CR04 0x04 | ||
79 | #define CR05 0x05 | ||
80 | #define CR06 0x06 | ||
81 | #define CR07 0x07 | ||
82 | #define CR08 0x08 | ||
83 | #define CR09 0x09 | ||
84 | #define CR0A 0x0A | ||
85 | #define CR0B 0x0B | ||
86 | #define CR0C 0x0C | ||
87 | #define CR0D 0x0D | ||
88 | #define CR0E 0x0E | ||
89 | #define CR0F 0x0F | ||
90 | #define CR10 0x10 | ||
91 | #define CR11 0x11 | ||
92 | #define CR12 0x12 | ||
93 | #define CR13 0x13 | ||
94 | #define CR14 0x14 | ||
95 | #define CR15 0x15 | ||
96 | #define CR16 0x16 | ||
97 | #define CR17 0x17 | ||
98 | #define CR18 0x18 | ||
99 | #define CR19 0x19 | ||
100 | #define CR1A 0x1A | ||
101 | #define CR1B 0x1B | ||
102 | #define CR1C 0x1C | ||
103 | #define CR1D 0x1D | ||
104 | #define CR1E 0x1E | ||
105 | #define CR1F 0x1F | ||
106 | #define CR20 0x20 | ||
107 | #define CR21 0x21 | ||
108 | #define CR27 0x27 | ||
109 | #define CR29 0x29 | ||
110 | #define CR2A 0x2A | ||
111 | #define CR2B 0x2B | ||
112 | #define CR2D 0x2D | ||
113 | #define CR2F 0x2F | ||
114 | #define CR36 0x36 | ||
115 | #define CR38 0x38 | ||
116 | #define CR39 0x39 | ||
117 | #define CR3A 0x3A | ||
118 | #define CR55 0x55 | ||
119 | #define CR56 0x56 | ||
120 | #define CR57 0x57 | ||
121 | #define CR58 0x58 | ||
122 | |||
123 | // | ||
124 | // | ||
125 | // | ||
126 | |||
127 | #define GR00 0x01 | ||
128 | #define GR01 0x01 | ||
129 | #define GR02 0x02 | ||
130 | #define GR03 0x03 | ||
131 | #define GR04 0x04 | ||
132 | #define GR05 0x05 | ||
133 | #define GR06 0x06 | ||
134 | #define GR07 0x07 | ||
135 | #define GR08 0x08 | ||
136 | #define GR0F 0x0F | ||
137 | #define GR20 0x20 | ||
138 | #define GR23 0x23 | ||
139 | #define GR2F 0x2F | ||
140 | #define GR30 0x30 | ||
141 | #define GR31 0x31 | ||
142 | #define GR33 0x33 | ||
143 | #define GR52 0x52 | ||
144 | #define GR53 0x53 | ||
145 | #define GR5D 0x5d | ||
146 | |||
147 | |||
148 | // | ||
149 | // Graphics Engine | ||
150 | // | ||
151 | #define GEBase 0x2100 // could be mapped elsewhere if we like it | ||
152 | #define GE00 (GEBase+0x00) // source 1, p 111 | ||
153 | #define GE04 (GEBase+0x04) // source 2, p 111 | ||
154 | #define GE08 (GEBase+0x08) // destination 1, p 111 | ||
155 | #define GE0C (GEBase+0x0C) // destination 2, p 112 | ||
156 | #define GE20 (GEBase+0x20) // engine status, p 113 | ||
157 | #define GE24 (GEBase+0x24) // reset all GE pointers | ||
158 | #define GE44 (GEBase+0x44) // command register, p 126 | ||
159 | #define GE48 (GEBase+0x48) // raster operation, p 127 | ||
160 | #define GE60 (GEBase+0x60) // foreground color, p 128 | ||
161 | #define GE64 (GEBase+0x64) // background color, p 128 | ||
162 | #define GE6C (GEBase+0x6C) // Pattern and Style, p 129, ok | ||
163 | #define GE9C (GEBase+0x9C) // pixel engine data port, p 125 | ||
164 | #define GEB8 (GEBase+0xB8) // Destination Stride / Buffer Base 0, p 133 | ||
165 | #define GEBC (GEBase+0xBC) // Destination Stride / Buffer Base 1, p 133 | ||
166 | #define GEC0 (GEBase+0xC0) // Destination Stride / Buffer Base 2, p 133 | ||
167 | #define GEC4 (GEBase+0xC4) // Destination Stride / Buffer Base 3, p 133 | ||
168 | #define GEC8 (GEBase+0xC8) // Source Stride / Buffer Base 0, p 133 | ||
169 | #define GECC (GEBase+0xCC) // Source Stride / Buffer Base 1, p 133 | ||
170 | #define GED0 (GEBase+0xD0) // Source Stride / Buffer Base 2, p 133 | ||
171 | #define GED4 (GEBase+0xD4) // Source Stride / Buffer Base 3, p 133 | ||
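The new cyblafb.h defines the CyberBlade graphics-engine register offsets and the point(x,y) packing macro. A hedged sketch of how a screen-to-screen copy might program them; blit_write32() is a hypothetical MMIO accessor standing in for whatever helper the driver actually uses:

/* illustrative only: ROP_S (0xCC) is a plain source copy */
static void copy_rect(void __iomem *mmio, int sx, int sy,
		      int dx, int dy, int w, int h)
{
	blit_write32(mmio, GE48, ROP_S);			/* raster operation */
	blit_write32(mmio, GE00, point(sx, sy));		/* source, upper left */
	blit_write32(mmio, GE04, point(sx + w - 1, sy + h - 1));
	blit_write32(mmio, GE08, point(dx, dy));		/* destination */
	blit_write32(mmio, GE0C, point(dx + w - 1, dy + h - 1));
	/* a command word written to GE44 would then start the engine */
}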
diff --git a/include/video/sisfb.h b/include/video/sisfb.h index 136bf791643d..e402eb5b3c7a 100644 --- a/include/video/sisfb.h +++ b/include/video/sisfb.h | |||
@@ -1,5 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2001-2004 by Thomas Winischhofer, Vienna, Austria. | 2 | * sisfb.h - definitions for the SiS framebuffer driver |
3 | * | ||
4 | * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria. | ||
3 | * | 5 | * |
4 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -16,8 +18,8 @@ | |||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA |
17 | */ | 19 | */ |
18 | 20 | ||
19 | #ifndef _LINUX_SISFB | 21 | #ifndef _LINUX_SISFB_H_ |
20 | #define _LINUX_SISFB | 22 | #define _LINUX_SISFB_H_ |
21 | 23 | ||
22 | #include <asm/ioctl.h> | 24 | #include <asm/ioctl.h> |
23 | #include <asm/types.h> | 25 | #include <asm/types.h> |
@@ -26,47 +28,35 @@ | |||
26 | /* PUBLIC */ | 28 | /* PUBLIC */ |
27 | /**********************************************/ | 29 | /**********************************************/ |
28 | 30 | ||
29 | /* vbflags */ | 31 | /* vbflags, public (others in sis.h) */ |
30 | #define CRT2_DEFAULT 0x00000001 | 32 | #define CRT2_DEFAULT 0x00000001 |
31 | #define CRT2_LCD 0x00000002 /* TW: Never change the order of the CRT2_XXX entries */ | 33 | #define CRT2_LCD 0x00000002 |
32 | #define CRT2_TV 0x00000004 /* (see SISCycleCRT2Type()) */ | 34 | #define CRT2_TV 0x00000004 |
33 | #define CRT2_VGA 0x00000008 | 35 | #define CRT2_VGA 0x00000008 |
34 | #define TV_NTSC 0x00000010 | 36 | #define TV_NTSC 0x00000010 |
35 | #define TV_PAL 0x00000020 | 37 | #define TV_PAL 0x00000020 |
36 | #define TV_HIVISION 0x00000040 | 38 | #define TV_HIVISION 0x00000040 |
37 | #define TV_YPBPR 0x00000080 | 39 | #define TV_YPBPR 0x00000080 |
38 | #define TV_AVIDEO 0x00000100 | 40 | #define TV_AVIDEO 0x00000100 |
39 | #define TV_SVIDEO 0x00000200 | 41 | #define TV_SVIDEO 0x00000200 |
40 | #define TV_SCART 0x00000400 | 42 | #define TV_SCART 0x00000400 |
41 | #define VB_CONEXANT 0x00000800 /* 661 series only */ | 43 | #define TV_PALM 0x00001000 |
42 | #define VB_TRUMPION VB_CONEXANT /* 300 series only */ | 44 | #define TV_PALN 0x00002000 |
43 | #define TV_PALM 0x00001000 | ||
44 | #define TV_PALN 0x00002000 | ||
45 | #define TV_NTSCJ 0x00001000 | 45 | #define TV_NTSCJ 0x00001000 |
46 | #define VB_302ELV 0x00004000 | 46 | #define TV_CHSCART 0x00008000 |
47 | #define TV_CHSCART 0x00008000 | 47 | #define TV_CHYPBPR525I 0x00010000 |
48 | #define TV_CHYPBPR525I 0x00010000 | ||
49 | #define CRT1_VGA 0x00000000 | 48 | #define CRT1_VGA 0x00000000 |
50 | #define CRT1_LCDA 0x00020000 | 49 | #define CRT1_LCDA 0x00020000 |
51 | #define VGA2_CONNECTED 0x00040000 | 50 | #define VGA2_CONNECTED 0x00040000 |
52 | #define VB_DISPTYPE_CRT1 0x00080000 /* CRT1 connected and used */ | 51 | #define VB_DISPTYPE_CRT1 0x00080000 /* CRT1 connected and used */ |
53 | #define VB_301 0x00100000 /* Video bridge type */ | 52 | #define VB_SINGLE_MODE 0x20000000 /* CRT1 or CRT2; determined by DISPTYPE_CRTx */ |
54 | #define VB_301B 0x00200000 | 53 | #define VB_MIRROR_MODE 0x40000000 /* CRT1 + CRT2 identical (mirror mode) */ |
55 | #define VB_302B 0x00400000 | 54 | #define VB_DUALVIEW_MODE 0x80000000 /* CRT1 + CRT2 independent (dual head mode) */ |
56 | #define VB_30xBDH 0x00800000 /* 30xB DH version (w/o LCD support) */ | ||
57 | #define VB_LVDS 0x01000000 | ||
58 | #define VB_CHRONTEL 0x02000000 | ||
59 | #define VB_301LV 0x04000000 | ||
60 | #define VB_302LV 0x08000000 | ||
61 | #define VB_301C 0x10000000 | ||
62 | #define VB_SINGLE_MODE 0x20000000 /* CRT1 or CRT2; determined by DISPTYPE_CRTx */ | ||
63 | #define VB_MIRROR_MODE 0x40000000 /* CRT1 + CRT2 identical (mirror mode) */ | ||
64 | #define VB_DUALVIEW_MODE 0x80000000 /* CRT1 + CRT2 independent (dual head mode) */ | ||
65 | 55 | ||
66 | /* Aliases: */ | 56 | /* Aliases: */ |
67 | #define CRT2_ENABLE (CRT2_LCD | CRT2_TV | CRT2_VGA) | 57 | #define CRT2_ENABLE (CRT2_LCD | CRT2_TV | CRT2_VGA) |
68 | #define TV_STANDARD (TV_NTSC | TV_PAL | TV_PALM | TV_PALN | TV_NTSCJ) | 58 | #define TV_STANDARD (TV_NTSC | TV_PAL | TV_PALM | TV_PALN | TV_NTSCJ) |
69 | #define TV_INTERFACE (TV_AVIDEO|TV_SVIDEO|TV_SCART|TV_HIVISION|TV_YPBPR|TV_CHSCART|TV_CHYPBPR525I) | 59 | #define TV_INTERFACE (TV_AVIDEO|TV_SVIDEO|TV_SCART|TV_HIVISION|TV_YPBPR|TV_CHSCART|TV_CHYPBPR525I) |
70 | 60 | ||
71 | /* Only if TV_YPBPR is set: */ | 61 | /* Only if TV_YPBPR is set: */ |
72 | #define TV_YPBPR525I TV_NTSC | 62 | #define TV_YPBPR525I TV_NTSC |
@@ -75,89 +65,118 @@ | |||
75 | #define TV_YPBPR1080I TV_PALN | 65 | #define TV_YPBPR1080I TV_PALN |
76 | #define TV_YPBPRALL (TV_YPBPR525I | TV_YPBPR525P | TV_YPBPR750P | TV_YPBPR1080I) | 66 | #define TV_YPBPRALL (TV_YPBPR525I | TV_YPBPR525P | TV_YPBPR750P | TV_YPBPR1080I) |
77 | 67 | ||
78 | #define VB_SISBRIDGE (VB_301|VB_301B|VB_301C|VB_302B|VB_301LV|VB_302LV|VB_302ELV) | ||
79 | #define VB_SISTVBRIDGE (VB_301|VB_301B|VB_301C|VB_302B|VB_301LV|VB_302LV) | ||
80 | #define VB_VIDEOBRIDGE (VB_SISBRIDGE | VB_LVDS | VB_CHRONTEL | VB_CONEXANT) | ||
81 | |||
82 | #define VB_DISPTYPE_DISP2 CRT2_ENABLE | 68 | #define VB_DISPTYPE_DISP2 CRT2_ENABLE |
83 | #define VB_DISPTYPE_CRT2 CRT2_ENABLE | 69 | #define VB_DISPTYPE_CRT2 CRT2_ENABLE |
84 | #define VB_DISPTYPE_DISP1 VB_DISPTYPE_CRT1 | 70 | #define VB_DISPTYPE_DISP1 VB_DISPTYPE_CRT1 |
85 | #define VB_DISPMODE_SINGLE VB_SINGLE_MODE | 71 | #define VB_DISPMODE_SINGLE VB_SINGLE_MODE |
86 | #define VB_DISPMODE_MIRROR VB_MIRROR_MODE | 72 | #define VB_DISPMODE_MIRROR VB_MIRROR_MODE |
87 | #define VB_DISPMODE_DUAL VB_DUALVIEW_MODE | 73 | #define VB_DISPMODE_DUAL VB_DUALVIEW_MODE |
88 | #define VB_DISPLAY_MODE (SINGLE_MODE | MIRROR_MODE | DUALVIEW_MODE) | 74 | #define VB_DISPLAY_MODE (SINGLE_MODE | MIRROR_MODE | DUALVIEW_MODE) |
89 | 75 | ||
90 | /* Structure argument for SISFB_GET_INFO ioctl */ | 76 | /* Structure argument for SISFB_GET_INFO ioctl */ |
91 | typedef struct _SISFB_INFO sisfb_info, *psisfb_info; | 77 | struct sisfb_info { |
92 | 78 | __u32 sisfb_id; /* for identifying sisfb */ | |
93 | struct _SISFB_INFO { | ||
94 | __u32 sisfb_id; /* for identifying sisfb */ | ||
95 | #ifndef SISFB_ID | 79 | #ifndef SISFB_ID |
96 | #define SISFB_ID 0x53495346 /* Identify myself with 'SISF' */ | 80 | #define SISFB_ID 0x53495346 /* Identify myself with 'SISF' */ |
97 | #endif | 81 | #endif |
98 | __u32 chip_id; /* PCI-ID of detected chip */ | 82 | __u32 chip_id; /* PCI-ID of detected chip */ |
99 | __u32 memory; /* video memory in KB which sisfb manages */ | 83 | __u32 memory; /* total video memory in KB */ |
100 | __u32 heapstart; /* heap start (= sisfb "mem" argument) in KB */ | 84 | __u32 heapstart; /* heap start offset in KB */ |
101 | __u8 fbvidmode; /* current sisfb mode */ | 85 | __u8 fbvidmode; /* current sisfb mode */ |
102 | 86 | ||
103 | __u8 sisfb_version; | 87 | __u8 sisfb_version; |
104 | __u8 sisfb_revision; | 88 | __u8 sisfb_revision; |
105 | __u8 sisfb_patchlevel; | 89 | __u8 sisfb_patchlevel; |
106 | 90 | ||
107 | __u8 sisfb_caps; /* sisfb capabilities */ | 91 | __u8 sisfb_caps; /* sisfb capabilities */ |
108 | 92 | ||
109 | __u32 sisfb_tqlen; /* turbo queue length (in KB) */ | 93 | __u32 sisfb_tqlen; /* turbo queue length (in KB) */ |
110 | 94 | ||
111 | __u32 sisfb_pcibus; /* The card's PCI ID */ | 95 | __u32 sisfb_pcibus; /* The card's PCI ID */ |
112 | __u32 sisfb_pcislot; | 96 | __u32 sisfb_pcislot; |
113 | __u32 sisfb_pcifunc; | 97 | __u32 sisfb_pcifunc; |
98 | |||
99 | __u8 sisfb_lcdpdc; /* PanelDelayCompensation */ | ||
100 | |||
101 | __u8 sisfb_lcda; /* Detected status of LCDA for low res/text modes */ | ||
102 | |||
103 | __u32 sisfb_vbflags; | ||
104 | __u32 sisfb_currentvbflags; | ||
105 | |||
106 | __u32 sisfb_scalelcd; | ||
107 | __u32 sisfb_specialtiming; | ||
108 | |||
109 | __u8 sisfb_haveemi; | ||
110 | __u8 sisfb_emi30,sisfb_emi31,sisfb_emi32,sisfb_emi33; | ||
111 | __u8 sisfb_haveemilcd; | ||
114 | 112 | ||
115 | __u8 sisfb_lcdpdc; /* PanelDelayCompensation */ | 113 | __u8 sisfb_lcdpdca; /* PanelDelayCompensation for LCD-via-CRT1 */ |
116 | 114 | ||
117 | __u8 sisfb_lcda; /* Detected status of LCDA for low res/text modes */ | 115 | __u16 sisfb_tvxpos, sisfb_tvypos; /* Warning: Values + 32 ! */ |
118 | 116 | ||
119 | __u32 sisfb_vbflags; | 117 | __u32 sisfb_heapsize; /* heap size (in KB) */ |
120 | __u32 sisfb_currentvbflags; | 118 | __u32 sisfb_videooffset; /* Offset of viewport in video memory (in bytes) */ |
121 | 119 | ||
122 | __u32 sisfb_scalelcd; | 120 | __u32 sisfb_curfstn; /* currently running FSTN/DSTN mode */ |
123 | __u32 sisfb_specialtiming; | 121 | __u32 sisfb_curdstn; |
124 | 122 | ||
125 | __u8 sisfb_haveemi; | 123 | __u16 sisfb_pci_vendor; /* PCI vendor (SiS or XGI) */ |
126 | __u8 sisfb_emi30,sisfb_emi31,sisfb_emi32,sisfb_emi33; | ||
127 | __u8 sisfb_haveemilcd; | ||
128 | 124 | ||
129 | __u8 sisfb_lcdpdca; /* PanelDelayCompensation for LCD-via-CRT1 */ | 125 | __u32 sisfb_vbflags2; /* ivideo->vbflags2 */ |
130 | 126 | ||
131 | __u16 sisfb_tvxpos, sisfb_tvypos; /* Warning: Values + 32 ! */ | 127 | __u8 sisfb_can_post; /* sisfb can POST this card */ |
128 | __u8 sisfb_card_posted; /* card is POSTED */ | ||
129 | __u8 sisfb_was_boot_device; /* This card was the boot video device (ie is primary) */ | ||
132 | 130 | ||
133 | __u8 reserved[208]; /* for future use */ | 131 | __u8 reserved[183]; /* for future use */ |
132 | }; | ||
133 | |||
134 | #define SISFB_CMD_GETVBFLAGS 0x55AA0001 /* no arg; result[1] = vbflags */ | ||
135 | #define SISFB_CMD_SWITCHCRT1 0x55AA0010 /* arg[0]: 99 = query, 0 = off, 1 = on */ | ||
136 | /* more to come */ | ||
137 | |||
138 | #define SISFB_CMD_ERR_OK 0x80000000 /* command succeeded */ | ||
139 | #define SISFB_CMD_ERR_LOCKED 0x80000001 /* sisfb is locked */ | ||
140 | #define SISFB_CMD_ERR_EARLY 0x80000002 /* request before sisfb took over gfx system */ | ||
141 | #define SISFB_CMD_ERR_NOVB 0x80000003 /* No video bridge */ | ||
142 | #define SISFB_CMD_ERR_NOCRT2 0x80000004 /* can't change CRT1 status, CRT2 disabled */ | ||
143 | /* more to come */ | ||
144 | #define SISFB_CMD_ERR_UNKNOWN 0x8000ffff /* Unknown command */ | ||
145 | #define SISFB_CMD_ERR_OTHER 0x80010000 /* Other error */ | ||
146 | |||
147 | /* Argument for SISFB_CMD ioctl */ | ||
148 | struct sisfb_cmd { | ||
149 | __u32 sisfb_cmd; | ||
150 | __u32 sisfb_arg[16]; | ||
151 | __u32 sisfb_result[4]; | ||
134 | }; | 152 | }; |
135 | 153 | ||
136 | /* Addtional IOCTLs for communication sisfb <> X driver */ | 154 | /* Addtional IOCTLs for communication sisfb <> X driver */ |
137 | /* If changing this, vgatypes.h must also be changed (for X driver) */ | 155 | /* If changing this, vgatypes.h must also be changed (for X driver) */ |
138 | 156 | ||
139 | /* ioctl for identifying and giving some info (esp. memory heap start) */ | 157 | /* ioctl for identifying and giving some info (esp. memory heap start) */ |
140 | #define SISFB_GET_INFO_SIZE _IOR(0xF3,0x00,__u32) | 158 | #define SISFB_GET_INFO_SIZE _IOR(0xF3,0x00,__u32) |
141 | #define SISFB_GET_INFO _IOR(0xF3,0x01,struct _SISFB_INFO) | 159 | #define SISFB_GET_INFO _IOR(0xF3,0x01,struct sisfb_info) |
142 | 160 | ||
143 | /* ioctrl to get current vertical retrace status */ | 161 | /* ioctrl to get current vertical retrace status */ |
144 | #define SISFB_GET_VBRSTATUS _IOR(0xF3,0x02,__u32) | 162 | #define SISFB_GET_VBRSTATUS _IOR(0xF3,0x02,__u32) |
145 | 163 | ||
146 | /* ioctl to enable/disable panning auto-maximize (like nomax parameter) */ | 164 | /* ioctl to enable/disable panning auto-maximize (like nomax parameter) */ |
147 | #define SISFB_GET_AUTOMAXIMIZE _IOR(0xF3,0x03,__u32) | 165 | #define SISFB_GET_AUTOMAXIMIZE _IOR(0xF3,0x03,__u32) |
148 | #define SISFB_SET_AUTOMAXIMIZE _IOW(0xF3,0x03,__u32) | 166 | #define SISFB_SET_AUTOMAXIMIZE _IOW(0xF3,0x03,__u32) |
149 | 167 | ||
150 | /* ioctls to relocate TV output (x=D[31:16], y=D[15:0], + 32)*/ | 168 | /* ioctls to relocate TV output (x=D[31:16], y=D[15:0], + 32)*/ |
151 | #define SISFB_GET_TVPOSOFFSET _IOR(0xF3,0x04,__u32) | 169 | #define SISFB_GET_TVPOSOFFSET _IOR(0xF3,0x04,__u32) |
152 | #define SISFB_SET_TVPOSOFFSET _IOW(0xF3,0x04,__u32) | 170 | #define SISFB_SET_TVPOSOFFSET _IOW(0xF3,0x04,__u32) |
171 | |||
172 | /* ioctl for internal sisfb commands (sisfbctrl) */ | ||
173 | #define SISFB_COMMAND _IOWR(0xF3,0x05,struct sisfb_cmd) | ||
153 | 174 | ||
154 | /* ioctl for locking sisfb (no register access during lock) */ | 175 | /* ioctl for locking sisfb (no register access during lock) */ |
155 | /* As of now, only used to avoid register access during | 176 | /* As of now, only used to avoid register access during |
156 | * the ioctls listed above. | 177 | * the ioctls listed above. |
157 | */ | 178 | */ |
158 | #define SISFB_SET_LOCK _IOW(0xF3,0x06,__u32) | 179 | #define SISFB_SET_LOCK _IOW(0xF3,0x06,__u32) |
159 | |||
160 | /* more to come soon */ | ||
161 | 180 | ||
162 | /* ioctls 0xF3 up to 0x3F reserved for sisfb */ | 181 | /* ioctls 0xF3 up to 0x3F reserved for sisfb */ |
163 | 182 | ||
@@ -165,7 +184,7 @@ struct _SISFB_INFO { | |||
165 | /* The following are deprecated and should not be used anymore: */ | 184 | /* The following are deprecated and should not be used anymore: */ |
166 | /****************************************************************/ | 185 | /****************************************************************/ |
167 | /* ioctl for identifying and giving some info (esp. memory heap start) */ | 186 | /* ioctl for identifying and giving some info (esp. memory heap start) */ |
168 | #define SISFB_GET_INFO_OLD _IOR('n',0xF8,__u32) | 187 | #define SISFB_GET_INFO_OLD _IOR('n',0xF8,__u32) |
169 | /* ioctrl to get current vertical retrace status */ | 188 | /* ioctrl to get current vertical retrace status */ |
170 | #define SISFB_GET_VBRSTATUS_OLD _IOR('n',0xF9,__u32) | 189 | #define SISFB_GET_VBRSTATUS_OLD _IOR('n',0xF9,__u32) |
171 | /* ioctl to enable/disable panning auto-maximize (like nomax parameter) */ | 190 | /* ioctl to enable/disable panning auto-maximize (like nomax parameter) */ |
@@ -177,8 +196,8 @@ struct _SISFB_INFO { | |||
177 | 196 | ||
178 | /* For fb memory manager (FBIO_ALLOC, FBIO_FREE) */ | 197 | /* For fb memory manager (FBIO_ALLOC, FBIO_FREE) */ |
179 | struct sis_memreq { | 198 | struct sis_memreq { |
180 | __u32 offset; | 199 | __u32 offset; |
181 | __u32 size; | 200 | __u32 size; |
182 | }; | 201 | }; |
183 | 202 | ||
184 | /**********************************************/ | 203 | /**********************************************/ |
@@ -187,12 +206,19 @@ struct sis_memreq { | |||
187 | /**********************************************/ | 206 | /**********************************************/ |
188 | 207 | ||
189 | #ifdef __KERNEL__ | 208 | #ifdef __KERNEL__ |
209 | |||
210 | #include <linux/pci.h> | ||
211 | |||
190 | #define UNKNOWN_VGA 0 | 212 | #define UNKNOWN_VGA 0 |
191 | #define SIS_300_VGA 1 | 213 | #define SIS_300_VGA 1 |
192 | #define SIS_315_VGA 2 | 214 | #define SIS_315_VGA 2 |
193 | 215 | ||
216 | #define SISFB_HAVE_MALLOC_NEW | ||
194 | extern void sis_malloc(struct sis_memreq *req); | 217 | extern void sis_malloc(struct sis_memreq *req); |
218 | extern void sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req); | ||
219 | |||
195 | extern void sis_free(u32 base); | 220 | extern void sis_free(u32 base); |
221 | extern void sis_free_new(struct pci_dev *pdev, u32 base); | ||
196 | #endif | 222 | #endif |
197 | 223 | ||
198 | #endif | 224 | #endif |
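SISFB_GET_INFO now takes the plain struct sisfb_info, and SISFB_COMMAND exposes the command interface governed by the SISFB_CMD_* and SISFB_CMD_ERR_* codes above. A hedged userspace sketch of the identification handshake; the framebuffer device node is an assumption:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <video/sisfb.h>

int main(void)
{
	struct sisfb_info info;
	__u32 size = 0;
	int fd = open("/dev/fb0", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;
	memset(&info, 0, sizeof(info));
	if (ioctl(fd, SISFB_GET_INFO_SIZE, &size) == 0 &&
	    ioctl(fd, SISFB_GET_INFO, &info) == 0 &&
	    info.sisfb_id == SISFB_ID)
		printf("sisfb %u.%u.%u, %u KB video RAM, vbflags 0x%x\n",
		       info.sisfb_version, info.sisfb_revision,
		       info.sisfb_patchlevel, info.memory,
		       info.sisfb_vbflags);
	close(fd);
	return 0;
}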